import os

# Cache models and tokenizers in the Space's persistent storage. HF_HOME is
# read at import time by huggingface_hub, so it must be set before the
# diffusers import below.
os.environ["HF_HOME"] = "/data/.cache/huggingface"

import torch
import traceback
import gradio as gr  # needed for gr.Error
from diffusers import AutoPipelineForImage2Image
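
# The Space reports "Running on Zero" (ZeroGPU), where a GPU is attached only
# while a decorated function runs. This sketch assumes the standard ZeroGPU
# SDK: the spaces package is imported before any CUDA use so it can patch
# torch, and the inference function below is wrapped with @spaces.GPU.
import spaces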

# Load the SDXL image-to-image pipeline and apply the headshot LoRA
pipe = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
    token=os.getenv("HF_TOKEN")  # read from the Space's secrets
).to("cuda")
pipe.load_lora_weights("theoracle/sdxl-lora-headshot")
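
# Request a ZeroGPU slot for each call (see the spaces import above)
@spaces.GPU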
def generate_with_lora(image, prompt, negative_prompt, strength, guidance_scale):
    try:
        if image is None:
            raise ValueError("Uploaded image is None. Please upload a valid image.")
        print("[INFO] Received image size:", image.size)
        # convert("RGB") guards against RGBA/palette uploads; SDXL works at 1024x1024
        image = image.convert("RGB").resize((1024, 1024))
        print("[INFO] Starting pipeline with prompt:", prompt)
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt or "",
            image=image,
            strength=strength,
            guidance_scale=guidance_scale,
            num_inference_steps=50
        ).images[0]
        print("[INFO] Generation successful.")
        return result
    except Exception as e:
        print("[ERROR] Exception in generate_with_lora:\n", traceback.format_exc())
        # Re-raise as gr.Error so the failure surfaces in the Gradio UI
        raise gr.Error(f"Image generation failed: {e}")
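
# The file as shown ends at the handler; a Space's app.py also needs the UI
# wiring. A minimal sketch matching the handler's signature (labels, ranges,
# and defaults below are assumptions, not from the original):
demo = gr.Interface(
    fn=generate_with_lora,
    inputs=[
        gr.Image(type="pil", label="Input image"),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative prompt"),
        gr.Slider(0.0, 1.0, value=0.6, label="Strength"),
        gr.Slider(1.0, 20.0, value=7.5, label="Guidance scale"),
    ],
    outputs=gr.Image(type="pil", label="Result"),
)

if __name__ == "__main__":
    demo.launch()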