Spaces:
Sleeping
Sleeping
File size: 3,049 Bytes
4bdb54f 55d13a0 173fd35 55d13a0 4bdb54f 55d13a0 4bdb54f 77c7489 4bdb54f 173fd35 55d13a0 4bdb54f 55d13a0 c50ee85 55d13a0 4bdb54f 55d13a0 4bdb54f c50ee85 55d13a0 173fd35 4bdb54f 77c7489 4bdb54f 55d13a0 173fd35 4bdb54f 173fd35 55d13a0 4bdb54f 55d13a0 173fd35 55d13a0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
# app.py (Final Corrected Version)
import gradio as gr
import torch
from diffusers import AutoPipelineForInpainting
from PIL import Image
import time
# --- Model Loading ---
# Load the inpainting pipeline once at import time so every request reuses it.
print("Loading model for low-RAM CPU environment...")
model_id = "runwayml/stable-diffusion-inpainting"
try:
    # float32 is required here: half-precision kernels are generally CUDA-only.
    pipe = AutoPipelineForInpainting.from_pretrained(model_id, torch_dtype=torch.float32)
    # NOTE(review): enable_model_cpu_offload() normally offloads *to* CPU from an
    # accelerator; on a CPU-only box it may raise or be a no-op depending on the
    # installed `accelerate`/`diffusers` versions — confirm against the pinned deps.
    pipe.enable_model_cpu_offload()
    print("Model loaded successfully with CPU offloading enabled.")
except Exception as e:
    print(f"An error occurred during model loading: {e}")
    # Bare `raise` re-raises the active exception in place; clearer than the
    # original `raise e`, which redundantly re-raises the same object.
    raise
# --- Prompts ---
# Fallback positive prompt used when the user leaves the textbox empty:
# generic quality boosters rather than subject-specific guidance.
DEFAULT_PROMPT = "photorealistic, 4k, ultra high quality, sharp focus, masterpiece, high detail"
# Applied on every generation (regardless of user input) to steer the model
# away from common inpainting artifacts.
DEFAULT_NEGATIVE_PROMPT = "blurry, pixelated, distorted, deformed, ugly, disfigured, cartoon, watermark"
# --- Inpainting Function (Correct Signature) ---
def inpaint_image(image_and_mask, user_prompt, guidance_scale, num_steps, progress=gr.Progress(track_tqdm=True)):
    """Run Stable Diffusion inpainting over the user-drawn mask.

    Args:
        image_and_mask: dict with "image" and "mask" PIL images, as produced by
            ``gr.Image(tool="brush")``; may be None/empty if nothing was uploaded.
        user_prompt: optional text; falls back to DEFAULT_PROMPT when blank.
        guidance_scale: classifier-free guidance strength passed to the pipeline.
        num_steps: number of diffusion steps (cast to int for the pipeline).
        progress: Gradio progress tracker linked to the pipeline's tqdm bars.

    Returns:
        The inpainted PIL image.

    Raises:
        gr.Error: if no image was uploaded or no mask was drawn.
    """
    # Validate BEFORE touching the payload. The original converted first, so a
    # missing upload crashed with an AttributeError/TypeError instead of
    # surfacing the friendly gr.Error message in the UI.
    if (
        image_and_mask is None
        or image_and_mask.get("image") is None
        or image_and_mask.get("mask") is None
    ):
        raise gr.Error("Please upload an image and draw a mask on it first!")

    image = image_and_mask["image"].convert("RGB")
    mask = image_and_mask["mask"].convert("RGB")

    # Blank prompt -> generic quality prompt; the negative prompt is always the
    # same default either way.
    if user_prompt and user_prompt.strip():
        prompt = user_prompt
    else:
        prompt = DEFAULT_PROMPT
    negative_prompt = DEFAULT_NEGATIVE_PROMPT

    print("Starting inpainting on CPU...")  # plain string: no placeholders, no f-string needed
    result_image = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_steps),
    ).images[0]
    return result_image
# --- UI ---
# Build the Gradio app. Component creation order inside each context defines
# the on-screen layout, so statements below must stay in this order.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎨 AI Image Fixer (Stable Version)")
    # NOTE(review): gr.Warning() called at build time (outside an event handler)
    # may only log rather than show a toast in some Gradio versions — confirm it
    # surfaces in the UI as intended.
    gr.Warning("‼️ PATIENCE REQUIRED! Generation can take 15-30 minutes on free hardware.")
    with gr.Row():
        with gr.Column(scale=2):
            # With tool='brush' this component hands the handler a dict holding
            # both the uploaded 'image' and the user-drawn 'mask'.
            # NOTE(review): `source=`/`tool=` were removed in Gradio 4.x in
            # favor of gr.ImageEditor/gr.ImageMask — confirm the pinned Gradio
            # version still accepts these keyword arguments.
            input_image = gr.Image(label="1. Upload & Mask Image", source="upload", tool="brush", type="pil")
            prompt_textbox = gr.Textbox(label="2. Describe Your Fix (Optional)", placeholder="Leave empty for a general fix")
            with gr.Accordion("Advanced Settings", open=False):
                guidance_scale = gr.Slider(minimum=0, maximum=20, value=8.0, label="Guidance Scale")
                num_steps = gr.Slider(minimum=10, maximum=50, step=1, value=20, label="Inference Steps")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Result", type="pil")
            submit_button = gr.Button("Fix It!", variant="primary")
    # Gradio passes these four components' values positionally; they must line
    # up with inpaint_image's first four parameters.
    submit_button.click(
        fn=inpaint_image,
        inputs=[input_image, prompt_textbox, guidance_scale, num_steps],
        outputs=output_image
    )
# Entry point: start the Gradio server only when executed as a script (not on
# import). The stray trailing " |" copy/paste residue after demo.launch() was
# removed — it made this line a SyntaxError.
if __name__ == "__main__":
    demo.launch()