Spaces: Running on Zero
```python
import os

# ── before you set the env var ──
hf_home = "/data/.cache/huggingface"
yolo_cfg = "/data/ultralytics"

# create the folders (and any parents) if they don't already exist
os.makedirs(hf_home, exist_ok=True)
os.makedirs(yolo_cfg, exist_ok=True)

# now point HF and YOLO at them
os.environ["HF_HOME"] = hf_home
os.environ["YOLO_CONFIG_DIR"] = yolo_cfg
from ultralytics import YOLO
import numpy as np
import torch
from PIL import Image
import cv2
import traceback  # needed for the error handler in run_clothing_inpaint
from diffusers import StableDiffusionXLInpaintPipeline
from utils import pil_to_cv2, cv2_to_pil
import gradio as gr  # needed for gr.Error handling
INPAINT_SIZE = 1024

# Load clothing-segmentation model
clothing_model = YOLO("deepfashion2_yolov8s-seg.pt")

# Load models once at startup
yolo = YOLO("yolov8x-seg.pt")

inpaint_pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    use_safetensors=True,
    token=os.getenv("HF_TOKEN"),  # `use_auth_token` is deprecated in recent diffusers
).to("cuda")
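
# Optional, and an assumption on my part (not in the original Space): on
# memory-constrained GPUs, diffusers' built-in attention slicing trades a
# little speed for lower VRAM use:
# inpaint_pipe.enable_attention_slicing()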
def run_background_removal_and_inpaint(image_path, prompt, negative_prompt, guidance_scale=10):
    if not image_path or not os.path.isfile(image_path):
        raise gr.Error("No valid image found. Please run Step 1 first.")

    image = Image.open(image_path).convert("RGB")
    img_cv = pil_to_cv2(image)

    results = yolo(img_cv)
    if not results or results[0].masks is None or len(results[0].masks.data) == 0:
        raise gr.Error("No subject detected in the image. Please upload a clearer photo.")

    # Take the first detected subject mask and invert it, so the *background*
    # becomes the region to inpaint
    mask = results[0].masks.data[0].cpu().numpy()
    binary = (mask > 0.5).astype(np.uint8)
    background_mask = 1 - binary

    # Dilate the background mask slightly so the inpainting overlaps the subject edge
    kernel = np.ones((15, 15), np.uint8)
    dilated = cv2.dilate(background_mask, kernel, iterations=1)
    inpaint_mask = (dilated * 255).astype(np.uint8)

    # SDXL inpainting expects 1024x1024, so resize both image and mask
    mask_pil = cv2_to_pil(inpaint_mask).resize((INPAINT_SIZE, INPAINT_SIZE)).convert("L")
    img_pil = image.resize((INPAINT_SIZE, INPAINT_SIZE)).convert("RGB")

    result = inpaint_pipe(
        prompt=prompt,
        negative_prompt=negative_prompt or "",
        image=img_pil,
        mask_image=mask_pil,
        guidance_scale=guidance_scale,
        num_inference_steps=40,
    ).images[0]

    return result
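
# Note: unlike run_clothing_inpaint below, this function raises gr.Error
# directly instead of returning an (image, error-message) pair.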
def run_clothing_inpaint(image, prompt, negative_prompt, guidance):
    try:
        print("[INFO] Step 3: Clothing segmentation and inpainting...", flush=True)
        # PIL (RGB) -> OpenCV (BGR); make contiguous so YOLO accepts the array
        img_cv = np.ascontiguousarray(np.array(image.convert("RGB"))[..., ::-1])
        h, w = img_cv.shape[:2]

        # Segment clothing (check masks before accessing .data to avoid an
        # AttributeError when nothing is detected)
        results = clothing_model(img_cv)
        if results[0].masks is None or len(results[0].masks.data) == 0:
            raise gr.Error("No clothing detected. Try a different image.")
        masks = results[0].masks.data.cpu().numpy()

        # Use the first clothing mask, scaled back to the input resolution
        mask = masks[0]
        resized_mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)
        binary_mask = (resized_mask > 0.5).astype(np.uint8) * 255
        mask_pil = Image.fromarray(binary_mask).convert("L").resize((INPAINT_SIZE, INPAINT_SIZE))

        # Resize input image to the SDXL inpainting resolution
        resized_image = image.convert("RGB").resize((INPAINT_SIZE, INPAINT_SIZE))

        # Inpaint clothing
        result = inpaint_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=resized_image,
            mask_image=mask_pil,
            guidance_scale=guidance,
            num_inference_steps=50,
        ).images[0]

        return result, ""
    except gr.Error as e:
        return None, f"Error: {str(e)}"
    except Exception as e:
        traceback.print_exc()
        return None, f"Unexpected Error: {type(e).__name__}: {str(e)}"
```
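
For context, here is a minimal sketch of how these two functions might be wired into a Gradio Blocks UI. The component names and layout are my assumptions for illustration, not part of the original Space:

```python
# Assumed wiring (not from the original Space): expose both inpainting steps
# through a simple Gradio Blocks interface.
import gradio as gr

with gr.Blocks() as demo:
    # run_background_removal_and_inpaint takes a file path;
    # run_clothing_inpaint takes a PIL image
    image_path_in = gr.Image(type="filepath", label="Input image (background step)")
    image_pil_in = gr.Image(type="pil", label="Input image (clothing step)")
    prompt = gr.Textbox(label="Prompt")
    negative = gr.Textbox(label="Negative prompt")
    guidance = gr.Slider(1, 20, value=10, label="Guidance scale")
    bg_out = gr.Image(label="Background replaced")
    cloth_out = gr.Image(label="Clothing replaced")
    error_box = gr.Textbox(label="Errors", interactive=False)

    gr.Button("Replace background").click(
        run_background_removal_and_inpaint,
        inputs=[image_path_in, prompt, negative, guidance],
        outputs=bg_out,
    )
    gr.Button("Replace clothing").click(
        run_clothing_inpaint,
        inputs=[image_pil_in, prompt, negative, guidance],
        outputs=[cloth_out, error_box],  # second output surfaces error text
    )

demo.launch()
```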