primerz committed on
Commit ee4fca1 · verified · 1 Parent(s): 962b8c2

Update app.py

Files changed (1):
  1. app.py +246 -160
app.py CHANGED
@@ -2,21 +2,25 @@ import spaces  # MUST be first, before any CUDA-related imports
  import gradio as gr
  import torch
  from diffusers import (
-     StableDiffusionXLPipeline,
-     StableDiffusionXLControlNetPipeline,
      ControlNetModel,
      AutoencoderKL,
-     LCMScheduler  # CORRECT SCHEDULER FOR LCM
  )
  from diffusers.models.attention_processor import AttnProcessor2_0
  from insightface.app import FaceAnalysis
  from PIL import Image
  import numpy as np
  import cv2
- from transformers import pipeline as transformers_pipeline
  from huggingface_hub import hf_hub_download
  import os

  # Configuration
  MODEL_REPO = "primerz/pixagram"
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -30,9 +34,10 @@ print(f"Loading models from: {MODEL_REPO}")
  print(f"LORA Trigger Word: {TRIGGER_WORD}")

  class RetroArtConverter:
-     def __init__(self):
          self.device = device
          self.dtype = dtype
          self.models_loaded = {
              'custom_checkpoint': False,
              'lora': False,
@@ -40,7 +45,7 @@ class RetroArtConverter:
          }

          # Initialize face analysis for InstantID
-         print("Loading face analysis model...")
          try:
              self.face_app = FaceAnalysis(
                  name='antelopev2',
@@ -55,14 +60,7 @@ class RetroArtConverter:
              self.face_app = None
              self.face_detection_enabled = False

-         # Load ControlNet for depth
-         print("Loading ControlNet depth model...")
-         self.controlnet_depth = ControlNetModel.from_pretrained(
-             "diffusers/controlnet-zoe-depth-sdxl-1.0",
-             torch_dtype=self.dtype
-         ).to(self.device)
-
-         # Load InstantID ControlNet (optional)
          print("Loading InstantID ControlNet...")
          try:
              self.controlnet_instantid = ControlNetModel.from_pretrained(
@@ -78,50 +76,82 @@ class RetroArtConverter:
              self.controlnet_instantid = None
              self.instantid_enabled = False

-         # Load depth estimator
-         print("Loading depth estimator...")
-         self.depth_estimator = transformers_pipeline(
-             'depth-estimation',
-             model="Intel/dpt-hybrid-midas",
-             device=self.device if self.device == "cuda" else -1
-         )

          # Determine which controlnets to use
          if self.instantid_enabled and self.controlnet_instantid is not None:
-             controlnets = [self.controlnet_depth, self.controlnet_instantid]
-             print(f"Initializing with multiple ControlNets: Depth + InstantID")
          else:
              controlnets = self.controlnet_depth
-             print(f"Initializing with single ControlNet: Depth only")

          # Load SDXL checkpoint from HuggingFace Hub
-         # NOTE: VAE is bundled in the checkpoint, don't load separately!
-         print("Loading SDXL checkpoint (horizon) with bundled VAE from HuggingFace Hub...")
          try:
              model_path = hf_hub_download(
                  repo_id=MODEL_REPO,
                  filename="horizon.safetensors",
                  repo_type="model"
              )
-             self.pipe = StableDiffusionXLControlNetPipeline.from_single_file(
                  model_path,
                  controlnet=controlnets,
                  torch_dtype=self.dtype,
                  use_safetensors=True
              ).to(self.device)
-             print("✓ Custom checkpoint loaded successfully (VAE bundled)")
              self.models_loaded['custom_checkpoint'] = True
          except Exception as e:
              print(f"⚠️ Could not load custom checkpoint: {e}")
              print("Using default SDXL base model")
-             self.pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
                  "stabilityai/stable-diffusion-xl-base-1.0",
                  controlnet=controlnets,
                  torch_dtype=self.dtype,
                  use_safetensors=True
              ).to(self.device)
              self.models_loaded['custom_checkpoint'] = False

          # Load LORA from HuggingFace Hub
          print("Loading LORA (retroart) from HuggingFace Hub...")
          try:
@@ -138,11 +168,18 @@ class RetroArtConverter:
              print(f"⚠️ Could not load LORA: {e}")
              self.models_loaded['lora'] = False

-         # CRITICAL: Use LCM Scheduler for this model!
-         print("Setting up LCM scheduler...")
-         self.pipe.scheduler = LCMScheduler.from_config(
-             self.pipe.scheduler.config
-         )

          # Enable attention optimizations
          self.pipe.unet.set_attn_processor(AttnProcessor2_0())
@@ -155,11 +192,6 @@ class RetroArtConverter:
          except Exception as e:
              print(f"⚠️ xformers not available: {e}")

-         # Set CLIP skip to 2
-         if hasattr(self.pipe, 'text_encoder'):
-             self.clip_skip = 2
-             print(f"✓ CLIP skip set to {self.clip_skip}")
-
          # Track controlnet configuration
          self.using_multiple_controlnets = isinstance(controlnets, list)
          print(f"Pipeline initialized with {'multiple' if self.using_multiple_controlnets else 'single'} ControlNet(s)")
@@ -171,40 +203,38 @@ class RetroArtConverter:
          print("===================\n")

          print("✓ Model initialization complete!")
-         print("\n=== LCM CONFIGURATION ===")
-         print("Scheduler: LCM")
-         print("Recommended Steps: 12")
-         print("Recommended CFG: 1.0-1.5")
-         print("Recommended Resolution: 896x1152 or 832x1216")
-         print("CLIP Skip: 2")
          print(f"LORA Trigger: '{TRIGGER_WORD}'")
          print("=========================\n")

      def get_depth_map(self, image):
-         """Generate depth map from input image"""
-         depth = self.depth_estimator(image)
-         depth_image = depth['depth']
-
-         depth_array = np.array(depth_image)
-
-         # Normalize with percentile clipping
-         depth_min, depth_max = np.percentile(depth_array, [2, 98])
-         depth_normalized = np.clip((depth_array - depth_min) / (depth_max - depth_min + 1e-8), 0, 1) * 255
-         depth_normalized = depth_normalized.astype(np.uint8)
-
-         # Slight blur to reduce noise
-         depth_normalized = cv2.GaussianBlur(depth_normalized, (3, 3), 0)
-
-         # Convert to RGB
-         depth_colored = cv2.cvtColor(depth_normalized, cv2.COLOR_GRAY2RGB)
-
-         return Image.fromarray(depth_colored)

      def calculate_optimal_size(self, original_width, original_height):
          """Calculate optimal size from recommended resolutions"""
          aspect_ratio = original_width / original_height

-         # Recommended resolutions for this model
          recommended_sizes = [
              (896, 1152),   # Portrait
              (1152, 896),   # Landscape
@@ -242,14 +272,15 @@ class RetroArtConverter:
          input_image,
          prompt="retro game character, vibrant colors, detailed",
          negative_prompt="blurry, low quality, ugly, distorted",
-         num_inference_steps=12,   # LCM recommended: 12 steps
-         guidance_scale=1.0,       # LCM recommended: 1.0-1.5
          controlnet_conditioning_scale=0.8,
          lora_scale=1.0,
-         identity_preservation=0.8,
-         image_scale=0.2
      ):
-         """Generate retro art with correct LCM settings"""

          # Add trigger word to prompt
          prompt = self.add_trigger_word(prompt)
@@ -264,26 +295,41 @@ class RetroArtConverter:
          # Resize with high quality
          resized_image = input_image.resize((target_width, target_height), Image.LANCZOS)

-         # Generate depth map
-         print("Generating depth map...")
          depth_image = self.get_depth_map(resized_image)
-         depth_image = depth_image.resize((target_width, target_height), Image.LANCZOS)

          # Handle face detection for InstantID
          using_multiple_controlnets = self.using_multiple_controlnets
          face_embeddings = None
          has_detected_faces = False

-         if using_multiple_controlnets:
-             print("Checking for faces...")
              img_array = np.array(resized_image)
-             faces = self.face_app.get(img_array) if self.face_app is not None else []

              if len(faces) > 0:
                  has_detected_faces = True
                  print(f"Detected {len(faces)} face(s)")
-                 face = sorted(faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))[-1]
-                 face_embeddings = torch.from_numpy(face.normed_embedding).unsqueeze(0).to(self.device, dtype=self.dtype)

          # Set LORA scale
          if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:
@@ -297,51 +343,52 @@ class RetroArtConverter:
          pipe_kwargs = {
              "prompt": prompt,
              "negative_prompt": negative_prompt,
              "num_inference_steps": num_inference_steps,
              "guidance_scale": guidance_scale,
-             "width": target_width,
-             "height": target_height,
              "generator": torch.Generator(device=self.device).manual_seed(42)
          }

-         # Add CLIP skip
-         if hasattr(self.pipe, 'text_encoder'):
-             pipe_kwargs["clip_skip"] = 2
-
          # Configure ControlNet inputs
-         if using_multiple_controlnets and has_detected_faces:
-             print("Using Depth + InstantID ControlNets")
-             control_images = [depth_image, resized_image]
-             conditioning_scales = [controlnet_conditioning_scale, image_scale]

-             pipe_kwargs["image"] = control_images
              pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales

-             if face_embeddings is not None:
-                 pipe_kwargs["cross_attention_kwargs"] = {"ip_adapter_image_embeds": [face_embeddings]}

-         elif using_multiple_controlnets and not has_detected_faces:
-             print("Multiple ControlNets available but no faces detected")
              control_images = [depth_image, depth_image]
-             conditioning_scales = [controlnet_conditioning_scale, 0.0]

-             pipe_kwargs["image"] = control_images
              pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales

          else:
-             print("Using Depth ControlNet only")
-             pipe_kwargs["image"] = depth_image
-             pipe_kwargs["controlnet_conditioning_scale"] = controlnet_conditioning_scale

          # Generate
-         print(f"Generating with LCM: Steps={num_inference_steps}, CFG={guidance_scale}")
          result = self.pipe(**pipe_kwargs)

          return result.images[0]

  # Initialize converter
  print("Initializing RetroArt Converter...")
- converter = RetroArtConverter()

  @spaces.GPU
  def process_image(
@@ -350,25 +397,31 @@ def process_image(
      negative_prompt,
      steps,
      guidance_scale,
      controlnet_scale,
      lora_scale,
-     identity_preservation,
-     image_scale
  ):
      if image is None:
          return None

      try:
          result = converter.generate_retro_art(
              input_image=image,
              prompt=prompt,
              negative_prompt=negative_prompt,
              num_inference_steps=int(steps),
              guidance_scale=guidance_scale,
              controlnet_conditioning_scale=controlnet_scale,
              lora_scale=lora_scale,
-             identity_preservation=identity_preservation,
-             image_scale=image_scale
          )
          return result
      except Exception as e:
@@ -378,36 +431,26 @@ def process_image(
          raise gr.Error(f"Generation failed: {str(e)}")

  # Gradio UI
- with gr.Blocks(title="RetroArt Converter - LCM", theme=gr.themes.Soft()) as demo:
      gr.Markdown("""
-     # 🎮 RetroArt Converter (LCM Optimized)

-     Convert images into retro pixel art style using LCM (Latent Consistency Model) for fast, high-quality generation!

-     **✨ Features:**
-     - ⚡ Ultra-fast generation (12 steps!)
-     - 🎨 Custom pixel art LORA with trigger word: `p1x3l4rt, pixel art`
-     - 📐 Optimized resolutions: 896x1152 / 832x1216
-     - 🖼️ Bundled VAE for authentic retro look
-     - 🎯 CLIP Skip 2 for better style
      """)

      # Model status
-     if converter.models_loaded:
-         status_text = "**📦 Loaded Models:**\n"
-         status_text += f"- Custom Checkpoint (Horizon): {'✓ Loaded' if converter.models_loaded['custom_checkpoint'] else '✗ Using SDXL base'}\n"
-         status_text += f"- LORA (RetroArt): {'✓ Loaded' if converter.models_loaded['lora'] else '✗ Disabled'}\n"
-         status_text += f"- InstantID: {'✓ Loaded' if converter.models_loaded['instantid'] else '✗ Disabled'}\n"
-         gr.Markdown(status_text)
-
-     gr.Markdown(f"""
-     **⚙️ LCM Configuration:**
-     - Scheduler: LCM (Latent Consistency Model)
-     - Recommended Steps: **12** (fast!)
-     - Recommended CFG: **1.0-1.5** (lower than normal)
-     - CLIP Skip: **2**
-     - LORA Trigger: `{TRIGGER_WORD}` (auto-added)
-     """)

      with gr.Row():
          with gr.Column():
@@ -426,31 +469,38 @@ with gr.Blocks(title="RetroArt Converter - LCM", theme=gr.themes.Soft()) as demo
                  lines=2
              )

-             with gr.Accordion("⚡ LCM Settings (Optimized)", open=True):
                  steps = gr.Slider(
                      minimum=4,
-                     maximum=20,
                      value=12,
                      step=1,
-                     label="Inference Steps (LCM works great with just 12!)"
                  )

                  guidance_scale = gr.Slider(
                      minimum=0.5,
-                     maximum=3.0,
                      value=1.0,
                      step=0.1,
-                     label="Guidance Scale (CFG) - LCM uses 1.0-1.5"
                  )

-                 controlnet_scale = gr.Slider(
                      minimum=0.3,
-                     maximum=1.2,
-                     value=0.8,
                      step=0.05,
-                     label="ControlNet Depth Scale"
                  )
-
                  lora_scale = gr.Slider(
                      minimum=0.5,
                      maximum=1.5,
@@ -458,22 +508,32 @@ with gr.Blocks(title="RetroArt Converter - LCM", theme=gr.themes.Soft()) as demo
                      step=0.05,
                      label="RetroArt LORA Scale"
                  )

-             with gr.Accordion("🎭 Identity Settings (for portraits)", open=False):
-                 identity_preservation = gr.Slider(
                      minimum=0,
-                     maximum=1.5,
-                     value=0.8,
-                     step=0.1,
-                     label="Identity Preservation"
                  )

-                 image_scale = gr.Slider(
                      minimum=0,
                      maximum=1.0,
-                     value=0.2,
                      step=0.05,
-                     label="InstantID Image Scale"
                  )

              generate_btn = gr.Button("🎨 Generate Retro Art", variant="primary", size="lg")
@@ -484,27 +544,53 @@ with gr.Blocks(title="RetroArt Converter - LCM", theme=gr.themes.Soft()) as demo
      gr.Markdown("""
      ### 💡 Tips for Best Results:

-     **For LCM Models:**
-     - ✅ Use **12 steps** (already optimized!)
-     - ✅ Keep CFG at **1.0-1.5** (not 7.5!)
-     - ✅ LORA trigger word is **auto-added**
-     - ✅ Resolution auto-optimized to 896x1152 or 832x1216

-     **For Quality:**
-     - Use high-resolution input images
-     - Be specific in prompts: "16-bit game character" vs "character"
-     - Adjust ControlNet scale: lower = more creative, higher = more faithful

-     **For Style:**
-     - Increase LORA scale (1.0-1.5) for stronger pixel art effect
-     - Try prompts like: "SNES style", "16-bit RPG", "Game Boy Advance style"
      """)

      generate_btn.click(
          fn=process_image,
          inputs=[
-             input_image, prompt, negative_prompt, steps, guidance_scale,
-             controlnet_scale, lora_scale, identity_preservation, image_scale
          ],
          outputs=[output_image]
      )
2
  import gradio as gr
3
  import torch
4
  from diffusers import (
 
 
5
  ControlNetModel,
6
  AutoencoderKL,
7
+ DPMSolverMultistepScheduler,
8
+ LCMScheduler
9
  )
10
  from diffusers.models.attention_processor import AttnProcessor2_0
11
  from insightface.app import FaceAnalysis
12
  from PIL import Image
13
  import numpy as np
14
  import cv2
 
15
  from huggingface_hub import hf_hub_download
16
  import os
17
 
18
+ # Import the custom img2img pipeline with InstantID
19
+ from pipeline_stable_diffusion_xl_instantid_img2img import StableDiffusionXLInstantIDImg2ImgPipeline, draw_kps
20
+
21
+ # Import ZoeDetector for better depth maps
22
+ from controlnet_aux import ZoeDetector
23
+
24
  # Configuration
25
  MODEL_REPO = "primerz/pixagram"
26
  device = "cuda" if torch.cuda.is_available() else "cpu"
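Note: the new import assumes pipeline_stable_diffusion_xl_instantid_img2img.py sits next to app.py in the Space repo. A minimal guard (illustrative sketch, not part of this commit) makes the failure mode explicit:

    # Sketch: fail fast with a clear message if the pipeline file is missing.
    try:
        from pipeline_stable_diffusion_xl_instantid_img2img import (
            StableDiffusionXLInstantIDImg2ImgPipeline,
            draw_kps,
        )
    except ImportError as e:
        raise ImportError(
            "pipeline_stable_diffusion_xl_instantid_img2img.py was not found next to "
            "app.py; copy the InstantID img2img pipeline file into the repo."
        ) from e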
 
  print(f"LORA Trigger Word: {TRIGGER_WORD}")

  class RetroArtConverter:
+     def __init__(self, use_lcm=False):
          self.device = device
          self.dtype = dtype
+         self.use_lcm = use_lcm
          self.models_loaded = {
              'custom_checkpoint': False,
              'lora': False,
          }

          # Initialize face analysis for InstantID
+         print("Loading face analysis model (antelopev2)...")
          try:
              self.face_app = FaceAnalysis(
                  name='antelopev2',

              self.face_app = None
              self.face_detection_enabled = False

+         # Load ControlNet for InstantID
          print("Loading InstantID ControlNet...")
          try:
              self.controlnet_instantid = ControlNetModel.from_pretrained(

              self.controlnet_instantid = None
              self.instantid_enabled = False

+         # Load ControlNet for Zoe depth
+         print("Loading Zoe Depth ControlNet...")
+         self.controlnet_depth = ControlNetModel.from_pretrained(
+             "diffusers/controlnet-zoe-depth-sdxl-1.0",
+             torch_dtype=self.dtype
+         ).to(self.device)
+
+         # Load Zoe depth detector (better than DPT)
+         print("Loading Zoe depth detector...")
+         try:
+             self.zoe_detector = ZoeDetector.from_pretrained("lllyasviel/Annotators")
+             self.zoe_detector.to(self.device)
+             print("✓ Zoe detector loaded successfully")
+         except Exception as e:
+             print(f"⚠️ Could not load Zoe detector: {e}")
+             self.zoe_detector = None
          # Determine which controlnets to use
          if self.instantid_enabled and self.controlnet_instantid is not None:
+             controlnets = [self.controlnet_instantid, self.controlnet_depth]
+             print(f"Initializing with multiple ControlNets: InstantID + Zoe Depth")
          else:
              controlnets = self.controlnet_depth
+             print(f"Initializing with single ControlNet: Zoe Depth only")
+
+         # Load VAE
+         print("Loading VAE...")
+         self.vae = AutoencoderKL.from_pretrained(
+             "madebyollin/sdxl-vae-fp16-fix",
+             torch_dtype=self.dtype
+         ).to(self.device)

          # Load SDXL checkpoint from HuggingFace Hub
+         print("Loading SDXL checkpoint (horizon) from HuggingFace Hub...")
          try:
              model_path = hf_hub_download(
                  repo_id=MODEL_REPO,
                  filename="horizon.safetensors",
                  repo_type="model"
              )
+             # Use the custom img2img pipeline for better results
+             self.pipe = StableDiffusionXLInstantIDImg2ImgPipeline.from_single_file(
                  model_path,
                  controlnet=controlnets,
+                 vae=self.vae,
                  torch_dtype=self.dtype,
                  use_safetensors=True
              ).to(self.device)
+             print("✓ Custom checkpoint loaded successfully")
              self.models_loaded['custom_checkpoint'] = True
          except Exception as e:
              print(f"⚠️ Could not load custom checkpoint: {e}")
              print("Using default SDXL base model")
+             self.pipe = StableDiffusionXLInstantIDImg2ImgPipeline.from_pretrained(
                  "stabilityai/stable-diffusion-xl-base-1.0",
                  controlnet=controlnets,
+                 vae=self.vae,
                  torch_dtype=self.dtype,
                  use_safetensors=True
              ).to(self.device)
              self.models_loaded['custom_checkpoint'] = False

+         # Load InstantID IP-Adapter
+         if self.instantid_enabled:
+             print("Loading InstantID IP-Adapter...")
+             try:
+                 ip_adapter_path = hf_hub_download(
+                     repo_id="InstantX/InstantID",
+                     filename="ip-adapter.bin"
+                 )
+                 self.pipe.load_ip_adapter_instantid(ip_adapter_path)
+                 self.pipe.set_ip_adapter_scale(0.8)
+                 print("✓ InstantID IP-Adapter loaded successfully")
+             except Exception as e:
+                 print(f"⚠️ Could not load IP-Adapter: {e}")
+
          # Load LORA from HuggingFace Hub
          print("Loading LORA (retroart) from HuggingFace Hub...")
          try:

              print(f"⚠️ Could not load LORA: {e}")
              self.models_loaded['lora'] = False

+         # Choose scheduler based on mode
+         if use_lcm:
+             print("Setting up LCM scheduler for fast generation...")
+             self.pipe.scheduler = LCMScheduler.from_config(
+                 self.pipe.scheduler.config
+             )
+         else:
+             print("Setting up DPMSolverMultistep scheduler with Karras sigmas for quality...")
+             self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+                 self.pipe.scheduler.config,
+                 use_karras_sigmas=True
+             )

          # Enable attention optimizations
          self.pipe.unet.set_attn_processor(AttnProcessor2_0())

          except Exception as e:
              print(f"⚠️ xformers not available: {e}")

          # Track controlnet configuration
          self.using_multiple_controlnets = isinstance(controlnets, list)
          print(f"Pipeline initialized with {'multiple' if self.using_multiple_controlnets else 'single'} ControlNet(s)")

          print("===================\n")

          print("✓ Model initialization complete!")
+         if use_lcm:
+             print("\n=== LCM CONFIGURATION ===")
+             print("Scheduler: LCM")
+             print("Recommended Steps: 8-12")
+             print("Recommended CFG: 1.0-1.5")
+             print("Recommended Strength: 0.6-0.8")
+         else:
+             print("\n=== QUALITY CONFIGURATION ===")
+             print("Scheduler: DPMSolverMultistep + Karras")
+             print("Recommended Steps: 25-40")
+             print("Recommended CFG: 5.0-7.5")
+             print("Recommended Strength: 0.4-0.7")
          print(f"LORA Trigger: '{TRIGGER_WORD}'")
          print("=========================\n")

      def get_depth_map(self, image):
+         """Generate depth map from input image using Zoe"""
+         if self.zoe_detector is not None:
+             # Use Zoe detector for better depth maps
+             depth_image = self.zoe_detector(image)
+             return depth_image
+         else:
+             # Fallback to basic conversion
+             img_array = np.array(image.convert('L'))
+             depth_colored = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
+             return Image.fromarray(depth_colored)
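The grayscale fallback above is only a rough stand-in for real depth. A contrast-normalized variant (illustrative sketch, adapted from the percentile normalization the removed DPT path used) degrades more gracefully:

    import numpy as np
    import cv2
    from PIL import Image

    def normalized_gray_depth(image):
        """Percentile-normalized grayscale as a crude depth-map substitute."""
        arr = np.array(image.convert('L')).astype(np.float32)
        lo, hi = np.percentile(arr, [2, 98])
        arr = np.clip((arr - lo) / (hi - lo + 1e-8), 0, 1) * 255
        arr = cv2.GaussianBlur(arr.astype(np.uint8), (3, 3), 0)
        return Image.fromarray(cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB))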
 
      def calculate_optimal_size(self, original_width, original_height):
          """Calculate optimal size from recommended resolutions"""
          aspect_ratio = original_width / original_height

+         # Recommended resolutions for SDXL
          recommended_sizes = [
              (896, 1152),   # Portrait
              (1152, 896),   # Landscape

          input_image,
          prompt="retro game character, vibrant colors, detailed",
          negative_prompt="blurry, low quality, ugly, distorted",
+         num_inference_steps=25,
+         guidance_scale=5.0,
+         strength=0.6,              # img2img strength
          controlnet_conditioning_scale=0.8,
          lora_scale=1.0,
+         face_strength=0.85,        # InstantID face strength
+         depth_control_scale=0.8    # Zoe depth strength
      ):
+         """Generate retro art using img2img pipeline with face keypoints"""

          # Add trigger word to prompt
          prompt = self.add_trigger_word(prompt)

          # Resize with high quality
          resized_image = input_image.resize((target_width, target_height), Image.LANCZOS)

+         # Generate depth map using Zoe
+         print("Generating Zoe depth map...")
          depth_image = self.get_depth_map(resized_image)
+         if depth_image.size != (target_width, target_height):
+             depth_image = depth_image.resize((target_width, target_height), Image.LANCZOS)

          # Handle face detection for InstantID
          using_multiple_controlnets = self.using_multiple_controlnets
+         face_kps = None
          face_embeddings = None
          has_detected_faces = False

+         if using_multiple_controlnets and self.face_app is not None:
+             print("Detecting faces and extracting keypoints...")
              img_array = np.array(resized_image)
+             faces = self.face_app.get(img_array)

              if len(faces) > 0:
                  has_detected_faces = True
                  print(f"Detected {len(faces)} face(s)")
+
+                 # Get the largest face
+                 face = sorted(faces,
+                     key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))[-1]
+
+                 # Extract face embeddings
+                 face_embeddings = torch.from_numpy(face.normed_embedding).unsqueeze(0).to(
+                     self.device, dtype=self.dtype
+                 )
+
+                 # Draw keypoints (this shows age, gender, expression)
+                 face_kps = draw_kps(resized_image, face.kps)
+                 print(f"Face keypoints drawn (age/gender/expression preserved)")
+             else:
+                 print("No faces detected in image")

          # Set LORA scale
          if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:

          pipe_kwargs = {
              "prompt": prompt,
              "negative_prompt": negative_prompt,
+             "image": resized_image,    # Original image for img2img
              "num_inference_steps": num_inference_steps,
              "guidance_scale": guidance_scale,
+             "strength": strength,      # img2img denoising strength
              "generator": torch.Generator(device=self.device).manual_seed(42)
          }

          # Configure ControlNet inputs
+         if using_multiple_controlnets and has_detected_faces and face_kps is not None:
+             print("Using InstantID + Zoe Depth ControlNets with face keypoints")
+             control_images = [face_kps, depth_image]
+             conditioning_scales = [face_strength, depth_control_scale]

+             pipe_kwargs["control_image"] = control_images
              pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales

+             # Add face embeddings through IP-Adapter
+             if face_embeddings is not None and hasattr(self.pipe, 'set_ip_adapter_scale'):
+                 pipe_kwargs["ip_adapter_image_embeds"] = [face_embeddings]

+         elif using_multiple_controlnets:
+             print("Multiple ControlNets available but no faces detected - using depth only")
+             # Use depth for both to maintain structure
              control_images = [depth_image, depth_image]
+             conditioning_scales = [0.0, depth_control_scale]   # Disable InstantID

+             pipe_kwargs["control_image"] = control_images
              pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales

          else:
+             print("Using Zoe Depth ControlNet only")
+             pipe_kwargs["control_image"] = depth_image
+             pipe_kwargs["controlnet_conditioning_scale"] = depth_control_scale

          # Generate
+         mode = "LCM" if self.use_lcm else "Quality"
+         print(f"Generating with {mode} mode: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")
          result = self.pipe(**pipe_kwargs)

          return result.images[0]

  # Initialize converter
  print("Initializing RetroArt Converter...")
+ print("Choose mode: LCM (fast) or Quality (better)")
+ converter_lcm = RetroArtConverter(use_lcm=True)
+ converter_quality = RetroArtConverter(use_lcm=False)
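Instantiating two full SDXL pipelines doubles GPU memory. If VRAM is tight, one pipeline with a per-call scheduler swap achieves the same dual mode (illustrative sketch; assumes a single RetroArtConverter instance exposing .pipe):

    from diffusers import DPMSolverMultistepScheduler, LCMScheduler

    def set_mode(converter, use_lcm):
        """Swap schedulers on one shared pipeline instead of keeping two pipelines."""
        if use_lcm:
            converter.pipe.scheduler = LCMScheduler.from_config(
                converter.pipe.scheduler.config
            )
        else:
            converter.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
                converter.pipe.scheduler.config, use_karras_sigmas=True
            )
        converter.use_lcm = use_lcm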
 
  @spaces.GPU
  def process_image(

      negative_prompt,
      steps,
      guidance_scale,
+     strength,
      controlnet_scale,
      lora_scale,
+     face_strength,
+     depth_control_scale,
+     use_lcm_mode
  ):
      if image is None:
          return None

      try:
+         # Choose the right converter based on mode
+         converter = converter_lcm if use_lcm_mode else converter_quality
+
          result = converter.generate_retro_art(
              input_image=image,
              prompt=prompt,
              negative_prompt=negative_prompt,
              num_inference_steps=int(steps),
              guidance_scale=guidance_scale,
+             strength=strength,
              controlnet_conditioning_scale=controlnet_scale,
              lora_scale=lora_scale,
+             face_strength=face_strength,
+             depth_control_scale=depth_control_scale
          )
          return result
      except Exception as e:

          raise gr.Error(f"Generation failed: {str(e)}")

  # Gradio UI
+ with gr.Blocks(title="RetroArt Converter - Improved", theme=gr.themes.Soft()) as demo:
      gr.Markdown("""
+     # 🎮 RetroArt Converter (Improved with True Img2Img)

+     Convert images into retro pixel art style with **proper face detection** and **gender/age preservation**!

+     **✨ Key Improvements:**
+     - 🎯 **True img2img pipeline** for better structure preservation
+     - 👤 **draw_kps**: Detects and preserves age, gender, expression
+     - 🗺️ **Zoe Depth**: Superior depth estimation
+     - ⚡ **Dual Mode**: Fast LCM or Quality DPM++
+     - 🎨 Custom pixel art LORA with trigger: `p1x3l4rt, pixel art`
      """)

      # Model status
+     status_text = "**📦 Loaded Models (LCM Mode):**\n"
+     status_text += f"- Custom Checkpoint: {'✓ Loaded' if converter_lcm.models_loaded['custom_checkpoint'] else '✗ Using SDXL base'}\n"
+     status_text += f"- LORA (RetroArt): {'✓ Loaded' if converter_lcm.models_loaded['lora'] else '✗ Disabled'}\n"
+     status_text += f"- InstantID: {'✓ Loaded' if converter_lcm.models_loaded['instantid'] else '✗ Disabled'}\n"
+     gr.Markdown(status_text)

      with gr.Row():
          with gr.Column():

                  lines=2
              )

+             use_lcm_mode = gr.Checkbox(
+                 label="Use LCM Mode (Fast)",
+                 value=True,
+                 info="Uncheck for Quality mode (slower but better)"
+             )
+
+             with gr.Accordion("⚙️ Generation Settings", open=True):
                  steps = gr.Slider(
                      minimum=4,
+                     maximum=50,
                      value=12,
                      step=1,
+                     label="Inference Steps (12 for LCM, 25-40 for Quality)"
                  )

                  guidance_scale = gr.Slider(
                      minimum=0.5,
+                     maximum=15.0,
                      value=1.0,
                      step=0.1,
+                     label="Guidance Scale (1.0-1.5 for LCM, 5-7.5 for Quality)"
                  )

+                 strength = gr.Slider(
                      minimum=0.3,
+                     maximum=1.0,
+                     value=0.7,
                      step=0.05,
+                     label="Img2Img Strength (how much to change)"
                  )
+
+             with gr.Accordion("🎨 Style Settings", open=True):
                  lora_scale = gr.Slider(
                      minimum=0.5,
                      maximum=1.5,

                      step=0.05,
                      label="RetroArt LORA Scale"
                  )
+
+                 controlnet_scale = gr.Slider(
+                     minimum=0.3,
+                     maximum=1.2,
+                     value=0.8,
+                     step=0.05,
+                     label="Overall ControlNet Scale"
+                 )

+             with gr.Accordion("👤 Face & Depth Settings", open=False):
+                 face_strength = gr.Slider(
                      minimum=0,
+                     maximum=2.0,
+                     value=0.85,
+                     step=0.05,
+                     label="Face Preservation (InstantID)",
+                     info="Higher = better face likeness"
                  )

+                 depth_control_scale = gr.Slider(
                      minimum=0,
                      maximum=1.0,
+                     value=0.8,
                      step=0.05,
+                     label="Zoe Depth Control Scale",
+                     info="Higher = more structure preservation"
                  )

              generate_btn = gr.Button("🎨 Generate Retro Art", variant="primary", size="lg")

      gr.Markdown("""
      ### 💡 Tips for Best Results:

+     **Mode Selection:**
+     - ✅ **LCM Mode**: 12 steps, CFG 1.0-1.5, Strength 0.6-0.8 (⚡ fast!)
+     - ✅ **Quality Mode**: 25-40 steps, CFG 5-7.5, Strength 0.4-0.7 (🎨 better!)
+
+     **Face Preservation:**
+     - System automatically detects faces and draws keypoints
+     - Preserves age, gender, and expression characteristics
+     - Adjust "Face Preservation" slider for control

+     **For Best Quality:**
+     - Use high-resolution input images (min 512px)
+     - For portraits: enable Quality mode + high face strength
+     - For scenes: lower img2img strength for more creativity
+     - Adjust depth control for structure vs creativity balance

+     **Style Control:**
+     - LORA trigger word auto-added for pixel art style
+     - Increase LORA scale (1.2-1.5) for stronger retro effect
+     - Try: "SNES style", "16-bit RPG", "Game Boy Advance style"
      """)

+     # Update defaults when switching modes
+     def update_mode_defaults(use_lcm):
+         if use_lcm:
+             return (
+                 gr.update(value=12),    # steps
+                 gr.update(value=1.0),   # guidance_scale
+                 gr.update(value=0.7)    # strength
+             )
+         else:
+             return (
+                 gr.update(value=30),    # steps
+                 gr.update(value=6.0),   # guidance_scale
+                 gr.update(value=0.6)    # strength
+             )
+
+     use_lcm_mode.change(
+         fn=update_mode_defaults,
+         inputs=[use_lcm_mode],
+         outputs=[steps, guidance_scale, strength]
+     )
+
      generate_btn.click(
          fn=process_image,
          inputs=[
+             input_image, prompt, negative_prompt, steps, guidance_scale, strength,
+             controlnet_scale, lora_scale, face_strength, depth_control_scale, use_lcm_mode
          ],
          outputs=[output_image]
      )