primerz committed
Commit 66937e6 · verified · 1 Parent(s): 38897a0

Update app.py

Files changed (1)
  1. app.py +9 -28
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces  # MUST be first, before any CUDA-related imports
 import gradio as gr
 import torch
 from diffusers import (
@@ -15,35 +16,19 @@ import cv2
 from transformers import pipeline as transformers_pipeline
 from huggingface_hub import hf_hub_download
 import os
-import spaces

 # Configuration
 MODEL_REPO = "primerz/pixagram"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16 if device == "cuda" else torch.float32

-# Note: For ZeroGPU, device detection happens dynamically
-# We'll set device inside GPU-decorated functions
-print("Using ZeroGPU - GPU will be allocated on-demand")
+print(f"Using device: {device}")
+print(f"Loading models from: {MODEL_REPO}")

 class RetroArtConverter:
     def __init__(self):
-        self.models_loaded = False
-        self.device = None
-        self.dtype = None
-        self.face_detection_enabled = False
-        print("RetroArtConverter initialized - models will load on first generation")
-
-    def _initialize_models(self):
-        """Lazy model initialization - called on first generation when GPU is available"""
-        if self.models_loaded:
-            return
-
-        print("Initializing models...")
-        print(f"Loading models from: {MODEL_REPO}")
-
-        # Set device (will be cuda when called from GPU-decorated function)
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.dtype = torch.float16 if self.device == "cuda" else torch.float32
-        print(f"Using device: {self.device}")
+        self.device = device
+        self.dtype = dtype

         # Initialize face analysis for InstantID (optional)
         print("Loading face analysis model...")
@@ -144,7 +129,7 @@ class RetroArtConverter:
             self.pipe.scheduler.config
         )

-        # For ZeroGPU, we don't use model_cpu_offload
+        # For ZeroGPU, don't use model_cpu_offload
         # self.pipe.enable_model_cpu_offload()

         self.pipe.enable_vae_slicing()
@@ -160,8 +145,7 @@ class RetroArtConverter:
         except Exception as e:
             print(f"⚠️ xformers not available: {e}")

-        self.models_loaded = True
-        print("✓ Model initialization complete!")
+        print("Model initialization complete!")

     def get_depth_map(self, image):
         """Generate depth map from input image"""
@@ -222,9 +206,6 @@ class RetroArtConverter:
     ):
         """Main generation function"""

-        # Initialize models on first run (lazy loading for ZeroGPU)
-        self._initialize_models()
-
         # Resize image maintaining aspect ratio
         original_width, original_height = input_image.size
         target_width, target_height = self.calculate_target_size(original_width, original_height)
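For context on why the commit reorders the imports and drops the lazy-init path: on ZeroGPU Spaces, `import spaces` has to run before any CUDA-related import, and a GPU is only attached while a function decorated with `@spaces.GPU` is executing, which is presumably what lets the new module-level `torch.cuda.is_available()` probe resolve to cuda/float16 at import time. A minimal sketch of that pattern follows; the wrapper name, its parameters, and the `converter.generate(...)` call are illustrative, since the actual Gradio entry point sits outside the hunks shown above.

```python
# Minimal ZeroGPU sketch (illustrative names, not the exact code in app.py).
import spaces            # must come before torch / diffusers / CUDA-touching imports
import gradio as gr

converter = RetroArtConverter()   # with lazy init removed, models load at startup

@spaces.GPU(duration=120)         # a GPU is attached only while this call runs
def generate_retro_art(input_image, prompt):
    # Hypothetical wrapper around the converter's "Main generation function".
    return converter.generate(input_image, prompt)

demo = gr.Interface(
    fn=generate_retro_art,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs=gr.Image(),
)
demo.launch()
```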
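The memory settings the commit keeps (VAE slicing and optional xformers, with `enable_model_cpu_offload` left commented out) follow the usual diffusers pattern. Below is a rough sketch under the assumption that `self.pipe` is a standard SDXL-style pipeline; the checkpoint name is a placeholder, not what app.py actually loads from primerz/pixagram.

```python
# Rough sketch of the retained memory setup (placeholder checkpoint, assumed pipeline class).
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder, not the app's checkpoint
    torch_dtype=dtype,
).to(device)

# pipe.enable_model_cpu_offload()  # left disabled on ZeroGPU, as in the commit
pipe.enable_vae_slicing()          # sliced VAE decoding to lower peak memory use

try:
    pipe.enable_xformers_memory_efficient_attention()
except Exception as e:
    print(f"xformers not available: {e}")
```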