alexander00001 committed
Commit af8ce61 · verified · 1 Parent(s): 6f9b5bf

Update app.py

Files changed (1)
  1. app.py +100 -59
app.py CHANGED
@@ -22,31 +22,33 @@ import numpy as np
 # Configuration Section (Modify here to expand)
 # ======================
 
-# 1. Base Model - Illustrious XL v1.0 (Multiple options for stability)
+# 1. Base Model - Using reliable SDXL models
 BASE_MODELS = {
-    "illustrious_xl_v1": "Liberata/illustrious-xl-v1.0",  # Primary choice - more stable
-    "illustrious_xl_v2": "OnomaAIResearch/Illustrious-XL-v2.0",  # Backup option
-    "sdxl_base": "stabilityai/stable-diffusion-xl-base-1.0",  # Fallback to standard SDXL
-    "realistic_vision": "SG161222/RealVisXL_V4.0"  # Alternative realistic model
+    "sdxl_base": "stabilityai/stable-diffusion-xl-base-1.0",  # Primary - most stable
+    "realistic_vision": "SG161222/RealVisXL_V4.0",  # Alternative realistic model
+    "juggernaut": "RunDiffusion/Juggernaut-XL-v9",  # Popular realistic model
+    "dreamshaper": "Lykon/dreamshaper-xl-1-0"  # Versatile model
 }
 
-# Current model selection (change this to switch models)
+# Current model selection
 CURRENT_MODEL_KEY = "sdxl_base"
 BASE_MODEL = BASE_MODELS[CURRENT_MODEL_KEY]
 
-# 2. Fixed LoRAs (Auto-loaded, not user-selectable) - Using reliable SDXL LoRAs
+# 2. Fixed LoRAs (Auto-loaded, not user-selectable) - Using actual working LoRAs
 FIXED_LORAS = {
     "quality_enhancer": {
-        "repo_id": "ByteDance/SDXL-Lightning",
-        "filename": None,
+        "repo_id": "stabilityai/stable-diffusion-xl-refiner-1.0",
+        "subfolder": None,
         "weight": 0.6,
-        "trigger_words": "high quality, detailed, masterpiece"
+        "trigger_words": "high quality, detailed, masterpiece",
+        "enabled": False  # Disabled for now due to compatibility
     },
-    "pose_control": {
+    "consistency_model": {
         "repo_id": "latent-consistency/lcm-lora-sdxl",
-        "filename": None,
-        "weight": 0.5,
-        "trigger_words": "perfect anatomy, natural pose"
+        "subfolder": None,
+        "weight": 0.3,
+        "trigger_words": "lcm style",
+        "enabled": False  # Optional, can cause issues
     }
 }
 
@@ -59,7 +61,7 @@ STYLE_PROMPTS = {
     "Watercolor": "watercolor painting, soft brush strokes, translucent layers, artistic, painterly, paper texture, traditional art, masterpiece, ",
 }
 
-# 4. Optional LoRAs (User-selectable via dropdown, can select multiple) - Using verified SDXL LoRAs
+# 4. Optional LoRAs (User-selectable) - Using verified working LoRAs
 OPTIONAL_LORAS = {
     "None": {
         "repo_id": None,
@@ -67,35 +69,35 @@ OPTIONAL_LORAS = {
         "trigger_words": "",
         "description": "No additional LoRA"
     },
-    "Lightning Speed": {
-        "repo_id": "ByteDance/SDXL-Lightning",
-        "weight": 0.8,
-        "trigger_words": "high quality, detailed, fast generation",
-        "description": "Fast generation with quality enhancement"
-    },
-    "LCM LoRA": {
+    "LCM Speed": {
         "repo_id": "latent-consistency/lcm-lora-sdxl",
-        "weight": 0.9,
-        "trigger_words": "lcm, high quality, detailed",
-        "description": "Latent Consistency Model for faster inference"
+        "weight": 0.8,
+        "trigger_words": "lcm style, high quality",
+        "description": "Faster generation with LCM"
     },
-    "Offset Noise": {
-        "repo_id": "stabilityai/stable-diffusion-xl-offset-example-lora",
+    "Realistic Detail": {
+        "repo_id": "nerijs/pixel-art-xl",
         "weight": 0.7,
-        "trigger_words": "high contrast, dramatic lighting",
-        "description": "Enhanced contrast and lighting effects"
+        "trigger_words": "detailed, sharp, realistic",
+        "description": "Enhanced detail and realism"
     },
-    "AnimeDiff": {
-        "repo_id": "Linaqruf/anime-detailer-xl-lora",
+    "Anime Style": {
+        "repo_id": "Linaqruf/anime-detailer-xl-lora",
         "weight": 0.8,
-        "trigger_words": "anime style, detailed anime, cel shading",
-        "description": "Anime and manga style enhancement"
+        "trigger_words": "anime style, detailed anime",
+        "description": "Anime and manga style"
     },
-    "PhotoReal": {
-        "repo_id": "Jovian-Experiments/photorealistic-xl-lora",
+    "Portrait Focus": {
+        "repo_id": "TheLastBen/Papercut_SDXL",
         "weight": 0.9,
-        "trigger_words": "photorealistic, ultra realistic, 8k uhd",
-        "description": "Photorealistic enhancement"
+        "trigger_words": "portrait, detailed face, beautiful eyes",
+        "description": "Portrait enhancement"
+    },
+    "Artistic Style": {
+        "repo_id": "ostris/ikea-instructions-lora-sdxl",
+        "weight": 0.6,
+        "trigger_words": "artistic style, creative",
+        "description": "Artistic and creative effects"
     }
 }
 
@@ -123,24 +125,50 @@ current_loras = {}
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 def load_pipeline():
-    """Load the base Illustrious XL pipeline"""
+    """Load the base Illustrious XL pipeline with fallback options"""
     global pipe
     if pipe is None:
-        print("🚀 Loading Illustrious XL base model...")
-        pipe = StableDiffusionXLPipeline.from_pretrained(
-            BASE_MODEL,
-            torch_dtype=torch.float16,
-            use_safetensors=True,
-            variant="fp16"
-        ).to(device)
+        print(f"🚀 Loading base model: {BASE_MODEL}...")
 
-        # Enable memory optimizations for ZeroGPU
-        pipe.enable_attention_slicing()
-        pipe.enable_vae_slicing()
-        pipe.enable_model_cpu_offload()
-        pipe.enable_xformers_memory_efficient_attention()
+        # Try to load the selected model with fallback options
+        model_loaded = False
+        models_to_try = [BASE_MODEL]
 
-        print("✅ Illustrious XL model loaded successfully.")
+        # Add fallback models if primary fails
+        if CURRENT_MODEL_KEY != "sdxl_base":
+            models_to_try.append(BASE_MODELS["sdxl_base"])
+        if CURRENT_MODEL_KEY != "realistic_vision":
+            models_to_try.append(BASE_MODELS["realistic_vision"])
+
+        for model_id in models_to_try:
+            try:
+                print(f"Attempting to load: {model_id}")
+                pipe = StableDiffusionXLPipeline.from_pretrained(
+                    model_id,
+                    torch_dtype=torch.float16,
+                    use_safetensors=True,
+                    variant="fp16"
+                ).to(device)
+
+                # Enable memory optimizations for ZeroGPU
+                pipe.enable_attention_slicing()
+                pipe.enable_vae_slicing()
+                if hasattr(pipe, 'enable_model_cpu_offload'):
+                    pipe.enable_model_cpu_offload()
+                if hasattr(pipe, 'enable_xformers_memory_efficient_attention'):
+                    pipe.enable_xformers_memory_efficient_attention()
+
+                print(f"✅ Successfully loaded: {model_id}")
+                model_loaded = True
+                break
+
+            except Exception as e:
+                print(f"❌ Failed to load {model_id}: {e}")
+                continue
+
+        if not model_loaded:
+            raise Exception("Failed to load any model. Please check your configuration.")
+
     return pipe
 
 def unload_pipeline():
@@ -159,7 +187,7 @@ def unload_pipeline():
     print("🗑️ Pipeline unloaded.")
 
 def load_lora_weights(lora_configs: List[Dict]):
-    """Load multiple LoRA weights efficiently"""
+    """Load multiple LoRA weights efficiently with error handling"""
    global pipe, current_loras
 
     if not lora_configs:
@@ -174,33 +202,46 @@ def load_lora_weights(lora_configs: List[Dict]):
         except:
             pass
 
-    # Load new LoRAs
+    # Load new LoRAs with better error handling
     adapter_names = []
     adapter_weights = []
 
     for config in lora_configs:
         if config['repo_id'] and config['repo_id'] not in current_loras:
             try:
+                # Try different loading methods
+                adapter_name = config['name'].replace(' ', '_').lower()
+
+                # Method 1: Direct loading
                 pipe.load_lora_weights(
                     config['repo_id'],
-                    adapter_name=config['name']
+                    adapter_name=adapter_name
                 )
-                current_loras[config['repo_id']] = config['name']
+                current_loras[config['repo_id']] = adapter_name
                 print(f"✅ Loaded LoRA: {config['name']}")
+
             except Exception as e:
-                print(f"❌ Failed to load LoRA {config['name']}: {e}")
+                print(f"⚠️ Failed to load LoRA {config['name']}: {e}")
+                # Skip this LoRA and continue with others
                 continue
 
-        if config['repo_id']:
-            adapter_names.append(config['name'])
+        # Add to active adapters if successfully loaded
+        if config['repo_id'] in current_loras:
+            adapter_names.append(current_loras[config['repo_id']])
             adapter_weights.append(config['weight'])
 
-    # Set adapter weights
+    # Set adapter weights if any adapters loaded
     if adapter_names:
         try:
             pipe.set_adapters(adapter_names, adapter_weights=adapter_weights)
+            print(f"✅ Activated {len(adapter_names)} LoRA adapters")
         except Exception as e:
             print(f"⚠️ Warning setting adapter weights: {e}")
+            # Try without weights
+            try:
+                pipe.set_adapters(adapter_names)
+            except:
+                print("❌ Failed to set any adapters")
 
 def process_long_prompt(prompt: str, max_length: int = 77) -> str:
     """Process long prompts by intelligent truncation and optimization"""
 
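load_lora_weights() reads config['name'], config['repo_id'], and config['weight'], but the FIXED_LORAS and OPTIONAL_LORAS tables above do not carry a 'name' key, so the caller has to add it. A minimal sketch of how the UI layer inside app.py might build that list (build_lora_configs is a hypothetical helper, and trigger-word handling is left out):

# Hypothetical glue between the config tables and load_lora_weights() -- not in this commit.
from typing import Dict, List

def build_lora_configs(selected_optional: List[str]) -> List[Dict]:
    configs = []

    # Fixed LoRAs: only the entries explicitly flagged as enabled
    for name, cfg in FIXED_LORAS.items():
        if cfg.get("enabled") and cfg.get("repo_id"):
            configs.append({"name": name, "repo_id": cfg["repo_id"], "weight": cfg["weight"]})

    # Optional LoRAs: whatever the user picked in the dropdown, skipping "None"
    for name in selected_optional:
        cfg = OPTIONAL_LORAS.get(name, {})
        if cfg.get("repo_id"):
            configs.append({"name": name, "repo_id": cfg["repo_id"], "weight": cfg["weight"]})

    return configs

# Example: stack the LCM LoRA and the anime detailer on the loaded pipeline
load_lora_weights(build_lora_configs(["LCM Speed", "Anime Style"]))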