Spaces: Runtime error
RohitGandikota committed on
Commit · 86fa2c8
1 Parent(s): 118ce23
multiple models dropdown
Browse files
app.py CHANGED
@@ -67,13 +67,18 @@ class Demo:
         self.device = 'cuda'
         self.weight_dtype = torch.bfloat16
         model_id = "stabilityai/sdxl-turbo"
+        self.current_model = 'SDXL Turbo'
         euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
         self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
         self.pipe.enable_xformers_memory_efficient_attention()
+
+        self.guidance_scale = 1
+        self.num_inference_steps = 3
+
         with gr.Blocks() as demo:
             self.layout()
         demo.queue(max_size=5).launch(share=True, max_threads=2)
-
+
 
     def layout(self):
 
@@ -103,6 +108,13 @@ class Demo:
                 )
 
             with gr.Row():
+
+                self.model_type = gr.Dropdown(
+                    label="Model",
+                    choices= ['SDXL Turbo', 'SDXL'],
+                    value='SDXL Turbo',
+                    interactive=True
+                )
 
                 self.model_dropdown = gr.Dropdown(
                     label="Pretrained Sliders",
@@ -141,13 +153,14 @@ class Demo:
 
             with gr.Row():
 
-                self.
-                    label="
+                self.image_orig = gr.Image(
+                    label="Original SD",
                     interactive=False,
                     type='pil',
                 )
-
-
+
+                self.image_new = gr.Image(
+                    label=f"{self.model_dropdown} Slider",
                     interactive=False,
                     type='pil',
                 )
@@ -234,7 +247,8 @@ class Demo:
                     self.seed_infr,
                     self.start_noise_infr,
                     self.slider_scale_infr,
-                    self.model_dropdown
+                    self.model_dropdown,
+                    self.model
                 ],
                 outputs=[
                     self.image_new,
@@ -286,10 +300,25 @@ class Demo:
         return [gr.update(interactive=True, value='Train'), gr.update(value='Done Training! \n Try your custom slider in the "Test" tab'), f'models/{save_name}', gr.update(choices=list(model_map.keys()), value=save_name.replace('.pt',''))]
 
 
-    def inference(self, prompt, seed, start_noise, scale, model_name, pbar = gr.Progress(track_tqdm=True)):
+    def inference(self, prompt, seed, start_noise, scale, model_name, model, pbar = gr.Progress(track_tqdm=True)):
 
         seed = seed or 42
-
+        if self.current_model != model:
+            if model=='SDXL Turbo':
+                model_id = "stabilityai/sdxl-turbo"
+                euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+                self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
+                self.pipe.enable_xformers_memory_efficient_attention()
+                self.guidance_scale = 1
+                self.num_inference_steps = 3
+                self.current_model = 'SDXL Turbo'
+            else:
+                model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
+                self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=self.weight_dtype).to(self.device)
+                self.pipe.enable_xformers_memory_efficient_attention()
+                self.guidance_scale = 7.5
+                self.num_inference_steps = 20
+                self.current_model = 'SDXL'
         generator = torch.manual_seed(seed)
 
         model_path = model_map[model_name]
@@ -327,10 +356,10 @@ class Demo:
 
 
         generator = torch.manual_seed(seed)
-        edited_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=
+        edited_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=self.num_inference_steps, generator=generator, network=network, start_noise=int(start_noise), scale=float(scale), unet=unet, guidance_scale=self.guidance_scale).images[0]
 
         generator = torch.manual_seed(seed)
-        original_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=
+        original_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=self.num_inference_steps, generator=generator, network=network, start_noise=start_noise, scale=0, unet=unet, guidance_scale=self.guidance_scale).images[0]
 
         del unet, network
         unet = None
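
For reference, a minimal, self-contained sketch (not the Space's exact code) of the lazy model-switching pattern this commit introduces: the SDXL pipeline is rebuilt only when the dropdown selection differs from the model already loaded, and the guidance scale and step count are swapped along with it. The class name ModelSwitcher and its load method are illustrative; the model ids, scheduler, dtype, and sampling settings mirror the diff above. Assumes diffusers, torch, and a CUDA device.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

class ModelSwitcher:
    def __init__(self):
        self.device = 'cuda'
        self.weight_dtype = torch.bfloat16
        self.current_model = None
        self.pipe = None
        self.load('SDXL Turbo')

    def load(self, model):
        # Reload the pipeline only when the requested model differs from the active one.
        if self.current_model == model:
            return
        if model == 'SDXL Turbo':
            model_id = "stabilityai/sdxl-turbo"
            scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
            self.pipe = StableDiffusionXLPipeline.from_pretrained(
                model_id, scheduler=scheduler, torch_dtype=self.weight_dtype
            ).to(self.device)
            # Turbo is distilled for very few steps and effectively no classifier-free guidance.
            self.guidance_scale = 1
            self.num_inference_steps = 3
        else:
            model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
            self.pipe = StableDiffusionXLPipeline.from_pretrained(
                model_id, torch_dtype=self.weight_dtype
            ).to(self.device)
            # Base SDXL uses standard guidance and a longer sampling schedule.
            self.guidance_scale = 7.5
            self.num_inference_steps = 20
        self.current_model = model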