Simplify demo: use 1 model, reduce default number of images
app.py CHANGED
```diff
@@ -322,26 +322,36 @@ def main():
         header = """
 # MLR Text-to-Image Diffusion Model Web Demo
 
+This is a demo of model `mdm-flickr-64`. For additional models, please check [our repo](https://github.com/apple/ml-mdm).
+
 ### Usage
-- Select examples below or manually input
+- Select examples below or manually input prompt
 - Change more advanced settings such as inference steps.
 """
         gr.Markdown(header)
 
         with gr.Row(equal_height=False):
             pid = gr.State()
-            … (1 removed line, not shown in this rendering)
+            ckpt_name = gr.Label("mdm-flickr-64", visible=False)
+            # with gr.Row():
+            prompt_input = gr.Textbox(label="Input prompt")
+
+        with gr.Row(equal_height=False):
+            with gr.Column(scale=1):
                 with gr.Row(equal_height=False):
-                    … (10 removed lines, not shown in this rendering)
+                    guidance_scale = gr.Slider(
+                        value=7.5,
+                        minimum=0.0,
+                        maximum=50,
+                        step=0.1,
+                        label="Guidance scale",
+                    )
+                with gr.Row(equal_height=False):
+                    batch_size = gr.Slider(
+                        value=4, minimum=1, maximum=32, step=1, label="Number of images"
+                    )
+
+            with gr.Column(scale=1):
                 with gr.Row(equal_height=False):
                     with gr.Column(scale=1):
                         save_diffusion_path = gr.Checkbox(
```
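Taken together, the additions in this hunk pin the demo to a single checkpoint, cut the default number of images from 64 to 4, and move the prompt box and the two sliders to the top of the layout. A minimal standalone sketch of that control block is below, assuming standard Gradio Blocks usage; the `generate` handler, the button, and the output textbox are illustrative stand-ins, not part of the demo.

```python
import gradio as gr


def generate(prompt, guidance_scale, batch_size):
    # Hypothetical stand-in for the demo's actual diffusion sampler.
    return f"Would sample {int(batch_size)} image(s) for {prompt!r} (guidance={guidance_scale})"


with gr.Blocks() as demo:
    gr.Markdown("# MLR Text-to-Image Diffusion Model Web Demo")
    pid = gr.State()
    # Fixed single checkpoint, kept as a hidden component so callbacks that
    # read a checkpoint name keep working.
    ckpt_name = gr.Label("mdm-flickr-64", visible=False)
    prompt_input = gr.Textbox(label="Input prompt")

    with gr.Row(equal_height=False):
        with gr.Column(scale=1):
            guidance_scale = gr.Slider(
                value=7.5, minimum=0.0, maximum=50, step=0.1, label="Guidance scale"
            )
            batch_size = gr.Slider(
                value=4, minimum=1, maximum=32, step=1, label="Number of images"
            )

    run = gr.Button("Generate")
    out = gr.Textbox(label="Output (placeholder)")
    run.click(generate, [prompt_input, guidance_scale, batch_size], out)

if __name__ == "__main__":
    demo.launch()
```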
```diff
@@ -350,27 +360,12 @@ def main():
                         show_diffusion_path = gr.Checkbox(
                             value=False, label="Show diffusion progress"
                         )
-                    with gr.Column(scale=1):
                         show_xt = gr.Checkbox(value=False, label="Show predicted x_t")
+                    with gr.Column(scale=1):
                         output_inner = gr.Checkbox(
                             value=False,
                             label="Output inner UNet (High-res models Only)",
-                        … (2 removed lines, not shown in this rendering)
-                with gr.Column(scale=2):
-                    prompt_input = gr.Textbox(label="Input prompt")
-                    with gr.Row(equal_height=False):
-                        with gr.Column(scale=1):
-                            guidance_scale = gr.Slider(
-                                value=7.5,
-                                minimum=0.0,
-                                maximum=50,
-                                step=0.1,
-                                label="Guidance scale",
-                            )
-                        with gr.Column(scale=1):
-                            batch_size = gr.Slider(
-                                value=64, minimum=1, maximum=128, step=1, label="Number of images"
+                            visible=False,
                         )
 
                         with gr.Row(equal_height=False):
```
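The second hunk removes the old prompt-and-slider column (those controls are now built earlier) and keeps the high-res-only checkbox in the layout but hidden, since the pinned `mdm-flickr-64` model never produces an inner UNet output. Below is a small sketch of that pattern under standard Gradio behavior; the button and the `gr.update` toggle are illustrative additions, not part of the demo.

```python
import gradio as gr

with gr.Blocks() as demo:
    # Kept in the layout but hidden: only high-resolution checkpoints use it,
    # and this demo pins a single 64px model.
    output_inner = gr.Checkbox(
        value=False,
        label="Output inner UNet (High-res models Only)",
        visible=False,
    )

    # Illustrative only: a hidden component can still feed callbacks, and it
    # can be re-shown later by returning gr.update(visible=True) for it.
    show_advanced = gr.Button("Show advanced options")
    show_advanced.click(lambda: gr.update(visible=True), None, output_inner)

if __name__ == "__main__":
    demo.launch()
```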
```diff
@@ -530,14 +525,14 @@ def main():
             cancels=[run_event],
             queue=False,
         )
-        example0 = gr.Examples(
-            [
-                ["mdm-flickr-64", 64, 50, 0],
-                ["mdm-flickr-256", 16, 100, 0],
-                ["mdm-flickr-1024", 4, 250, 1],
-            ],
-            inputs=[ckpt_name, batch_size, num_inference_steps, eta],
-        )
+        # example0 = gr.Examples(
+        #     [
+        #         ["mdm-flickr-64", 64, 50, 0],
+        #         ["mdm-flickr-256", 16, 100, 0],
+        #         ["mdm-flickr-1024", 4, 250, 1],
+        #     ],
+        #     inputs=[ckpt_name, batch_size, num_inference_steps, eta],
+        # )
         example1 = gr.Examples(
             examples=[[t.strip()] for t in example_texts],
             inputs=[prompt_input],
```
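The last hunk comments out the multi-model `example0` table, so only the prompt examples remain. A minimal sketch of the surviving `gr.Examples` wiring follows; the `example_texts` list here is a placeholder, since the demo's own list is defined outside this hunk.

```python
import gradio as gr

# Placeholder for the demo's example_texts, which is not shown in this hunk.
example_texts = [
    "a photo of an astronaut riding a horse on mars",
    "an oil painting of a lighthouse at dawn",
]

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Input prompt")
    # Clicking an example row fills the prompt box; no checkpoint/batch-size
    # examples remain now that the demo is pinned to one model.
    example1 = gr.Examples(
        examples=[[t.strip()] for t in example_texts],
        inputs=[prompt_input],
    )

if __name__ == "__main__":
    demo.launch()
```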