Spaces: Running on Zero
Added History
Added a history gallery, same as in this PR:
https://huggingface.co/spaces/fffiloni/diffusers-image-outpaint/discussions/16
The ZeroGPU duration was also extended because the allotted time was running out before the full image finished generating.
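For context, here is a minimal sketch of the ZeroGPU pattern this change relies on. The model id and the function body are placeholders, not this Space's actual pipeline; only the `@spaces.GPU(duration=...)` decorator mirrors the diff below, where the per-call window is set to 90 seconds.

```python
# Minimal sketch of the ZeroGPU duration pattern (placeholder model and body,
# not this Space's real code). `duration` is how many seconds the GPU is
# reserved for each call to the decorated function.
import spaces
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "some-org/some-model",  # placeholder model id
    torch_dtype=torch.bfloat16,
)

@spaces.GPU(duration=90)  # the PR raises the per-call window to 90 seconds
def generate(prompt: str):
    pipe.to("cuda")                 # use the GPU only inside the decorated call
    image = pipe(prompt).images[0]
    pipe.to("cpu")                  # release the GPU when the call returns
    return image
```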
app.py CHANGED

@@ -270,7 +270,7 @@ def remove_custom_lora(selected_indices, current_loras):
         lora_image_2
     )
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
     print("Generating image...")
     pipe.to("cuda")
@@ -291,7 +291,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
         yield img
     pipe.to("cpu")
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
     pipe_i2i.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
@@ -311,7 +311,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     pipe_i2i.to("cpu")
     return final_image
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     if not selected_indices:
         raise gr.Error("You must select at least one LoRA before proceeding.")
@@ -435,6 +435,13 @@ def check_custom_model(link):
     # Assume it's a Hugging Face model path
     return get_huggingface_safetensors(link)
 
+def update_history(new_image, history):
+    """Updates the history gallery with the new image."""
+    if history is None:
+        history = []
+    history.insert(0, new_image)
+    return history
+
 css = '''
 #gen_btn{height: 100%}
 #title{text-align: center}
@@ -514,6 +521,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, delete_cache=(60, 3600)) as app:
         with gr.Column():
             progress_bar = gr.Markdown(elem_id="progress", visible=False)
             result = gr.Image(label="Generated Image")
+            history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain")
+
     with gr.Row():
         with gr.Accordion("Advanced Settings", open=False):
             with gr.Row():
@@ -566,6 +575,10 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, delete_cache=(60, 3600)) as app:
         fn=run_lora,
         inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
         outputs=[result, seed, progress_bar]
+    ).then(  # Update the history gallery
+        fn=lambda x, history: update_history(x[1], history),
+        inputs=[result, history_gallery],
+        outputs=history_gallery,
     )
 
 app.queue()
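Below is a self-contained sketch of the history wiring added above, using stand-in components (a fake generator instead of `run_lora`; the component names are illustrative). The key idea is the same: `.then()` chains a second step onto the main event, so once `result` has a new value, `update_history` prepends it to the `gr.Gallery` value. Because the history update is chained after generation, it stays off the GPU path entirely.

```python
# Stand-alone sketch of the history-gallery pattern (assumed demo components;
# the real app chains .then() onto its run_lora event instead).
import gradio as gr
import numpy as np

def update_history(new_image, history):
    """Prepend the newest image to the history gallery value."""
    if history is None:
        history = []
    history.insert(0, new_image)
    return history

def fake_generate(prompt):
    # Placeholder: return a random RGB array instead of a diffusion result.
    return np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    generate_btn = gr.Button("Generate")
    result = gr.Image(label="Generated Image")
    history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain")

    generate_btn.click(
        fn=fake_generate, inputs=prompt, outputs=result
    ).then(  # runs after the main event, mirroring the PR's wiring
        fn=update_history,
        inputs=[result, history_gallery],
        outputs=history_gallery,
    )

demo.queue().launch()
```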