Handle any size
This PR handles any original size and restores it, even sizes greater than 1 million pixels:
1. It saves the original size.
2. It computes the best processing size for SDXL, whether the image is landscape or portrait (a minimal sketch follows below).
3. It ensures the width and height are multiples of 8.
4. After the computation, it restores the original size.
This code is already used here: https://huggingface.co/spaces/Fabrice-TIERCELIN/Uncrop
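A minimal sketch of the size computation described above, written as a standalone helper. The function name `compute_process_size`, the `max_pixels` parameter, and the 4000×3000 example are illustrative only and not part of this PR; the logic mirrors the code added to app.py below.

```python
import math

def compute_process_size(original_width, original_height, max_pixels=1024 * 1024):
    # Scale down so the processed area stays within the SDXL budget (~1 megapixel)
    if original_width * original_height > max_pixels:
        factor = (max_pixels / (original_width * original_height)) ** 0.5
        process_width = math.floor(original_width * factor)
        process_height = math.floor(original_height * factor)
    else:
        process_width, process_height = original_width, original_height

    # Width and height must be multiples of 8 for the SDXL pipeline
    if process_width % 8 != 0 or process_height % 8 != 0:
        process_width -= process_width % 8
        process_height -= process_height % 8
        # Round back up by one step if the result still fits in the budget
        if (process_width + 8) * (process_height + 8) <= max_pixels:
            process_width += 8
            process_height += 8

    return process_width, process_height

# Illustrative usage: a 12-megapixel upload is processed at 1176x880
# (both multiples of 8, area <= 1024*1024); the pipeline output is then
# resized back to 4000x3000 at the end of predict().
print(compute_process_size(4000, 3000))  # (1176, 880)
```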
app.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import numpy as np
 import torch
 
 from diffusers import AutoPipelineForInpainting, UNet2DConditionModel
@@ -30,12 +31,46 @@ def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, s
     scheduler = getattr(diffusers, scheduler_class_name)
     pipe.scheduler = scheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler", **add_kwargs)
 
-    init_image = dict["image"].convert("RGB")
-    mask = dict["mask"].convert("RGB")
+    init_image = dict["image"].convert("RGB")
+    mask = dict["mask"].convert("RGB")
+    original_height, original_width, original_channel = np.array(init_image).shape
+
+    # Limited to 1 million pixels
+    if 1024 * 1024 < original_width * original_height:
+        factor = ((1024 * 1024) / (original_width * original_height))**0.5
+        process_width = math.floor(original_width * factor)
+        process_height = math.floor(original_height * factor)
+    else:
+        process_width = original_width
+        process_height = original_height
+
+    # Width and height must be multiple of 8
+    if (process_width % 8) != 0 or (process_height % 8) != 0:
+        process_width = process_width - (process_width % 8)
+        process_height = process_height - (process_height % 8)
+
+        if ((process_width + 8) * (process_height + 8)) <= (1024 * 1024):
+            process_width = process_width + 8
+            process_height = process_height + 8
+
+    init_image = init_image.resize((process_width, process_height))
+    mask = mask.resize((process_width, process_height))
 
-    output = pipe(
+    output = pipe(
+        prompt = prompt,
+        negative_prompt=negative_prompt,
+        image=init_image,
+        mask_image=mask,
+        guidance_scale=guidance_scale,
+        num_inference_steps=int(steps),
+        strength=strength,
+        width = process_width,
+        height = process_height
+    )
+
+    output_image = output.images[0].resize((original_width, original_height))
 
-    return
+    return output_image, gr.update(visible=True)
 
 
 css = '''
@@ -80,7 +115,7 @@ with image_blocks as demo:
     gr.HTML(read_content("header.html"))
     with gr.Row():
         with gr.Column():
-            image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload"
+            image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload")
             with gr.Row(elem_id="prompt-container", mobile_collapse=False, equal_height=True):
                 with gr.Row():
                     prompt = gr.Textbox(placeholder="Your prompt (what you want in place of what is erased)", show_label=False, elem_id="prompt")
@@ -97,7 +132,7 @@ with image_blocks as demo:
                     scheduler = gr.Dropdown(label="Schedulers", choices=schedulers, value="EulerDiscreteScheduler")
 
         with gr.Column():
-            image_out = gr.Image(label="Output", elem_id="output-img"
+            image_out = gr.Image(label="Output", elem_id="output-img")
             with gr.Group(elem_id="share-btn-container", visible=False) as share_btn_container:
                 community_icon = gr.HTML(community_icon_html)
                 loading_icon = gr.HTML(loading_icon_html)