Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -174,50 +174,21 @@ tag_model.eval()
 tag_model.to(device, dtype=weight_dtype)

 def preprocess_image(input_image: Image.Image) -> Image.Image:
-
-
-
-    duration = 60
-
-    if ori_width > 384 or ori_height > 384:
-        duration = 180
-    elif ori_width > 1024 or ori_height > 1024:
-        duration = 240
-
-    return duration
+    img = input_image.copy()
+    img.thumbnail((256, 256), Image.Resampling.BILINEAR)
+    return img

 @spaces.GPU()
 def preprocess_n_magnify(input_image: Image.Image):
-
-
-    img, magnified_img = magnify(input_image, duration)
+    processed_img = preprocess_image(input_image)

-
+    img, magnified_img = magnify(processed_img)

+    return (img, magnified_img)

-def get_duration(
-    input_image: Image.Image,
-    duration_seconds,
-    user_prompt,
-    positive_prompt,
-    negative_prompt,
-    num_inference_steps,
-    scale_factor,
-    cfg_scale,
-    seed,
-    latent_tiled_size,
-    latent_tiled_overlap,
-    sample_times,
-    progress,
-):
-
-    return int(duration_seconds)
-
-
-@spaces.GPU(duration=get_duration)
+@spaces.GPU()
 def magnify(
     input_image: Image.Image,
-    duration_seconds = 60,
     user_prompt = "",
     positive_prompt = "clean, high-resolution, 8k, best quality, masterpiece",
     negative_prompt = "dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
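The removed block is ZeroGPU's dynamic-duration pattern: a helper with the same signature as the GPU task returns the number of seconds to reserve and is passed to the decorator, while the new code falls back to the default fixed allocation. A minimal sketch of the two styles, assuming a standard ZeroGPU Space; the upscale_dynamic and upscale_fixed names are placeholders, not functions from app.py:

import spaces
from PIL import Image

def get_duration(input_image: Image.Image, duration_seconds=60, **kwargs):
    # ZeroGPU calls this with the same arguments as the decorated task and
    # reserves the returned number of seconds of GPU time for that call.
    return int(duration_seconds)

@spaces.GPU(duration=get_duration)   # old style: per-call GPU budget
def upscale_dynamic(input_image: Image.Image, duration_seconds=60, **kwargs):
    ...

@spaces.GPU()                        # new style: default fixed budget
def upscale_fixed(input_image: Image.Image, **kwargs):
    ...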
@@ -360,14 +331,14 @@ with gr.Blocks(css=css, theme=theme) as demo:
         inputs=[
             input_image,
         ],
-        outputs=[
+        outputs=[result_gallery],
         fn=preprocess_n_magnify,
         cache_examples=True,
     )
     inputs = [
         input_image,
     ]
-    run_button.click(fn=magnify, inputs=
-    input_image.upload(fn=preprocess_image,inputs=input_image, outputs=
+    run_button.click(fn=magnify, inputs=input_image, outputs=[result_gallery])
+    input_image.upload(fn=preprocess_image,inputs=input_image, outputs=input_image)

 demo.launch(share=True)
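For orientation, here is a minimal, self-contained sketch of the event wiring this hunk ends up with. The component names (input_image, run_button, result_gallery) come from app.py, but their definitions and the simplified magnify body below are assumptions, and the gr.Examples block with cache_examples=True is omitted:

import gradio as gr
from PIL import Image

def preprocess_image(input_image: Image.Image) -> Image.Image:
    img = input_image.copy()
    img.thumbnail((256, 256), Image.Resampling.BILINEAR)
    return img

def magnify(input_image: Image.Image):
    # Placeholder: the real function runs the diffusion upscaler on the GPU.
    return [input_image, input_image]

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil", label="Input")
    run_button = gr.Button("Magnify")
    result_gallery = gr.Gallery(label="Results")

    # Shrink oversized uploads in place before they ever reach the GPU task.
    input_image.upload(fn=preprocess_image, inputs=input_image, outputs=input_image)
    # Run the upscaler and show the original and magnified images together.
    run_button.click(fn=magnify, inputs=input_image, outputs=[result_gallery])

demo.launch()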
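As a side note on the new preprocess_image: PIL's Image.thumbnail resizes in place, preserves the aspect ratio, and never upscales, which is why the code copies the input first and why small images pass through unchanged. A quick illustration, with a placeholder file name:

from PIL import Image

src = Image.open("example.png")            # e.g. a 1024x768 upload
img = src.copy()                           # thumbnail() mutates, so work on a copy
img.thumbnail((256, 256), Image.Resampling.BILINEAR)
print(src.size, "->", img.size)            # (1024, 768) -> (256, 192); a 200x150 image is left as-is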