Update app.py

app.py CHANGED
@@ -12,7 +12,7 @@ model8 = RealESRGAN(device, scale=8)
 model8.load_weights('weights/RealESRGAN_x8.pth', download=True)
 
 
-def inference(image, size):
+def inference_image(image, size):
     global model2
     global model4
     global model8
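The body of the renamed handler is not shown in either hunk. A minimal sketch of what inference_image plausibly does, assuming the size radio value ('2x'/'4x'/'8x') simply selects one of the three preloaded models and that the ai-forever Real-ESRGAN predict() API is used (both are assumptions, not confirmed by this diff):

def inference_image(image, size):
    # Hypothetical body; the real implementation is not part of this diff.
    global model2
    global model4
    global model8

    # Assumed mapping from the '2x'/'4x'/'8x' radio value to the preloaded models.
    models = {'2x': model2, '4x': model4, '8x': model8}
    model = models.get(size, model4)

    # ai-forever/Real-ESRGAN models expose predict(), which takes and returns a PIL image.
    result = model.predict(image.convert('RGB'))
    return result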
@@ -55,19 +55,35 @@ def inference(image, size):
     return result
 
 
-title = "RealESRGAN UpScale Model: 2x 4x 8x"
-description = "This model running on cpu so it takes bit time,so pls be patient :)"
 
 
-gr.Interface(
-    ...
+input_image = gr.Image(type='pil', label='Input Image')
+input_model_image = gr.Radio(['2x', '4x', '8x'], type="value", value="4x", label="Model Upscale/Enhance Type")
+submit_image_button = gr.Button('Submit')
+output_image = gr.Image(type="filepath", label="Output Image")
 
+tab_img = gr.Interface(
+    fn=inference_image,
+    inputs=[input_image, input_model_image, face_enhance_image, outscale_image, fp32_image, extension_image],
+    outputs=output_image,
+    title="Real-ESRGAN Pytorch",
+    description="Gradio UI for Real-ESRGAN Pytorch version. To use it, simply upload your image, or click one of examples and choose the model. Read more at the links below. Please click submit only once <br><p style='text-align: center'><a href='https://arxiv.org/abs/2107.10833'>Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data</a> | <a href='https://github.com/ai-forever/Real-ESRGAN'>Github Repo</a></p>"
+)
+
+input_video = gr.Video(type="filepath", label='Input Video')
+input_model_video = gr.Radio(['2x', '4x', '8x'], type="value", value="4x", label="Model Upscale/Enhance Type")
+submit_video_button = gr.Button('Submit')
+output_video = gr.Video(type="filepath", label='Output Video')
+
+tab_vid = gr.Interface(
+    fn=inference_video,
+    inputs=[input_video, input_model_video],
+    outputs=output_video,
+    title="Real-ESRGAN Pytorch",
+    description="Gradio UI for Real-ESRGAN Pytorch version. To use it, simply upload your video, or click one of examples and choose the model. Read more at the links below. Please click submit only once <br><p style='text-align: center'><a href='https://arxiv.org/abs/2107.10833'>Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data</a> | <a href='https://github.com/ai-forever/Real-ESRGAN'>Github Repo</a></p>"
+)
+
+demo = gr.TabbedInterface([tab_img, tab_vid], ["Image", "Video"])
+
+
+
+demo.launch(debug=True, show_error=True)
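Note that the added inputs= list for the image tab passes face_enhance_image, outscale_image, fp32_image and extension_image, none of which are defined anywhere in this change, while the renamed inference_image(image, size) accepts only two arguments; the two gr.Button('Submit') widgets are also created but never wired to anything in the shown lines, since gr.Interface supplies its own submit button. A minimal, self-contained sketch of the same two-tab layout, trimmed to the inputs this commit actually defines, could look like the following; inference_image and inference_video are assumed to be the handler functions from app.py, and the type= argument is dropped from gr.Video in this sketch:

import gradio as gr

# Hypothetical, trimmed re-wiring of the two tabs added by this commit.
# inference_image and inference_video are assumed to be the handler
# functions defined earlier in app.py; only inputs that the commit
# actually creates are passed to each gr.Interface.
input_image = gr.Image(type='pil', label='Input Image')
input_model_image = gr.Radio(['2x', '4x', '8x'], type="value", value="4x",
                             label="Model Upscale/Enhance Type")
output_image = gr.Image(type="filepath", label="Output Image")

tab_img = gr.Interface(
    fn=inference_image,
    inputs=[input_image, input_model_image],   # undefined extras dropped
    outputs=output_image,
    title="Real-ESRGAN Pytorch",
)

input_video = gr.Video(label='Input Video')
input_model_video = gr.Radio(['2x', '4x', '8x'], type="value", value="4x",
                             label="Model Upscale/Enhance Type")
output_video = gr.Video(label='Output Video')

tab_vid = gr.Interface(
    fn=inference_video,
    inputs=[input_video, input_model_video],
    outputs=output_video,
    title="Real-ESRGAN Pytorch",
)

demo = gr.TabbedInterface([tab_img, tab_vid], ["Image", "Video"])
demo.launch(debug=True, show_error=True)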
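inference_video is passed to the video tab but is not defined anywhere in this commit. Purely as an illustration of one way such a handler could work, here is a sketch that upscales a clip frame by frame with OpenCV; every detail below (the cv2-based I/O, the output path, the reuse of the model2/model4/model8 globals and their predict() method) is an assumption rather than part of the commit:

import cv2
import numpy as np
from PIL import Image

def inference_video(video_path, size):
    # Hypothetical sketch: upscale a video frame by frame.
    # model2/model4/model8 are assumed to be the RealESRGAN globals from app.py.
    models = {'2x': model2, '4x': model4, '8x': model8}
    model = models.get(size, model4)

    reader = cv2.VideoCapture(video_path)
    fps = reader.get(cv2.CAP_PROP_FPS) or 25
    writer = None
    out_path = 'upscaled.mp4'

    while True:
        ok, frame = reader.read()
        if not ok:
            break
        # OpenCV yields BGR frames; the model expects an RGB PIL image.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        upscaled = model.predict(Image.fromarray(rgb))
        bgr = cv2.cvtColor(np.array(upscaled), cv2.COLOR_RGB2BGR)
        if writer is None:
            height, width = bgr.shape[:2]
            writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'),
                                     fps, (width, height))
        writer.write(bgr)

    reader.release()
    if writer is not None:
        writer.release()
    return out_path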