Spaces: Running on L4

Commit: update

app.py CHANGED
@@ -109,7 +109,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
     only_center_face = False
     draw_box = False
     detection_model = "retinaface_resnet50"
-    print(image, background_enhance, face_upsample, upscale, codeformer_fidelity)
+    print('Inp:', image, background_enhance, face_upsample, upscale, codeformer_fidelity)
 
     upscale = int(upscale) # covert type to int
     if upscale > 4:
@@ -129,14 +129,14 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
 
     img = cv2.imread(str(image), cv2.IMREAD_COLOR)
 
-    print('
+    print('\timage size:', img.shape)
 
     if has_aligned:
         # the input faces are already cropped and aligned
         img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
         face_helper.is_gray = is_gray(img, threshold=5)
         if face_helper.is_gray:
-            print('
+            print('\tgrayscale input: True')
         face_helper.cropped_faces = [img]
     else:
         face_helper.read_image(img)
@@ -144,7 +144,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
         num_det_faces = face_helper.get_face_landmarks_5(
             only_center_face=only_center_face, resize=640, eye_dist_threshold=5
         )
-        print(f
+        print(f'\tdetect {num_det_faces} faces')
         # align and warp each face
         face_helper.align_warp_face()
 
@@ -166,7 +166,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
             del output
             torch.cuda.empty_cache()
         except RuntimeError as error:
-            print(f"
+            print(f"Failed inference for CodeFormer: {error}")
             restored_face = tensor2img(
                 cropped_face_t, rgb2bgr=True, min_max=(-1, 1)
             )
@@ -202,7 +202,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
         restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
         return restored_img, save_path
     except Exception as error:
-        print('
+        print('Global exception', error)
         return None, None
 
 
@@ -248,7 +248,7 @@ demo = gr.Interface(
         gr.inputs.Checkbox(default=True, label="Background_Enhance"),
         gr.inputs.Checkbox(default=True, label="Face_Upsample"),
         gr.inputs.Number(default=2, label="Rescaling_Factor (up to 4)"),
-        gr.Slider(0, 1, value=0.5, step=0.01, label='Codeformer_Fidelity
+        gr.Slider(0, 1, value=0.5, step=0.01, label='Codeformer_Fidelity (0 for better quality, 1 for better identity)')
     ], [
         gr.outputs.Image(type="numpy", label="Output"),
         gr.outputs.File(label="Download the output")
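For context, a minimal sketch of how the changed input list plausibly slots into the demo = gr.Interface(...) call referenced in the last hunk. Only the checkboxes, the number field, the slider, and the two outputs appear in the diff; the leading gr.inputs.Image component, the positional inference argument, and the stub inference() below are assumptions added so the snippet runs on its own (Gradio 3.x with the legacy gr.inputs/gr.outputs API).

# Sketch only, not the actual app.py. The gr.inputs.Image component and the
# placeholder inference() are assumptions; the remaining components mirror the diff.
import gradio as gr

def inference(image, background_enhance, face_upsample, upscale, codeformer_fidelity):
    # Placeholder standing in for the real CodeFormer restoration code in app.py.
    return None, None

demo = gr.Interface(
    inference,
    [
        gr.inputs.Image(type="filepath", label="Input"),  # assumed first input
        gr.inputs.Checkbox(default=True, label="Background_Enhance"),
        gr.inputs.Checkbox(default=True, label="Face_Upsample"),
        gr.inputs.Number(default=2, label="Rescaling_Factor (up to 4)"),
        gr.Slider(0, 1, value=0.5, step=0.01,
                  label='Codeformer_Fidelity (0 for better quality, 1 for better identity)')
    ],
    [
        gr.outputs.Image(type="numpy", label="Output"),
        gr.outputs.File(label="Download the output")
    ]
)

if __name__ == "__main__":
    demo.launch()

The relabeled slider makes the fidelity trade-off visible in the UI itself: values near 0 favor restoration quality, values near 1 favor preserving the input identity.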