fix memory exceed issue.
Changed files:
- CodeFormer/basicsr/utils/realesrgan_utils.py (+16 -13)
- app.py (+2 -2)
CodeFormer/basicsr/utils/realesrgan_utils.py
CHANGED

@@ -196,19 +196,22 @@ class RealESRGANer():
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

         # ------------------- process image (without the alpha channel) ------------------- #
+        try:
+            with torch.no_grad():
+                self.pre_process(img)
+                if self.tile_size > 0:
+                    self.tile_process()
+                else:
+                    self.process()
+                output_img_t = self.post_process()
+                output_img = output_img_t.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+                output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
+                if img_mode == 'L':
+                    output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
+                del output_img_t
+                torch.cuda.empty_cache()
+        except RuntimeError as error:
+            print(f"Failed inference for RealESRGAN: {error}")

         # ------------------- process the alpha channel if necessary ------------------- #
         if img_mode == 'RGBA':
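The change wraps the whole RealESRGAN pass in torch.no_grad(), frees the output tensor once the numpy result has been copied to the CPU, and clears the CUDA cache, so a large or failed request no longer leaves autograd buffers or cached allocations on the GPU. A minimal standalone sketch of that pattern, assuming a generic upscaling model; the names safe_upscale, model, and img_tensor are placeholders and not part of this Space:

import numpy as np
import torch

def safe_upscale(model, img_tensor):
    """Run `model` under torch.no_grad() and release GPU memory afterwards.
    Returns an HWC float numpy image in [0, 1], or None on CUDA OOM."""
    output = None
    try:
        with torch.no_grad():                     # no autograd graph -> lower peak memory
            out_t = model(img_tensor)             # NCHW float tensor on the GPU
            output = out_t.data.squeeze().float().cpu().clamp_(0, 1).numpy()
            output = np.transpose(output, (1, 2, 0))  # CHW -> HWC
        del out_t                                 # drop the last reference to the GPU tensor
        torch.cuda.empty_cache()                  # hand cached blocks back to the allocator
    except RuntimeError as error:                 # CUDA out-of-memory surfaces as RuntimeError
        print(f"Failed inference: {error}")
        torch.cuda.empty_cache()
    return output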
app.py
CHANGED

@@ -117,7 +117,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
     upscale = int(upscale) # covert type to int
     if upscale > 4:
         upscale = 4 # avoid momory exceeded due to too large upscale
+    if upscale > 2 and max(img.shape[:2])>1000:
         upscale = 2 # avoid momory exceeded due to too large img resolution

     face_helper = FaceRestoreHelper(

@@ -267,5 +267,5 @@ demo = gr.Interface(
     ]
 )

+demo.queue(concurrency_count=2)
 demo.launch()
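Both app.py edits keep a single GPU within budget: the requested upscale factor is forced down to 2x once the input exceeds 1000 px on its longest side, and the Gradio queue lets at most two requests run on the GPU at a time. A self-contained sketch of that shape, assuming the Gradio 3.x queue API (concurrency_count); the interface components and fake_inference below are illustrative stand-ins, not the Space's real pipeline:

import gradio as gr
import numpy as np

MAX_SIDE_FOR_X4 = 1000  # above this input size, cap the upscale factor at 2x

def clamp_upscale(upscale, img):
    """Reduce the requested upscale factor for large inputs to avoid GPU OOM."""
    upscale = int(upscale)
    if upscale > 4:
        upscale = 4
    if upscale > 2 and max(img.shape[:2]) > MAX_SIDE_FOR_X4:
        upscale = 2
    return upscale

def fake_inference(image, upscale):
    # stand-in for the real CodeFormer / RealESRGAN pipeline
    img = np.asarray(image)
    return f"input {img.shape[1]}x{img.shape[0]}, effective upscale x{clamp_upscale(upscale, img)}"

demo = gr.Interface(fn=fake_inference,
                    inputs=[gr.Image(), gr.Number(value=2, label="upscale")],
                    outputs="text")
demo.queue(concurrency_count=2)  # at most 2 requests run on the GPU concurrently
demo.launch()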