Update app.py
app.py CHANGED
@@ -10,6 +10,7 @@ import urllib
 import time
 import os
 import datetime
+import gc
 
 from models.transformer_sd3 import SD3Transformer2DModel
 #from diffusers import StableDiffusion3Pipeline
@@ -125,6 +126,9 @@ def infer(
         nb_token=64,
     )
     upscaler_2.to(torch.device('cpu'))
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -183,6 +187,9 @@ def infer(
     sd_image.save(rv_path,optimize=False,compress_level=0)
     upload_to_ftp(rv_path)
     upscaler_2.to(torch.device('cuda'))
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     with torch.no_grad():
         upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
     print('-- got upscaled image --')
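The added lines follow a common memory-hygiene pattern on GPU-constrained Spaces: after a stage's model (here upscaler_2) is moved between devices, Python garbage collection is run, the CUDA caching allocator is flushed so freed blocks are returned before the next large allocation, and the peak-memory counters are reset so later readings reflect only the next stage. Below is a minimal, self-contained sketch of that pattern; the run_stage helper and the toy Linear model are hypothetical illustrations, not code from this Space.

import gc
import torch

def run_stage(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    """Run one pipeline stage on the GPU, then release its memory."""
    model.to("cuda")
    with torch.no_grad():
        out = model(x.to("cuda"))
    model.to("cpu")                       # offload weights back to host RAM
    gc.collect()                          # drop lingering Python references
    torch.cuda.empty_cache()              # return cached blocks to the driver
    torch.cuda.reset_peak_memory_stats()  # restart peak tracking for the next stage
    return out.cpu()

# Usage (toy model, for illustration only):
if torch.cuda.is_available():
    toy = torch.nn.Linear(1024, 1024)
    y = run_stage(toy, torch.randn(8, 1024))
    print(torch.cuda.max_memory_allocated() // 2**20, "MiB peak since reset")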