Commit 40e81a0 · Initial commit
Parent(s): e12bf11

app.py CHANGED
@@ -202,8 +202,14 @@ class FaceLiftPipeline:
         multiview_path = output_dir / "multiview.png"
         multiview_image.save(multiview_path)
 
-        #
+        # Move diffusion model to CPU to free GPU memory for GS-LRM
+        print("Moving diffusion model to CPU to free memory...")
+        self.mvdiffusion_pipeline.to("cpu")
+
+        # Delete intermediate variables to free memory
+        del result, generator
         torch.cuda.empty_cache()
+        torch.cuda.synchronize()
 
         # Prepare 3D reconstruction input
         view_arrays = [np.array(view) for view in selected_views]
@@ -233,6 +239,21 @@ class FaceLiftPipeline:
             "index": batch_indices,
         })
 
+        # Ensure GS-LRM model is on GPU
+        if next(self.gs_lrm_model.parameters()).device.type == "cpu":
+            print("Moving GS-LRM model to GPU...")
+            self.gs_lrm_model.to(self.device)
+            torch.cuda.empty_cache()
+
+        # Log GPU memory status
+        if torch.cuda.is_available():
+            allocated = torch.cuda.memory_allocated() / 1024**3
+            reserved = torch.cuda.memory_reserved() / 1024**3
+            print(f"GPU memory before GS-LRM: {allocated:.2f}GB allocated, {reserved:.2f}GB reserved")
+
+        # Final memory cleanup before reconstruction
+        torch.cuda.empty_cache()
+
         # Run 3D reconstruction
         with torch.no_grad(), torch.autocast(enabled=True, device_type="cuda", dtype=torch.float16):
             result = self.gs_lrm_model.forward(batch, create_visual=False, split_data=True)
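The change amounts to a sequential-offload pattern: once the multi-view diffusion stage has produced its images, that pipeline is parked on the CPU and CUDA caches are cleared, so the GS-LRM reconstruction model has the GPU to itself. A minimal, self-contained sketch of that pattern follows; the stage_a / stage_b modules and tensor sizes are illustrative stand-ins, not the app's actual classes.

import torch
import torch.nn as nn

device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative stand-ins for the two stages (not the real pipeline classes).
stage_a = nn.Linear(512, 512).to(device)   # plays the role of the diffusion stage
stage_b = nn.Linear(512, 512)              # plays the role of the reconstruction stage

x = torch.randn(8, 512, device=device)

# Run stage A on the GPU.
with torch.no_grad():
    features = stage_a(x)

# Park stage A on the CPU and release cached GPU memory before stage B runs.
stage_a.to("cpu")
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.synchronize()
    print(f"allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")

# Move stage B onto the GPU only when it is about to run.
stage_b.to(device)
with torch.no_grad():
    out = stage_b(features)
print(out.shape)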