Spaces: Running on Zero
Commit 49f568d · Parent(s): fcc9ef6 · "add"

app.py CHANGED
@@ -231,25 +231,14 @@ class ShapERenderer:
     def __init__(self, device):
         print("Initializing Shap-E models...")
         self.device = device
-
-        self.
-        self.
+        torch.cuda.empty_cache() # Clear GPU memory before loading
+        self.xm = load_model('transmitter', device=self.device)
+        self.model = load_model('text300M', device=self.device)
+        self.diffusion = diffusion_from_config(load_config('diffusion'))
         print("Shap-E models initialized!")
 
-    def ensure_models_loaded(self):
-        if self.xm is None:
-            try:
-                torch.cuda.empty_cache() # Clear GPU memory before loading
-                self.xm = load_model('transmitter', device=self.device)
-                self.model = load_model('text300M', device=self.device)
-                self.diffusion = diffusion_from_config(load_config('diffusion'))
-            except Exception as e:
-                print(f"Error loading models: {e}")
-                raise
-
     def generate_views(self, prompt, guidance_scale=15.0, num_steps=64):
         try:
-            self.ensure_models_loaded()
             torch.cuda.empty_cache() # Clear GPU memory before generation
 
             # Generate latents using the text-to-3D model
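This hunk swaps lazy loading for eager loading: the ensure_models_loaded() guard is deleted and the three Shap-E models are loaded directly in __init__. The two truncated "self." deletions above are presumably the old None placeholders that ensure_models_loaded() checked; the rendered diff cuts off their right-hand sides. A minimal sketch of the constructor as the new side leaves it; the import lines are assumed from the upstream shap-e examples, since app.py's actual import block is outside this diff:

    # Sketch of ShapERenderer.__init__ after this commit (eager loading).
    # Import paths are assumed from the upstream shap-e examples; app.py's
    # real import block is not part of this diff.
    import torch
    from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
    from shap_e.models.download import load_config, load_model

    class ShapERenderer:
        def __init__(self, device):
            print("Initializing Shap-E models...")
            self.device = device
            torch.cuda.empty_cache()  # Clear GPU memory before loading
            # All three models are loaded up front, before the first
            # generate_views() call, instead of on demand.
            self.xm = load_model('transmitter', device=self.device)
            self.model = load_model('text300M', device=self.device)
            self.diffusion = diffusion_from_config(load_config('diffusion'))
            print("Shap-E models initialized!")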
@@ -313,24 +302,13 @@ class ShapERenderer:
 class RefinerInterface:
     def __init__(self):
         print("Initializing InstantMesh models...")
-
-        self.model =
-        self.infer_config = None
+        torch.cuda.empty_cache() # Clear GPU memory before loading
+        self.pipeline, self.model, self.infer_config = load_models()
         print("InstantMesh models initialized!")
 
-    def ensure_models_loaded(self):
-        if self.pipeline is None:
-            try:
-                torch.cuda.empty_cache() # Clear GPU memory before loading
-                self.pipeline, self.model, self.infer_config = load_models()
-            except Exception as e:
-                print(f"Error loading models: {e}")
-                raise
-
     def refine_model(self, input_image, prompt, steps=75, guidance_scale=7.5):
         """Main refinement function"""
         try:
-            self.ensure_models_loaded()
             torch.cuda.empty_cache() # Clear GPU memory before processing
 
             # Process image and get refined output
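RefinerInterface gets the same treatment: its ensure_models_loaded() path is removed and load_models() is called once in the constructor. load_models() is app.py's own helper, defined outside this diff; the call site shows it returning the InstantMesh pipeline, model, and inference config. A short sketch of the resulting constructor, with a stub standing in for that helper:

    # Sketch of RefinerInterface.__init__ after this commit.
    import torch

    def load_models():
        # Stub for app.py's helper (not shown in this diff); assumed to
        # return (pipeline, model, infer_config), per the call site above.
        raise NotImplementedError("defined elsewhere in app.py")

    class RefinerInterface:
        def __init__(self):
            print("Initializing InstantMesh models...")
            torch.cuda.empty_cache()  # Clear GPU memory before loading
            # Eager loading: pipeline, model, and config all exist before
            # the first refine_model() call.
            self.pipeline, self.model, self.infer_config = load_models()
            print("InstantMesh models initialized!")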
@@ -404,9 +382,13 @@ class RefinerInterface:
             raise
 
 def create_demo():
+    print("Initializing models...")
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    # Initialize models at startup
     shap_e = ShapERenderer(device)
     refiner = RefinerInterface()
+    print("All models initialized!")
 
     with gr.Blocks() as demo:
         gr.Markdown("# Shap-E to InstantMesh Pipeline")
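With both constructors now loading eagerly, create_demo() pays the full model-loading cost once at startup, before the Gradio UI is built. A sketch of the function as the new side leaves it; everything after gr.Markdown(...), including how demo is returned or launched, is outside this diff, so the trailing lines below are assumptions:

    # Sketch of create_demo() after this commit.
    # ShapERenderer and RefinerInterface are the classes patched above.
    import gradio as gr
    import torch

    def create_demo():
        print("Initializing models...")
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Initialize models at startup
        shap_e = ShapERenderer(device)
        refiner = RefinerInterface()
        print("All models initialized!")

        with gr.Blocks() as demo:
            gr.Markdown("# Shap-E to InstantMesh Pipeline")
            # ... UI components and event handlers continue in app.py ...
        return demo  # assumed; the diff does not show the rest of the function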