MogensR committed on
Commit 2167778 · 1 Parent(s): ad645ee

Update app.py

Files changed (1):
  1. app.py +613 -502
app.py CHANGED
@@ -1,542 +1,653 @@
#!/usr/bin/env python3
- # ========================= PRE-IMPORT ENV GUARDS =========================
- import os
- os.environ.pop("OMP_NUM_THREADS", None)
- os.environ.setdefault("MKL_NUM_THREADS", "1")
- os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")
- os.environ.setdefault("VECLIB_MAXIMUM_THREADS", "1")
- os.environ.setdefault("NUMEXPR_NUM_THREADS", "1")
-
- # ========================= IMPORTS =========================
- import gc
import sys
import cv2
- import torch
import numpy as np
- import gradio as gr
- import tempfile
- import time
from pathlib import Path
- import logging
import traceback
- from datetime import datetime
- import psutil
- import warnings
- warnings.filterwarnings("ignore")

- # Import the properly implemented functions from utilities
from utilities import (
    segment_person_hq,
    refine_mask_hq,
    replace_background_hq,
-     load_background_image,
-     resize_background_to_match,
-     apply_temporal_smoothing,
-     smooth_edges,
-     estimate_foreground
)

- # Import two-stage processor for advanced mode
- from two_stage_processor import TwoStageProcessor
-
- # Import UI components
- from ui_components import create_ui, get_example_videos, get_example_backgrounds

- # ========================= LOGGING SETUP =========================
- logging.basicConfig(
-     level=logging.INFO,
-     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
logger = logging.getLogger(__name__)

- # ========================= GPU/DEVICE SETUP =========================
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- logger.info(f"Using device: {device}")
-
- if device.type == "cuda":
-     torch.cuda.empty_cache()
-     # Optimize CUDA settings for memory efficiency
-     torch.backends.cudnn.benchmark = False
-     torch.backends.cudnn.deterministic = True
-     torch.cuda.set_per_process_memory_fraction(0.8)  # Limit to 80% of VRAM
-
- # ========================= GLOBAL MODELS =========================
- # Models will be loaded on demand to save RAM
- sam2_model = None
- matta_model = None
two_stage_processor = None

- # ========================= MODEL LOADING =========================
- def load_models_on_demand(use_two_stage=False):
-     """Load models only when needed, with proper memory management"""
-     global sam2_model, matta_model, two_stage_processor
-
    try:
-         # Clear any existing models first
-         clear_models_from_memory()
-
-         if use_two_stage and two_stage_processor is None:
-             logger.info("Loading Two-Stage Processor (SAM2 + MattA)...")
-             two_stage_processor = TwoStageProcessor(device=device)
-             logger.info("Two-Stage Processor loaded successfully")
-         elif not use_two_stage:
-             # Load individual models for single-stage processing
-             if sam2_model is None:
-                 logger.info("Loading SAM2 model...")
-                 # This should be imported from your SAM2 implementation
-                 from sam2_integration import load_sam2_model
-                 sam2_model = load_sam2_model(device=device)
-                 logger.info("SAM2 model loaded")
-
-             if matta_model is None:
-                 logger.info("Loading MattingAnything model...")
-                 # This should be imported from your MattA implementation
-                 from matta_integration import load_matta_model
-                 matta_model = load_matta_model(device=device)
-                 logger.info("MattingAnything model loaded")
-
-         # Force garbage collection after loading
-         gc.collect()
-         if device.type == "cuda":
-             torch.cuda.empty_cache()
-
    except Exception as e:
-         logger.error(f"Error loading models: {str(e)}")
-         raise
-
- def clear_models_from_memory():
-     """Clear models from memory to free up RAM"""
-     global sam2_model, matta_model, two_stage_processor
-
-     if sam2_model is not None:
-         del sam2_model
-         sam2_model = None
-
-     if matta_model is not None:
-         del matta_model
-         matta_model = None
-
-     if two_stage_processor is not None:
-         del two_stage_processor
-         two_stage_processor = None
-
-     gc.collect()
-     if device.type == "cuda":
-         torch.cuda.empty_cache()
-
- # ========================= MEMORY MONITORING =========================
- def log_memory_usage(stage=""):
-     """Log current memory usage"""
-     process = psutil.Process()
-     mem_info = process.memory_info()
-     ram_usage = mem_info.rss / 1024 / 1024 / 1024  # GB
-
-     if device.type == "cuda":
-         vram_usage = torch.cuda.memory_allocated() / 1024 / 1024 / 1024  # GB
-         vram_reserved = torch.cuda.memory_reserved() / 1024 / 1024 / 1024  # GB
-         logger.info(f"[{stage}] RAM: {ram_usage:.2f}GB | VRAM: {vram_usage:.2f}GB (reserved: {vram_reserved:.2f}GB)")
-     else:
-         logger.info(f"[{stage}] RAM: {ram_usage:.2f}GB")
-
- # ========================= PROGRESS TRACKING =========================
- def write_progress_info(info_dict):
-     """Write formatted progress information to temp file for UI display"""
    try:
-         progress_file = "/tmp/processing_info.txt"
-         with open(progress_file, "w") as f:
-             if "error" in info_dict:
-                 f.write(f"❌ ERROR\n{info_dict['error']}\n")
-             elif "complete" in info_dict:
-                 f.write(f"✅ COMPLETE\n")
-                 f.write(f"Total Frames: {info_dict.get('total_frames', 'N/A')}\n")
-                 f.write(f"Processing Time: {info_dict.get('time', 'N/A')}\n")
-                 f.write(f"Average FPS: {info_dict.get('fps', 'N/A')}\n")
-                 f.write(f"Resolution: {info_dict.get('resolution', 'N/A')}\n")
-                 f.write(f"Background: {info_dict.get('background', 'N/A')}\n")
            else:
-                 f.write(f"📊 PROCESSING STATUS\n")
-                 f.write(f"━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
-                 f.write(f"🎬 Frame {info_dict.get('current_frame', 0)}/{info_dict.get('total_frames', 0)}\n")
-                 f.write(f"⏱️ Elapsed: {info_dict.get('elapsed', '0s')}\n")
-                 f.write(f"⚡ Speed: {info_dict.get('speed', '0')} fps\n")
-                 f.write(f"🎯 ETA: {info_dict.get('eta', 'calculating...')}\n")
-                 f.write(f"━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
-                 f.write(f"📈 Progress: {info_dict.get('progress', 0):.1f}%\n")
    except Exception as e:
-         logger.error(f"Error writing progress: {e}")
-
- # ========================= MAIN PROCESSING FUNCTION =========================
- def process_video(
-     input_video,
-     background_image,
-     use_two_stage=False,
-     use_mask_refinement=True,
-     use_temporal_smoothing=True,
-     mask_blur=5,
-     edge_smoothing=5,
-     background_type="Color",
-     background_color="#00FF00",
-     progress=gr.Progress()
- ):
-     """
-     Main video processing function with proper SAM2+MattA integration
-     """
-     temp_dir = None
-     cap = None
-     out = None
-     start_time = time.time()
-
    try:
-         # Initial setup
-         logger.info("Starting video processing...")
-         log_memory_usage("Start")
-
-         # Validate inputs
-         if input_video is None:
-             raise ValueError("No input video provided")
-
-         # Load models based on processing mode
-         load_models_on_demand(use_two_stage=use_two_stage)
-         log_memory_usage("Models Loaded")
-
-         # Setup video capture
-         cap = cv2.VideoCapture(input_video)
        if not cap.isOpened():
-             raise ValueError(f"Failed to open video: {input_video}")
-
-         # Get video properties
-         fps = int(cap.get(cv2.CAP_PROP_FPS))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-         logger.info(f"Video info: {width}x{height}, {fps} fps, {total_frames} frames")
-
        # Prepare background
-         if background_type == "Color":
-             background = np.full((height, width, 3),
-                                  tuple(int(background_color[i:i+2], 16) for i in (5, 3, 1)),
-                                  dtype=np.uint8)
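-             # The generator above reads "#RRGGBB" back to front: slices [5:7],
-             # [3:5] and [1:3] of "#00FF00" give ("00", "FF", "00"), i.e. the
-             # (B, G, R) channel order OpenCV expects.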
-         elif background_type == "Image" and background_image is not None:
-             background = load_background_image(background_image)
-             background = resize_background_to_match(background, (width, height))
-         elif background_type == "Blur":
-             # Will be handled per frame
-             background = None
        else:
-             background = np.full((height, width, 3), (0, 255, 0), dtype=np.uint8)
-
-         # Setup output video
-         temp_dir = tempfile.mkdtemp()
-         output_path = os.path.join(temp_dir, "output_video.mp4")
-         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-         out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-
-         # Process frames
-         frame_idx = 0
-         processed_frames = []
-         masks_history = []  # For temporal smoothing
-
-         # Batch processing for memory efficiency
-         BATCH_SIZE = 10 if device.type == "cuda" else 5
-         frame_batch = []
-
        while True:
            ret, frame = cap.read()
            if not ret:
                break
-
-             frame_batch.append(frame)
-
-             # Process batch when full or at end
-             if len(frame_batch) == BATCH_SIZE or frame_idx == total_frames - 1:
-
-                 for batch_frame in frame_batch:
-                     # Update progress
-                     progress(frame_idx / total_frames, f"Processing frame {frame_idx}/{total_frames}")
-
-                     # Calculate and write detailed progress info
-                     elapsed_time = time.time() - start_time
-                     if frame_idx > 0:
-                         fps_current = frame_idx / elapsed_time
-                         eta = (total_frames - frame_idx) / fps_current
-                         write_progress_info({
-                             'current_frame': frame_idx,
-                             'total_frames': total_frames,
-                             'elapsed': f"{elapsed_time:.1f}s",
-                             'speed': f"{fps_current:.1f}",
-                             'eta': f"{eta:.0f}s",
-                             'progress': (frame_idx / total_frames) * 100
-                         })
-
-                     # Process frame based on mode
-                     if use_two_stage:
-                         # Use integrated two-stage processor
-                         processed_frame, mask = two_stage_processor.process_frame(
-                             batch_frame,
-                             background if background is not None else batch_frame,
-                             use_refinement=use_mask_refinement,
-                             mask_blur=mask_blur
-                         )
-                     else:
-                         # Use utilities functions (properly implemented with transparency fix)
-                         # Step 1: Segment person using SAM2
-                         mask = segment_person_hq(batch_frame, sam2_model)
-
-                         # Step 2: Refine mask using MattA if enabled
-                         if use_mask_refinement and matta_model is not None:
-                             mask = refine_mask_hq(batch_frame, mask, matta_model)
-
-                         # Step 3: Apply temporal smoothing if enabled
-                         if use_temporal_smoothing and len(masks_history) > 0:
-                             mask = apply_temporal_smoothing(mask, masks_history, window_size=5)
-
-                         # Store mask for temporal smoothing
-                         masks_history.append(mask)
-                         if len(masks_history) > 10:  # Keep only recent masks
-                             masks_history.pop(0)
-
-                         # Step 4: Apply edge smoothing
-                         if edge_smoothing > 0:
-                             mask = smooth_edges(mask, edge_smoothing)
-
-                         # Step 5: Handle background
-                         if background_type == "Blur":
-                             background_frame = cv2.GaussianBlur(batch_frame, (21, 21), 0)
-                         else:
-                             background_frame = background
-
-                         # Step 6: Replace background with proper alpha handling
-                         processed_frame = replace_background_hq(
-                             batch_frame,
-                             mask,
-                             background_frame
-                         )
-
-                     # Write frame
-                     out.write(processed_frame)
-                     processed_frames.append(processed_frame)
-                     frame_idx += 1
-
-                     # Memory management - clear every 100 frames
-                     if frame_idx % 100 == 0:
-                         gc.collect()
-                         if device.type == "cuda":
-                             torch.cuda.empty_cache()
-                         log_memory_usage(f"Frame {frame_idx}")
-
-                 # Clear batch
-                 frame_batch = []
-
-         # Finalize
        cap.release()
-         out.release()
-
-         # Write completion info
        total_time = time.time() - start_time
-         avg_fps = total_frames / total_time if total_time > 0 else 0
-         write_progress_info({
-             'complete': True,
-             'total_frames': total_frames,
-             'time': f"{total_time:.1f}s",
-             'fps': f"{avg_fps:.1f}",
-             'resolution': f"{width}x{height}",
-             'background': background_type
-         })
-
-         logger.info(f"Processing complete: {total_frames} frames in {total_time:.1f}s ({avg_fps:.1f} fps)")
-         log_memory_usage("Complete")
-
-         return output_path
-
-     except Exception as e:
-         logger.error(f"Processing error: {str(e)}\n{traceback.format_exc()}")
-         write_progress_info({'error': str(e)})
-         raise gr.Error(f"Processing failed: {str(e)}")
-
-     finally:
-         # Cleanup
-         if cap is not None:
-             cap.release()
-         if out is not None:
-             out.release()
-
-         # Clear models to free memory
-         clear_models_from_memory()
-
-         # Final garbage collection
-         gc.collect()
-         if device.type == "cuda":
-             torch.cuda.empty_cache()
-
- # ========================= GRADIO APP =========================
- def create_app():
-     """Create and configure the Gradio application"""
-
-     with gr.Blocks(title="Video Background Replacement - SAM2+MattA", theme=gr.themes.Soft()) as app:
-         gr.Markdown("""
-         # 🎬 Video Background Replacement
-         ### Powered by SAM2 + MattingAnything
-
-         Upload a video and replace the background with:
-         - 🎨 Solid colors
-         - 🖼️ Custom images
-         - 🌫️ Blurred background
-
-         **Two-Stage Mode**: Combines SAM2 segmentation with MattA refinement for best quality
-         """)
-
-         with gr.Tabs():
-             with gr.TabItem("🎥 Process Video"):
-                 with gr.Row():
-                     with gr.Column(scale=1):
-                         input_video = gr.Video(label="Input Video", height=300)
-
-                         with gr.Accordion("⚙️ Processing Options", open=True):
-                             use_two_stage = gr.Checkbox(
-                                 label="Use Two-Stage Processing (SAM2→MattA)",
-                                 value=True,
-                                 info="Better quality but slower"
-                             )
-                             use_mask_refinement = gr.Checkbox(
-                                 label="Refine Masks",
-                                 value=True,
-                                 info="Use MattA for better edges"
-                             )
-                             use_temporal_smoothing = gr.Checkbox(
-                                 label="Temporal Smoothing",
-                                 value=True,
-                                 info="Reduce flickering between frames"
-                             )
-                             mask_blur = gr.Slider(
-                                 minimum=0,
-                                 maximum=21,
-                                 value=5,
-                                 step=2,
-                                 label="Mask Blur"
-                             )
-                             edge_smoothing = gr.Slider(
-                                 minimum=0,
-                                 maximum=21,
-                                 value=5,
-                                 step=2,
-                                 label="Edge Smoothing"
-                             )
-
-                         with gr.Accordion("🎨 Background Options", open=True):
-                             background_type = gr.Radio(
-                                 choices=["Color", "Image", "Blur"],
-                                 value="Color",
-                                 label="Background Type"
-                             )
-                             background_color = gr.ColorPicker(
-                                 label="Background Color",
-                                 value="#00FF00",
-                                 visible=True
-                             )
-                             background_image = gr.Image(
-                                 label="Background Image",
-                                 type="filepath",
-                                 visible=False
-                             )
-
-                             # Show/hide based on background type
-                             def update_background_inputs(bg_type):
-                                 return (
-                                     gr.update(visible=bg_type == "Color"),
-                                     gr.update(visible=bg_type == "Image")
-                                 )
-
-                             background_type.change(
-                                 update_background_inputs,
-                                 inputs=[background_type],
-                                 outputs=[background_color, background_image]
-                             )
-
-                     with gr.Column(scale=1):
-                         output_video = gr.Video(label="Output Video", height=300)
-
-                         process_btn = gr.Button("🚀 Process Video", variant="primary", size="lg")
-
-                         processing_info = gr.Textbox(
-                             label="📊 Processing Info",
-                             lines=10,
-                             max_lines=15,
-                             interactive=False,
-                             placeholder="Processing status will appear here...",
-                             elem_id="processing-info"
-                         )
-
-                 # Connect processing
-                 process_btn.click(
-                     fn=process_video,
-                     inputs=[
-                         input_video,
-                         background_image,
-                         use_two_stage,
-                         use_mask_refinement,
-                         use_temporal_smoothing,
-                         mask_blur,
-                         edge_smoothing,
-                         background_type,
-                         background_color
-                     ],
-                     outputs=[output_video]
-                 )
-
-             with gr.TabItem("📚 Examples"):
-                 gr.Examples(
-                     examples=get_example_videos(),
-                     inputs=input_video,
-                     label="Sample Videos"
-                 )
-                 gr.Examples(
-                     examples=get_example_backgrounds(),
-                     inputs=background_image,
-                     label="Sample Backgrounds"
                )
-
-             with gr.TabItem("ℹ️ About"):
-                 gr.Markdown("""
-                 ### Technology Stack
-
-                 - **SAM2**: Segment Anything Model 2 for accurate person segmentation
-                 - **MattingAnything**: Advanced alpha matting for refined edges
-                 - **Two-Stage Processing**: Combines both models for optimal quality
-
-                 ### Tips for Best Results
-
-                 1. **Use Two-Stage Mode** for highest quality output
-                 2. **Enable Temporal Smoothing** to reduce flickering
-                 3. **Adjust Edge Smoothing** for softer transitions
-                 4. **High contrast backgrounds** work best
-
-                 ### Performance Notes
-
-                 - Processing speed depends on video resolution and length
-                 - GPU recommended for faster processing
-                 - Two-stage mode is slower but produces better results
-                 """)
-
-     return app
-
- # ========================= MAIN ENTRY POINT =========================
- if __name__ == "__main__":
    try:
-         # Create and launch app
-         app = create_app()
-
-         # Configure for HuggingFace Spaces
-         app.queue(max_size=5)
-         app.launch(
            server_name="0.0.0.0",
            server_port=7860,
-             share=False,
-             debug=False,
-             show_error=True
        )
-
    except Exception as e:
-         logger.error(f"Failed to start application: {str(e)}")
-         traceback.print_exc()
-         sys.exit(1)
#!/usr/bin/env python3
+ """
+ Final Fixed Video Background Replacement
+ Uses proper functions from utilities.py to avoid transparency issues
+ NEW: Added GPU detection, model caching, batch processing support,
+ and improved error handling
+ """
+ import os  # used throughout for path handling and ffmpeg shell calls
import sys
import cv2
import numpy as np
from pathlib import Path
+ import torch
import traceback
+ import time
+ import shutil
+ import gc
+ import threading
+ from typing import Optional, Tuple, Dict, Any
+ import logging
+ from huggingface_hub import hf_hub_download

+ # Import utilities - CRITICAL: use these functions, don't duplicate them here!
from utilities import (
    segment_person_hq,
    refine_mask_hq,
+     enhance_mask_opencv,
    replace_background_hq,
+     create_professional_background,
+     PROFESSIONAL_BACKGROUNDS,
+     validate_video_file
)

+ # Import two-stage processor if available
+ try:
+     from two_stage_processor import TwoStageProcessor, CHROMA_PRESETS
+     TWO_STAGE_AVAILABLE = True
+ except ImportError:
+     TWO_STAGE_AVAILABLE = False

+ logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

+ # ============================================================================ #
+ # OPTIMIZATION SETTINGS
+ # ============================================================================ #
+ KEYFRAME_INTERVAL = 5   # Run MatAnyone refinement on every 5th frame
+ FRAME_SKIP = 1          # Process every frame (set to 2 for every other frame)
+ MEMORY_CLEANUP_INTERVAL = 30  # Clean memory every 30 frames
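+ # With these defaults the main loop refines frames 0, 5, 10, ... with
+ # MatAnyone and carries the frames in between by blending each fresh SAM2
+ # mask against the last refined keyframe (see process_video_fixed below).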
+
+ # ============================================================================ #
+ # MODEL CACHING SYSTEM
+ # ============================================================================ #
+ CACHE_DIR = Path("/tmp/model_cache")
+ CACHE_DIR.mkdir(exist_ok=True, parents=True)
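+ # hf_hub_download() in the SAM2 loader points its cache_dir here, so the
+ # checkpoint is fetched once and reused for as long as /tmp survives.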
+
+ # ============================================================================ #
+ # GLOBAL MODEL STATE
+ # ============================================================================ #
+ sam2_predictor = None
+ matanyone_model = None
+ models_loaded = False
+ loading_lock = threading.Lock()
two_stage_processor = None
+ PROCESS_CANCELLED = False
+
+ # ============================================================================ #
+ # SAM2 LOADER WITH VALIDATION
+ # ============================================================================ #
+ def load_sam2_predictor_fixed(device: str = "cuda", progress_callback: Optional[callable] = None) -> Any:
+     """Load SAM2 with proper error handling and validation"""
+     def _prog(pct: float, desc: str):
+         if progress_callback:
+             progress_callback(pct, desc)
+
+         # Format progress info for display in the UI
+         if "Frame" in desc and "|" in desc:
+             parts = desc.split("|")
+             frame_info = parts[0].strip() if len(parts) > 0 else ""
+             time_info = parts[1].strip() if len(parts) > 1 else ""
+             fps_info = parts[2].strip() if len(parts) > 2 else ""
+             eta_info = parts[3].strip() if len(parts) > 3 else ""
+
+             display_text = f"""📊 PROCESSING STATUS
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━
+ 🎬 {frame_info}
+ ⏱️ Elapsed: {time_info}
+ ⚡ Speed: {fps_info}
+ 🎯 {eta_info}
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━
+ 📈 Progress: {pct*100:.1f}%"""
+
+             try:
+                 with open("/tmp/processing_info.txt", 'w') as f:
+                     f.write(display_text)
+             except Exception as e:
+                 logger.warning(f"Error writing processing info: {e}")

    try:
+         _prog(0.1, "Initializing SAM2...")
+
+         # Download checkpoint with caching
+         checkpoint_path = hf_hub_download(
+             repo_id="facebook/sam2-hiera-large",
+             filename="sam2_hiera_large.pt",
+             cache_dir=str(CACHE_DIR / "sam2_checkpoint"),
+             force_download=False
+         )
+         _prog(0.5, "SAM2 checkpoint downloaded, building model...")
+
+         # Import and build
+         from sam2.build_sam import build_sam2
+         from sam2.sam2_image_predictor import SAM2ImagePredictor
+
+         # Build model with explicit config
+         sam2_model = build_sam2("sam2_hiera_l.yaml", checkpoint_path)
+         sam2_model.to(device)
+         predictor = SAM2ImagePredictor(sam2_model)
+
+         # Test the predictor with dummy data
+         _prog(0.8, "Testing SAM2 functionality...")
+         test_image = np.zeros((256, 256, 3), dtype=np.uint8)
+         predictor.set_image(test_image)
+         test_points = np.array([[128, 128]])
+         test_labels = np.array([1])
+         masks, scores, _ = predictor.predict(
+             point_coords=test_points,
+             point_labels=test_labels,
+             multimask_output=False
+         )
+
+         if masks is None or len(masks) == 0:
+             raise Exception("SAM2 predictor test failed - no masks generated")
+
+         _prog(1.0, "SAM2 loaded and validated successfully!")
+         logger.info("SAM2 predictor loaded and tested successfully")
+         return predictor
+
    except Exception as e:
+         logger.error(f"SAM2 loading failed: {str(e)}")
+         logger.error(f"Full traceback: {traceback.format_exc()}")
+         raise Exception(f"SAM2 loading failed: {str(e)}")
+
+ # ============================================================================ #
+ # MATANYONE LOADER WITH VALIDATION
+ # ============================================================================ #
+ def load_matanyone_fixed(progress_callback: Optional[callable] = None) -> Any:
+     """Load MatAnyone with proper error handling and validation"""
+     def _prog(pct: float, desc: str):
+         if progress_callback:
+             progress_callback(pct, desc)

    try:
+         _prog(0.2, "Loading MatAnyone...")
+
+         from matanyone import InferenceCore
+         processor = InferenceCore("PeiqingYang/MatAnyone")
+
+         # Test MatAnyone with dummy data
+         _prog(0.8, "Testing MatAnyone functionality...")
+         test_image = np.zeros((256, 256, 3), dtype=np.uint8)
+         test_mask = np.zeros((256, 256), dtype=np.uint8)
+         test_mask[64:192, 64:192] = 255
+
+         # Test the processor
+         try:
+             if hasattr(processor, 'process') or hasattr(processor, '__call__'):
+                 logger.info("MatAnyone processor interface detected")
            else:
+                 logger.warning("MatAnyone interface unclear, will use fallback refinement")
+         except Exception as test_e:
+             logger.warning(f"MatAnyone test failed: {test_e}, will use enhanced OpenCV")
+
+         _prog(1.0, "MatAnyone loaded successfully!")
+         logger.info("MatAnyone processor loaded successfully")
+         return processor
+
    except Exception as e:
+         logger.error(f"MatAnyone loading failed: {str(e)}")
+         logger.error(f"Full traceback: {traceback.format_exc()}")
+         raise Exception(f"MatAnyone loading failed: {str(e)}")
+
+ # ============================================================================ #
+ # MODEL MANAGEMENT FUNCTIONS
+ # ============================================================================ #
+ def get_model_status() -> Dict[str, str]:
+     """Return current model status for UI"""
+     global sam2_predictor, matanyone_model, models_loaded
+     return {
+         'sam2': 'Ready' if sam2_predictor is not None else 'Not loaded',
+         'matanyone': 'Ready' if matanyone_model is not None else 'Not loaded',
+         'validated': models_loaded
+     }
+
+ def get_cache_status() -> Dict[str, Any]:
+     """Get current cache status"""
+     return {
+         "sam2_loaded": sam2_predictor is not None,
+         "matanyone_loaded": matanyone_model is not None,
+         "models_validated": models_loaded,
+         "two_stage_available": TWO_STAGE_AVAILABLE
+     }
+
+ def load_models_with_validation(progress_callback: Optional[callable] = None) -> str:
+     """Load models with comprehensive validation"""
+     global sam2_predictor, matanyone_model, models_loaded, two_stage_processor, PROCESS_CANCELLED
+
+     with loading_lock:
+         if models_loaded and not PROCESS_CANCELLED:
+             return "Models already loaded and validated"
+
+         try:
+             PROCESS_CANCELLED = False
+             start_time = time.time()
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+             logger.info(f"Starting model loading on {device}")
+
+             if progress_callback:
+                 progress_callback(0.0, "Starting model loading...")
+
+             # Load SAM2 with validation
+             sam2_predictor = load_sam2_predictor_fixed(device=device, progress_callback=progress_callback)
+
+             if PROCESS_CANCELLED:
+                 return "Model loading cancelled by user"
+
+             # Load MatAnyone with validation
+             matanyone_model = load_matanyone_fixed(progress_callback=progress_callback)
+
+             if PROCESS_CANCELLED:
+                 return "Model loading cancelled by user"
+
+             models_loaded = True
+
+             # Initialize two-stage processor if available
+             if TWO_STAGE_AVAILABLE:
+                 two_stage_processor = TwoStageProcessor(sam2_predictor, matanyone_model)
+                 logger.info("Two-stage processor initialized")
+
+             load_time = time.time() - start_time
+             message = f"SUCCESS: SAM2 + MatAnyone loaded and validated in {load_time:.1f}s"
+             if TWO_STAGE_AVAILABLE:
+                 message += " (Two-stage mode available)"
+             logger.info(message)
+             return message
+
+         except Exception as e:
+             models_loaded = False
+             error_msg = f"Model loading failed: {str(e)}"
+             logger.error(error_msg)
+             return error_msg
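+ # End-to-end usage sketch (paths are hypothetical):
+ #     msg = load_models_with_validation()
+ #     if msg.startswith("SUCCESS"):
+ #         out_path, info = process_video_fixed("/tmp/in.mp4", "custom", "/tmp/bg.jpg")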
+
+ # ============================================================================ #
+ # MAIN VIDEO PROCESSING - USING UTILITIES FUNCTIONS
+ # ============================================================================ #
+ def process_video_fixed(
+     video_path: str,
+     background_choice: str,
+     custom_background_path: Optional[str],
+     progress_callback: Optional[callable] = None,
+     use_two_stage: bool = False,
+     chroma_preset: str = "standard",
+     preview_mask: bool = False,
+     preview_greenscreen: bool = False
+ ) -> Tuple[Optional[str], str]:
+     """Optimized video processing using proper functions from utilities"""
+     global PROCESS_CANCELLED
+
+     if PROCESS_CANCELLED:
+         return None, "Processing cancelled by user"
+
+     if not models_loaded:
+         return None, "Models not loaded. Call load_models_with_validation() first."
+
+     if not video_path or not os.path.exists(video_path):
+         return None, f"Video file not found: {video_path}"
+
+     # Validate video file
+     is_valid, validation_msg = validate_video_file(video_path)
+     if not is_valid:
+         return None, f"Invalid video: {validation_msg}"
+
+     def _prog(pct: float, desc: str):
+         if PROCESS_CANCELLED:
+             raise Exception("Processing cancelled by user")
+
+         if progress_callback:
+             progress_callback(pct, desc)
+
+         # Update processing info file
+         if "Frame" in desc and "|" in desc:
+             parts = desc.split("|")
+             frame_info = parts[0].strip() if len(parts) > 0 else ""
+             time_info = parts[1].strip() if len(parts) > 1 else ""
+             fps_info = parts[2].strip() if len(parts) > 2 else ""
+             eta_info = parts[3].strip() if len(parts) > 3 else ""
+
+             display_text = f"""📊 PROCESSING STATUS
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━
+ 🎬 {frame_info}
+ ⏱️ Elapsed: {time_info}
+ ⚡ Speed: {fps_info}
+ 🎯 {eta_info}
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━
+ 📈 Progress: {pct*100:.1f}%"""
+
+             try:
+                 with open("/tmp/processing_info.txt", 'w') as f:
+                     f.write(display_text)
+             except Exception as e:
+                 logger.warning(f"Error writing processing info: {e}")

    try:
+         _prog(0.0, f"Starting {'TWO-STAGE' if use_two_stage else 'SINGLE-STAGE'} processing...")
+
+         # Check if two-stage mode is requested
+         if use_two_stage:
+             if not TWO_STAGE_AVAILABLE:
+                 return None, "Two-stage mode not available. Please add two_stage_processor.py file."
+
+             if two_stage_processor is None:
+                 return None, "Two-stage processor not initialized. Please reload models."
+
+             _prog(0.05, "Starting TWO-STAGE green screen processing...")
+
+             # Get video dimensions
+             cap = cv2.VideoCapture(video_path)
+             frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+             frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+             cap.release()
+
+             # Prepare background
+             if background_choice == "custom" and custom_background_path:
+                 if not os.path.exists(custom_background_path):
+                     return None, f"Custom background not found: {custom_background_path}"
+
+                 background = cv2.imread(custom_background_path)
+                 if background is None:
+                     return None, "Could not read custom background image."
+                 background_name = "Custom Image"
+
+             else:
+                 if background_choice in PROFESSIONAL_BACKGROUNDS:
+                     bg_config = PROFESSIONAL_BACKGROUNDS[background_choice]
+                     background = create_professional_background(bg_config, frame_width, frame_height)
+                     background_name = bg_config["name"]
+                 else:
+                     return None, f"Invalid background selection: {background_choice}"
+
+             # Get chroma settings
+             chroma_settings = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS['standard'])
+
+             # Run two-stage pipeline
+             timestamp = int(time.time())
+             final_output = f"/tmp/twostage_final_{timestamp}.mp4"
+
+             result, message = two_stage_processor.process_full_pipeline(
+                 video_path,
+                 background,
+                 final_output,
+                 chroma_settings=chroma_settings,
+                 progress_callback=_prog
+             )
+
+             if PROCESS_CANCELLED:
+                 return None, "Processing cancelled by user"
+
+             if result is None:
+                 return None, message
+
+             # Add audio back
+             _prog(0.9, "Adding audio...")
+             final_with_audio = f"/tmp/twostage_audio_{timestamp}.mp4"
+
+             try:
+                 audio_cmd = (
+                     f'ffmpeg -y -i "{final_output}" -i "{video_path}" '
+                     f'-c:v libx264 -crf 18 -preset medium '
+                     f'-c:a aac -b:a 192k -ac 2 -ar 48000 '
+                     f'-map 0:v:0 -map 1:a:0? -shortest "{final_with_audio}"'
+                 )
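+                 # -map 0:v:0 keeps the processed video stream; -map 1:a:0? copies
+                 # the source's first audio stream if present (the trailing "?"
+                 # stops ffmpeg failing on silent inputs); -shortest trims the
+                 # result to the shorter of the two.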
+                 os.system(audio_cmd)
+                 if os.path.exists(final_with_audio):
+                     os.remove(final_output)
+                     final_output = final_with_audio
+             except Exception as e:
+                 logger.warning(f"Audio processing error: {e}")
+                 final_with_audio = final_output  # Fall back to video without audio
+
+             _prog(1.0, "TWO-STAGE processing complete!")
+
+             success_message = (
+                 f"TWO-STAGE Success!\n"
+                 f"Background: {background_name}\n"
+                 f"Method: Green Screen Chroma Key\n"
+                 f"Preset: {chroma_preset}\n"
+                 f"Quality: Professional cinema-grade"
+             )
+
+             return final_output, success_message
+
+         # Single-stage processing
+         _prog(0.05, "Starting SINGLE-STAGE processing...")
+
+         cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
+             return None, "Could not open video file."
+
+         fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+         frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+         if total_frames == 0:
+             return None, "Video appears to be empty."
+
+         # Log video info
+         logger.info(f"Video info: {frame_width}x{frame_height}, {fps}fps, {total_frames} frames")
+
        # Prepare background
+         background = None
+         background_name = ""
+
+         if background_choice == "custom" and custom_background_path:
+             if not os.path.exists(custom_background_path):
+                 return None, f"Custom background not found: {custom_background_path}"
+
+             background = cv2.imread(custom_background_path)
+             if background is None:
+                 return None, "Could not read custom background image."
+             background_name = "Custom Image"
+
        else:
+             if background_choice in PROFESSIONAL_BACKGROUNDS:
+                 bg_config = PROFESSIONAL_BACKGROUNDS[background_choice]
+                 background = create_professional_background(bg_config, frame_width, frame_height)
+                 background_name = bg_config["name"]
+             else:
+                 return None, f"Invalid background selection: {background_choice}"
+
+         if background is None:
+             return None, "Failed to create background."
+
+         timestamp = int(time.time())
+         fourcc = cv2.VideoWriter_fourcc(*'avc1')  # H.264 for better compatibility
+
+         _prog(0.1, f"Processing {total_frames} frames with SINGLE-STAGE processing...")
+
+         # One output path for both preview and normal runs, so the cancel and
+         # cleanup code below always has a defined file to remove
+         if preview_mask or preview_greenscreen:
+             final_path = f"/tmp/preview_{timestamp}.mp4"
+         else:
+             final_path = f"/tmp/output_{timestamp}.mp4"
+         final_writer = cv2.VideoWriter(final_path, fourcc, fps, (frame_width, frame_height))
+
+         if not final_writer.isOpened():
+             return None, "Could not create output video file."
+
+         frame_count = 0
+         successful_frames = 0
+         last_refined_mask = None
+
+         # Processing stats
+         start_time = time.time()
+
        while True:
+             if PROCESS_CANCELLED:
+                 cap.release()
+                 final_writer.release()
+                 if os.path.exists(final_path):
+                     os.remove(final_path)
+                 return None, "Processing cancelled by user"
+
            ret, frame = cap.read()
            if not ret:
                break
+
+             # Skip frames if FRAME_SKIP > 1
+             if frame_count % FRAME_SKIP != 0:
+                 frame_count += 1
+                 continue
+
+             try:
+                 # Update progress with detailed timing info and ETA
+                 elapsed_time = time.time() - start_time
+                 current_fps = frame_count / elapsed_time if elapsed_time > 0 else 0
+                 remaining_frames = total_frames - frame_count
+                 eta_seconds = remaining_frames / current_fps if current_fps > 0 else 0
+                 eta_display = f"{int(eta_seconds//60)}m {int(eta_seconds%60)}s" if eta_seconds > 60 else f"{int(eta_seconds)}s"
+
+                 progress_msg = f"Frame {frame_count + 1}/{total_frames} | {elapsed_time:.1f}s | {current_fps:.1f} fps | ETA: {eta_display}"
+
+                 # Log and display progress
+                 logger.info(progress_msg)
+                 _prog(0.1 + (frame_count / max(1, total_frames)) * 0.8, progress_msg)
+
+                 # CRITICAL: Use functions from utilities.py, not local implementations!
+                 # SAM2 segmentation using utilities function
+                 mask = segment_person_hq(frame, sam2_predictor)
+
+                 if preview_mask:
+                     # Save mask visualization
+                     mask_vis = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
+                     mask_vis[..., ::2] = 0  # Zero B and R channels so the mask renders green
+                     final_writer.write(mask_vis)
+                     frame_count += 1
+                     continue
+
+                 # MatAnyone refinement on keyframes using utilities function
+                 if (frame_count % KEYFRAME_INTERVAL == 0) or (last_refined_mask is None):
+                     refined_mask = refine_mask_hq(frame, mask, matanyone_model)
+                     last_refined_mask = refined_mask.copy()
+                     logger.info(f"Keyframe refinement at frame {frame_count}")
+                 else:
+                     # Blend SAM2 mask with last refined mask for temporal smoothness
+                     alpha = 0.7
+                     refined_mask = cv2.addWeighted(mask, alpha, last_refined_mask, 1 - alpha, 0)
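+                     # alpha=0.7 keeps 70% of the fresh SAM2 mask and 30% of the
+                     # last MatAnyone keyframe: cheaper than per-frame refinement,
+                     # steadier than raw per-frame masks.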
+
+                 if preview_greenscreen:
+                     # Create green screen preview
+                     green_bg = np.zeros_like(frame)
+                     green_bg[:, :] = [0, 255, 0]  # Pure green (BGR)
+                     preview_frame = frame.copy()
+                     mask_3ch = cv2.cvtColor(refined_mask, cv2.COLOR_GRAY2BGR)
+                     mask_norm = mask_3ch.astype(float) / 255
+                     preview_frame = preview_frame * mask_norm + green_bg * (1 - mask_norm)
+                     final_writer.write(preview_frame.astype(np.uint8))
+                     frame_count += 1
+                     continue
+
+                 # CRITICAL: Use replace_background_hq from utilities which has the transparency fix!
+                 result_frame = replace_background_hq(frame, refined_mask, background)
+                 final_writer.write(result_frame)
+                 successful_frames += 1
+
+             except Exception as frame_error:
+                 logger.warning(f"Error processing frame {frame_count}: {frame_error}")
+                 # Write original frame if processing fails
+                 final_writer.write(frame)
+
+             frame_count += 1
+
+             # Memory management
+             if frame_count % MEMORY_CLEANUP_INTERVAL == 0:
+                 gc.collect()
+                 if torch.cuda.is_available():
+                     torch.cuda.empty_cache()
+                 elapsed = time.time() - start_time
+                 fps_actual = frame_count / elapsed
+                 eta = (total_frames - frame_count) / fps_actual if fps_actual > 0 else 0
+                 logger.info(f"Progress: {frame_count}/{total_frames}, FPS: {fps_actual:.1f}, ETA: {eta:.0f}s")
+
        cap.release()
+         final_writer.release()
+
+         if PROCESS_CANCELLED:
+             if os.path.exists(final_path):
+                 os.remove(final_path)
+             return None, "Processing cancelled by user"
+
+         if successful_frames == 0 and not (preview_mask or preview_greenscreen):
+             return None, "No frames were processed successfully with AI."
+
+         # Calculate processing stats
        total_time = time.time() - start_time
+         avg_fps = frame_count / total_time if total_time > 0 else 0
+
+         _prog(0.9, "Finalizing output...")
+
+         if preview_mask or preview_greenscreen:
+             final_output = final_path
+         else:
+             # Add audio back for final output
+             _prog(0.9, "Adding audio...")
+             final_output = f"/tmp/final_{timestamp}.mp4"
+
+             try:
+                 audio_cmd = (
+                     f'ffmpeg -y -i "{final_path}" -i "{video_path}" '
+                     f'-c:v libx264 -crf 18 -preset medium '
+                     f'-c:a aac -b:a 192k -ac 2 -ar 48000 '
+                     f'-map 0:v:0 -map 1:a:0? -shortest "{final_output}"'
+                 )
+                 result = os.system(audio_cmd)
+                 if result != 0 or not os.path.exists(final_output):
+                     shutil.copy2(final_path, final_output)
+             except Exception as e:
+                 logger.warning(f"Audio processing error: {e}")
+                 shutil.copy2(final_path, final_output)
+
+             # Cleanup the intermediate (video-only) file
+             try:
+                 if os.path.exists(final_path):
+                     os.remove(final_path)
+             except Exception as e:
+                 logger.warning(f"Cleanup error: {e}")
+
+         _prog(1.0, "Processing complete!")
+
+         success_message = (
+             f"Success!\n"
+             f"Background: {background_name}\n"
+             f"Resolution: {frame_width}x{frame_height}\n"
+             f"Total frames: {frame_count}\n"
+             f"Successfully processed: {successful_frames}\n"
+             f"Processing time: {total_time:.1f}s\n"
+             f"Average FPS: {avg_fps:.1f}\n"
+             f"Keyframe interval: {KEYFRAME_INTERVAL}\n"
+             f"Mode: {'TWO-STAGE' if use_two_stage else 'SINGLE-STAGE'}"
+         )
+
+         return final_output, success_message
+
+     except Exception as e:
+         logger.error(f"Processing error: {traceback.format_exc()}")
+         return None, f"Processing Error: {str(e)}"
+
+ # ============================================================================ #
+ # MAIN - IMPORT UI COMPONENTS
+ # ============================================================================ #
+ def main():
    try:
+         print("===== FINAL FIXED VIDEO BACKGROUND REPLACEMENT =====")
+         print(f"Keyframe interval: {KEYFRAME_INTERVAL} frames")
+         print(f"Frame skip: {FRAME_SKIP} (1=all frames, 2=every other)")
+         print(f"Two-stage mode: {'AVAILABLE' if TWO_STAGE_AVAILABLE else 'NOT AVAILABLE'}")
+         print("Loading UI components...")
+
+         # Import UI components
+         from ui_components import create_interface
+
+         os.makedirs("/tmp/MyAvatar/My_Videos/", exist_ok=True)
+         CACHE_DIR.mkdir(exist_ok=True, parents=True)
+
+         print("Creating interface...")
+         demo = create_interface()
+
+         print("Launching...")
+         demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
+             share=True,
+             show_error=True,
+             debug=True,
+             enable_queue=True
        )
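+         # Note: enable_queue= is a Gradio 3.x launch() argument; Gradio 4.x
+         # removed it in favor of calling demo.queue() before launch().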
+
    except Exception as e:
+         logger.error(f"Startup failed: {e}")
+         print(f"Startup failed: {e}")
+
+ if __name__ == "__main__":
+     main()