#!/usr/bin/env python3
"""
Video Background Replacement - Main Application Entry Point
Refactored modular architecture - orchestrates specialized components

This file has been refactored from a monolithic 600+ line structure into
a clean orchestration layer that coordinates specialized modules:
- config: Application configuration and environment variables
- device_manager: Hardware detection and optimization
- memory_manager: Memory and GPU resource management
- model_loader: AI model loading and validation
- video_processor: Core video processing pipeline
- audio_processor: Audio track handling and FFmpeg operations
- progress_tracker: Progress monitoring and ETA calculations
- exceptions: Custom exception classes for better error handling
"""

import os
import logging
import threading
from pathlib import Path
from typing import Optional, Tuple, Dict, Any, Callable

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Apply Gradio schema patch early (before the component imports below)
try:
    import gradio_client.utils as gc_utils
    original_get_type = gc_utils.get_type
    
    def patched_get_type(schema):
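        # Some gradio_client versions pass non-dict schemas here (e.g. a bare bool
        # from "additionalProperties": true); map those to a reasonable type name
        # instead of letting the original get_type raise.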
        if not isinstance(schema, dict):
            if isinstance(schema, bool):
                return "boolean"
            if isinstance(schema, str):
                return "string"
            if isinstance(schema, (int, float)):
                return "number"
            return "string"
        return original_get_type(schema)
    
    gc_utils.get_type = patched_get_type
    logger.info("Gradio schema patch applied successfully")
except Exception as e:
    logger.error(f"Gradio patch failed: {e}")

# Import modular components
from app_config import ProcessingConfig
from device_manager import DeviceManager
from memory_manager import MemoryManager
from model_loader import ModelLoader
from video_processor import CoreVideoProcessor
from audio_processor import AudioProcessor
from progress_tracker import ProgressTracker
from exceptions import VideoProcessingError, ModelLoadingError, DeviceError

# Import utilities (existing)
from utilities import (
    segment_person_hq,
    refine_mask_hq,
    replace_background_hq,
    create_professional_background,
    PROFESSIONAL_BACKGROUNDS,
    validate_video_file
)

# Import two-stage processor if available
try:
    from two_stage_processor import TwoStageProcessor, CHROMA_PRESETS
    TWO_STAGE_AVAILABLE = True
except ImportError:
    TWO_STAGE_AVAILABLE = False
    CHROMA_PRESETS = {'standard': {}}

class VideoProcessor:
    """
    Main video processing orchestrator - coordinates all specialized components
    """
    
    def __init__(self):
        """Initialize the video processor with all required components"""
        self.config = ProcessingConfig()
        self.device_manager = DeviceManager()
        self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
        self.model_loader = ModelLoader(self.device_manager.get_optimal_device())
        self.audio_processor = AudioProcessor()
        
        # Initialize core processor (will be set up after models load)
        self.core_processor = None
        self.two_stage_processor = None
        
        # State management
        self.models_loaded = False
        self.loading_lock = threading.Lock()
        self.cancel_event = threading.Event()
        
        logger.info(f"VideoProcessor initialized on device: {self.device_manager.get_optimal_device()}")
    
    def load_models(self, progress_callback: Optional[Callable] = None) -> str:
        """Load and validate all AI models"""
        with self.loading_lock:
            if self.models_loaded:
                return "Models already loaded and validated"
            
            try:
                self.cancel_event.clear()
                
                if progress_callback:
                    progress_callback(0.0, f"Starting model loading on {self.device_manager.get_optimal_device()}")
                
                # Load models using the specialized loader
                sam2_predictor, matanyone_model = self.model_loader.load_all_models(
                    progress_callback=progress_callback,
                    cancel_event=self.cancel_event
                )
                
                if self.cancel_event.is_set():
                    return "Model loading cancelled"
                
                # Initialize core processor with loaded models
                self.core_processor = CoreVideoProcessor(
                    sam2_predictor=sam2_predictor,
                    matanyone_model=matanyone_model,
                    config=self.config,
                    memory_manager=self.memory_manager
                )
                
                # Initialize two-stage processor if available
                if TWO_STAGE_AVAILABLE and sam2_predictor and matanyone_model:
                    try:
                        self.two_stage_processor = TwoStageProcessor(sam2_predictor, matanyone_model)
                        logger.info("Two-stage processor initialized")
                    except Exception as e:
                        logger.warning(f"Two-stage processor init failed: {e}")
                
                self.models_loaded = True
                message = self.model_loader.get_load_summary()
                logger.info(message)
                return message
                
            except ModelLoadingError as e:
                self.models_loaded = False
                error_msg = f"Model loading failed: {str(e)}"
                logger.error(error_msg)
                return error_msg
            except Exception as e:
                self.models_loaded = False
                error_msg = f"Unexpected error during model loading: {str(e)}"
                logger.error(error_msg)
                return error_msg
    
    def process_video(
        self,
        video_path: str,
        background_choice: str,
        custom_background_path: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        use_two_stage: bool = False,
        chroma_preset: str = "standard",
        preview_mask: bool = False,
        preview_greenscreen: bool = False
    ) -> Tuple[Optional[str], str]:
        """Process video with the specified parameters"""
        
        if not self.models_loaded or not self.core_processor:
            return None, "Models not loaded. Please load models first."
        
        if self.cancel_event.is_set():
            return None, "Processing cancelled"
        
        # Validate input file
        is_valid, validation_msg = validate_video_file(video_path)
        if not is_valid:
            return None, f"Invalid video: {validation_msg}"
        
        try:
            # Route to appropriate processing method
            if use_two_stage and TWO_STAGE_AVAILABLE and self.two_stage_processor:
                return self._process_two_stage(
                    video_path, background_choice, custom_background_path,
                    progress_callback, chroma_preset
                )
            else:
                return self._process_single_stage(
                    video_path, background_choice, custom_background_path,
                    progress_callback, preview_mask, preview_greenscreen
                )
                
        except VideoProcessingError as e:
            logger.error(f"Video processing failed: {e}")
            return None, f"Processing failed: {str(e)}"
        except Exception as e:
            logger.error(f"Unexpected error during video processing: {e}")
            return None, f"Unexpected error: {str(e)}"
    
    def _process_single_stage(
        self,
        video_path: str,
        background_choice: str,
        custom_background_path: Optional[str],
        progress_callback: Optional[Callable],
        preview_mask: bool,
        preview_greenscreen: bool
    ) -> Tuple[Optional[str], str]:
        """Process video using single-stage pipeline"""
        
        # Process video using core processor
        processed_video_path, process_message = self.core_processor.process_video(
            video_path=video_path,
            background_choice=background_choice,
            custom_background_path=custom_background_path,
            progress_callback=progress_callback,
            cancel_event=self.cancel_event,
            preview_mask=preview_mask,
            preview_greenscreen=preview_greenscreen
        )
        
        if processed_video_path is None:
            return None, process_message
        
        # Add audio if not in preview mode
        if not (preview_mask or preview_greenscreen):
            final_video_path = self.audio_processor.add_audio_to_video(
                original_video=video_path,
                processed_video=processed_video_path
            )
        else:
            final_video_path = processed_video_path
        
        success_msg = (
            f"{process_message}\n"
            f"Background: {background_choice}\n"
            f"Mode: Single-stage\n"
            f"Device: {self.device_manager.get_optimal_device()}"
        )
        
        return final_video_path, success_msg
    
    def _process_two_stage(
        self,
        video_path: str,
        background_choice: str,
        custom_background_path: Optional[str],
        progress_callback: Optional[Callable],
        chroma_preset: str
    ) -> Tuple[Optional[str], str]:
        """Process video using two-stage pipeline"""
        
        # Get video dimensions for background preparation
        import cv2
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            cap.release()
            return None, "Could not open video to read its dimensions"
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cap.release()
        
        # Prepare background using core processor
        background = self.core_processor.prepare_background(
            background_choice, custom_background_path, frame_width, frame_height
        )
        if background is None:
            return None, "Failed to prepare background"
        
        # Process with two-stage pipeline
        import time
        timestamp = int(time.time())
        final_output = f"/tmp/twostage_final_{timestamp}.mp4"
        
        chroma_settings = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS['standard'])
        
        result, message = self.two_stage_processor.process_full_pipeline(
            video_path,
            background,
            final_output,
            chroma_settings=chroma_settings,
            progress_callback=progress_callback
        )
        
        if result is None:
            return None, message
        
        success_msg = (
            f"Two-stage success!\n"
            f"Background: {background_choice}\n"
            f"Preset: {chroma_preset}\n"
            f"Quality: Cinema-grade\n"
            f"Device: {self.device_manager.get_optimal_device()}"
        )
        
        return result, success_msg
    
    def get_status(self) -> Dict[str, Any]:
        """Get comprehensive status of all components"""
        base_status = {
            'models_loaded': self.models_loaded,
            'two_stage_available': TWO_STAGE_AVAILABLE and self.two_stage_processor is not None,
            'device': str(self.device_manager.get_optimal_device()),
            'memory_usage': self.memory_manager.get_memory_usage(),
            'config': self.config.to_dict()
        }
        
        # Add model-specific status if available
        if self.model_loader:
            base_status.update(self.model_loader.get_status())
        
        # Add processing status if available
        if self.core_processor:
            base_status.update(self.core_processor.get_status())
        
        return base_status
    
    def cancel_processing(self):
        """Cancel any ongoing processing"""
        self.cancel_event.set()
        logger.info("Processing cancellation requested")
    
    def cleanup_resources(self):
        """Clean up all resources"""
        self.memory_manager.cleanup_aggressive()
        if self.model_loader:
            self.model_loader.cleanup()
        logger.info("Resources cleaned up")

# Global processor instance for application
processor = VideoProcessor()

# Backward compatibility functions for existing UI
def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
    """Load models with validation - backward compatibility wrapper"""
    return processor.load_models(progress_callback)

def process_video_fixed(
    video_path: str,
    background_choice: str,
    custom_background_path: Optional[str],
    progress_callback: Optional[Callable] = None,
    use_two_stage: bool = False,
    chroma_preset: str = "standard",
    preview_mask: bool = False,
    preview_greenscreen: bool = False
) -> Tuple[Optional[str], str]:
    """Process video - backward compatibility wrapper"""
    return processor.process_video(
        video_path, background_choice, custom_background_path,
        progress_callback, use_two_stage, chroma_preset,
        preview_mask, preview_greenscreen
    )

def get_model_status() -> Dict[str, Any]:
    """Get model status - backward compatibility wrapper"""
    return processor.get_status()

def get_cache_status() -> Dict[str, Any]:
    """Get cache status - backward compatibility wrapper"""
    return processor.get_status()

# For backward compatibility
PROCESS_CANCELLED = processor.cancel_event
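
# Illustrative programmatic usage (a sketch, not part of the application flow;
# the Gradio UI in ui_components normally drives these wrappers, and the
# argument values below are placeholders):
#
#     status_msg = load_models_with_validation()
#     output_path, message = process_video_fixed(
#         "input.mp4",   # path to a source video
#         "office",      # placeholder key; valid names come from PROFESSIONAL_BACKGROUNDS
#         None,          # or a path to a custom background image
#     )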

def main():
    """Main application entry point"""
    try:
        logger.info("Starting Video Background Replacement application")
        logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
        logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
        logger.info("Modular architecture loaded successfully")
        
        # Import and create UI
        from ui_components import create_interface
        demo = create_interface()
        
        # Launch application
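        # Note: share=True requests a public Gradio share link and
        # server_name="0.0.0.0" binds to all interfaces; tighten these
        # settings for deployments that should stay private.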
        demo.queue().launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=True,
            show_error=True,
            debug=False
        )
        
    except Exception as e:
        logger.error(f"Application startup failed: {e}")
        raise
    finally:
        # Cleanup on exit
        processor.cleanup_resources()

if __name__ == "__main__":
    main()