MogensR committed on
Commit
2d694e6
·
1 Parent(s): 27391e2

Rename app.py to core/app.py

Browse files
Files changed (1) hide show
  1. app.py → core/app.py +54 -103
app.py → core/app.py RENAMED
@@ -2,32 +2,20 @@
2
  """
3
  BackgroundFX Pro - Main Application Entry Point
4
  Refactored modular architecture - orchestrates specialized components
5
-
6
- This file has been refactored from a monolithic 600+ line structure into
7
- a clean orchestration layer that coordinates specialized modules:
8
- - config: Application configuration and environment variables
9
- - device_manager: Hardware detection and optimization
10
- - memory_manager: Memory and GPU resource management
11
- - model_loader: AI model loading and validation
12
- - video_processor: Core video processing pipeline
13
- - audio_processor: Audio track handling and FFmpeg operations
14
- - progress_tracker: Progress monitoring and ETA calculations
15
- - exceptions: Custom exception classes for better error handling
16
  """
17
 
18
  import os
19
- import sys
20
  import logging
21
  import threading
22
  from pathlib import Path
23
  from typing import Optional, Tuple, Dict, Any, Callable
24
 
25
- # Add parent directory to path for imports
26
- sys.path.insert(0, str(Path(__file__).parent.parent))
27
-
28
- # Configure logging
29
- from utils.logging import setup_logging
30
- logger = setup_logging(__name__)
31
 
32
  # Apply Gradio schema patch early (before other imports)
33
  try:
@@ -50,26 +38,20 @@ def patched_get_type(schema):
50
  except Exception as e:
51
  logger.error(f"Gradio patch failed: {e}")
52
 
53
- # Import configuration
54
- from config.app_config import ProcessingConfig
55
 
56
- # Import core components
57
  from core.exceptions import ModelLoadingError, VideoProcessingError
58
-
59
- # Import utilities
60
  from utils.hardware.device_manager import DeviceManager
61
  from utils.system.memory_manager import MemoryManager
62
- from utils.monitoring.progress_tracker import ProgressTracker
63
-
64
- # Import models
65
  from models.loaders.model_loader import ModelLoader
66
-
67
- # Import processing components
68
  from processing.video.video_processor import CoreVideoProcessor
69
  from processing.audio.audio_processor import AudioProcessor
 
70
 
71
- # Import legacy utilities (during migration)
72
- from utils.legacy import (
73
  segment_person_hq,
74
  refine_mask_hq,
75
  replace_background_hq,
@@ -80,21 +62,20 @@ def patched_get_type(schema):
80
 
81
  # Import two-stage processor if available
82
  try:
83
- from processing.advanced.two_stage_processor import TwoStageProcessor, CHROMA_PRESETS
84
  TWO_STAGE_AVAILABLE = True
85
  except ImportError:
86
  TWO_STAGE_AVAILABLE = False
87
  CHROMA_PRESETS = {'standard': {}}
88
- logger.warning("Two-stage processor not available")
89
 
90
- class BackgroundFXProcessor:
91
  """
92
  Main video processing orchestrator - coordinates all specialized components
93
  """
94
 
95
- def __init__(self, config: Optional[ProcessingConfig] = None):
96
  """Initialize the video processor with all required components"""
97
- self.config = config or ProcessingConfig()
98
  self.device_manager = DeviceManager()
99
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
100
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
@@ -110,21 +91,19 @@ def __init__(self, config: Optional[ProcessingConfig] = None):
110
  self.loading_lock = threading.Lock()
111
  self.cancel_event = threading.Event()
112
 
113
- logger.info(f"BackgroundFX Pro initialized on device: {self.device_manager.get_optimal_device()}")
114
- logger.info(f"Configuration: {self.config.to_dict()}")
115
 
116
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
117
  """Load and validate all AI models"""
118
  with self.loading_lock:
119
  if self.models_loaded:
120
- return "Models already loaded and validated"
121
 
122
  try:
123
  self.cancel_event.clear()
124
- self.progress_tracker.start("model_loading")
125
 
126
  if progress_callback:
127
- progress_callback(0.0, f"🚀 Starting model loading on {self.device_manager.get_optimal_device()}")
128
 
129
  # Load models using the specialized loader
130
  sam2_predictor, matanyone_model = self.model_loader.load_all_models(
@@ -133,8 +112,7 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
133
  )
134
 
135
  if self.cancel_event.is_set():
136
- self.progress_tracker.cancel("model_loading")
137
- return "⚠️ Model loading cancelled"
138
 
139
  # Initialize core processor with loaded models
140
  self.core_processor = CoreVideoProcessor(
@@ -147,32 +125,24 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
147
  # Initialize two-stage processor if available
148
  if TWO_STAGE_AVAILABLE and sam2_predictor and matanyone_model:
149
  try:
150
- self.two_stage_processor = TwoStageProcessor(
151
- sam2_predictor,
152
- matanyone_model,
153
- memory_manager=self.memory_manager
154
- )
155
- logger.info("✓ Two-stage processor initialized")
156
  except Exception as e:
157
  logger.warning(f"Two-stage processor init failed: {e}")
158
 
159
  self.models_loaded = True
160
- self.progress_tracker.complete("model_loading")
161
-
162
  message = self.model_loader.get_load_summary()
163
  logger.info(message)
164
- return f"✓ {message}"
165
 
166
  except ModelLoadingError as e:
167
  self.models_loaded = False
168
- self.progress_tracker.fail("model_loading", str(e))
169
- error_msg = f"❌ Model loading failed: {str(e)}"
170
  logger.error(error_msg)
171
  return error_msg
172
  except Exception as e:
173
  self.models_loaded = False
174
- self.progress_tracker.fail("model_loading", str(e))
175
- error_msg = f"❌ Unexpected error during model loading: {str(e)}"
176
  logger.error(error_msg)
177
  return error_msg
178
 
@@ -185,48 +155,40 @@ def process_video(
185
  use_two_stage: bool = False,
186
  chroma_preset: str = "standard",
187
  preview_mask: bool = False,
188
- preview_greenscreen: bool = False,
189
- **kwargs
190
  ) -> Tuple[Optional[str], str]:
191
  """Process video with the specified parameters"""
192
 
193
  if not self.models_loaded or not self.core_processor:
194
- return None, "Models not loaded. Please load models first."
195
 
196
  if self.cancel_event.is_set():
197
- return None, "⚠️ Processing cancelled"
198
 
199
  # Validate input file
200
  is_valid, validation_msg = validate_video_file(video_path)
201
  if not is_valid:
202
- return None, f"Invalid video: {validation_msg}"
203
 
204
  try:
205
- self.progress_tracker.start("video_processing")
206
-
207
  # Route to appropriate processing method
208
  if use_two_stage and TWO_STAGE_AVAILABLE and self.two_stage_processor:
209
- result = self._process_two_stage(
210
  video_path, background_choice, custom_background_path,
211
  progress_callback, chroma_preset
212
  )
213
  else:
214
- result = self._process_single_stage(
215
  video_path, background_choice, custom_background_path,
216
  progress_callback, preview_mask, preview_greenscreen
217
  )
218
-
219
- self.progress_tracker.complete("video_processing")
220
- return result
221
 
222
  except VideoProcessingError as e:
223
- self.progress_tracker.fail("video_processing", str(e))
224
  logger.error(f"Video processing failed: {e}")
225
- return None, f"Processing failed: {str(e)}"
226
  except Exception as e:
227
- self.progress_tracker.fail("video_processing", str(e))
228
  logger.error(f"Unexpected error during video processing: {e}")
229
- return None, f"Unexpected error: {str(e)}"
230
 
231
  def _process_single_stage(
232
  self,
@@ -263,10 +225,10 @@ def _process_single_stage(
263
  final_video_path = processed_video_path
264
 
265
  success_msg = (
266
- f"{process_message}\n"
267
- f"📹 Background: {background_choice}\n"
268
- f"Mode: Single-stage\n"
269
- f"🖥️ Device: {self.device_manager.get_optimal_device()}"
270
  )
271
 
272
  return final_video_path, success_msg
@@ -293,7 +255,7 @@ def _process_two_stage(
293
  background_choice, custom_background_path, frame_width, frame_height
294
  )
295
  if background is None:
296
- return None, "Failed to prepare background"
297
 
298
  # Process with two-stage pipeline
299
  import time
@@ -316,11 +278,11 @@ def _process_two_stage(
316
  return None, message
317
 
318
  success_msg = (
319
- f"Two-stage success!\n"
320
- f"📹 Background: {background_choice}\n"
321
- f"🎨 Preset: {chroma_preset}\n"
322
- f"🌟 Quality: Cinema-grade\n"
323
- f"🖥️ Device: {self.device_manager.get_optimal_device()}"
324
  )
325
 
326
  return result, success_msg
@@ -328,14 +290,11 @@ def _process_two_stage(
328
  def get_status(self) -> Dict[str, Any]:
329
  """Get comprehensive status of all components"""
330
  base_status = {
331
- 'app': 'BackgroundFX Pro',
332
- 'version': '2.0.0',
333
  'models_loaded': self.models_loaded,
334
  'two_stage_available': TWO_STAGE_AVAILABLE and self.two_stage_processor is not None,
335
  'device': str(self.device_manager.get_optimal_device()),
336
  'memory_usage': self.memory_manager.get_memory_usage(),
337
- 'config': self.config.to_dict(),
338
- 'progress': self.progress_tracker.get_all_progress()
339
  }
340
 
341
  # Add model-specific status if available
@@ -346,12 +305,15 @@ def get_status(self) -> Dict[str, Any]:
346
  if self.core_processor:
347
  base_status.update(self.core_processor.get_status())
348
 
 
 
 
 
349
  return base_status
350
 
351
  def cancel_processing(self):
352
  """Cancel any ongoing processing"""
353
  self.cancel_event.set()
354
- self.progress_tracker.cancel_all()
355
  logger.info("Processing cancellation requested")
356
 
357
  def cleanup_resources(self):
@@ -359,20 +321,12 @@ def cleanup_resources(self):
359
  self.memory_manager.cleanup_aggressive()
360
  if self.model_loader:
361
  self.model_loader.cleanup()
362
- self.progress_tracker.cleanup()
363
  logger.info("Resources cleaned up")
364
 
365
  # Global processor instance for application
366
- processor = BackgroundFXProcessor()
367
-
368
- # ============================================================================
369
- # BACKWARD COMPATIBILITY LAYER
370
- # These functions maintain compatibility with existing UI and scripts
371
- # ============================================================================
372
-
373
- # Alias for backward compatibility
374
- VideoProcessor = BackgroundFXProcessor
375
 
 
376
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
377
  """Load models with validation - backward compatibility wrapper"""
378
  return processor.load_models(progress_callback)
@@ -408,25 +362,22 @@ def get_cache_status() -> Dict[str, Any]:
408
  def main():
409
  """Main application entry point"""
410
  try:
411
- logger.info("=" * 60)
412
- logger.info("Starting BackgroundFX Pro")
413
- logger.info("=" * 60)
414
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
415
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
416
  logger.info("Modular architecture loaded successfully")
417
 
418
  # Import and create UI
419
- from web.ui_components import create_interface
420
  demo = create_interface()
421
 
422
  # Launch application
423
- demo.queue(max_size=10).launch(
424
  server_name="0.0.0.0",
425
  server_port=7860,
426
  share=True,
427
  show_error=True,
428
- debug=False,
429
- favicon_path="web/static/favicon.ico"
430
  )
431
 
432
  except Exception as e:
 
2
  """
3
  BackgroundFX Pro - Main Application Entry Point
4
  Refactored modular architecture - orchestrates specialized components
 
 
 
 
 
 
 
 
 
 
 
5
  """
6
 
7
  import os
 
8
  import logging
9
  import threading
10
  from pathlib import Path
11
  from typing import Optional, Tuple, Dict, Any, Callable
12
 
13
+ # Configure logging first
14
+ logging.basicConfig(
15
+ level=logging.INFO,
16
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
17
+ )
18
+ logger = logging.getLogger(__name__)
19
 
20
  # Apply Gradio schema patch early (before other imports)
21
  try:
 
38
  except Exception as e:
39
  logger.error(f"Gradio patch failed: {e}")
40
 
41
+ # Import configuration from new location
42
+ from config.app_config import ProcessingConfig, get_config
43
 
44
+ # Import core components from new locations
45
  from core.exceptions import ModelLoadingError, VideoProcessingError
 
 
46
  from utils.hardware.device_manager import DeviceManager
47
  from utils.system.memory_manager import MemoryManager
 
 
 
48
  from models.loaders.model_loader import ModelLoader
 
 
49
  from processing.video.video_processor import CoreVideoProcessor
50
  from processing.audio.audio_processor import AudioProcessor
51
+ from utils.monitoring.progress_tracker import ProgressTracker
52
 
53
+ # Import existing utilities (temporary during migration)
54
+ from utilities import (
55
  segment_person_hq,
56
  refine_mask_hq,
57
  replace_background_hq,
 
62
 
63
  # Import two-stage processor if available
64
  try:
65
+ from two_stage_processor import TwoStageProcessor, CHROMA_PRESETS
66
  TWO_STAGE_AVAILABLE = True
67
  except ImportError:
68
  TWO_STAGE_AVAILABLE = False
69
  CHROMA_PRESETS = {'standard': {}}
 
70
 
71
+ class VideoProcessor:
72
  """
73
  Main video processing orchestrator - coordinates all specialized components
74
  """
75
 
76
+ def __init__(self):
77
  """Initialize the video processor with all required components"""
78
+ self.config = get_config() # Use singleton config
79
  self.device_manager = DeviceManager()
80
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
81
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
 
91
  self.loading_lock = threading.Lock()
92
  self.cancel_event = threading.Event()
93
 
94
+ logger.info(f"VideoProcessor initialized on device: {self.device_manager.get_optimal_device()}")
 
95
 
96
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
97
  """Load and validate all AI models"""
98
  with self.loading_lock:
99
  if self.models_loaded:
100
+ return "Models already loaded and validated"
101
 
102
  try:
103
  self.cancel_event.clear()
 
104
 
105
  if progress_callback:
106
+ progress_callback(0.0, f"Starting model loading on {self.device_manager.get_optimal_device()}")
107
 
108
  # Load models using the specialized loader
109
  sam2_predictor, matanyone_model = self.model_loader.load_all_models(
 
112
  )
113
 
114
  if self.cancel_event.is_set():
115
+ return "Model loading cancelled"
 
116
 
117
  # Initialize core processor with loaded models
118
  self.core_processor = CoreVideoProcessor(
 
125
  # Initialize two-stage processor if available
126
  if TWO_STAGE_AVAILABLE and sam2_predictor and matanyone_model:
127
  try:
128
+ self.two_stage_processor = TwoStageProcessor(sam2_predictor, matanyone_model)
129
+ logger.info("Two-stage processor initialized")
 
 
 
 
130
  except Exception as e:
131
  logger.warning(f"Two-stage processor init failed: {e}")
132
 
133
  self.models_loaded = True
 
 
134
  message = self.model_loader.get_load_summary()
135
  logger.info(message)
136
+ return message
137
 
138
  except ModelLoadingError as e:
139
  self.models_loaded = False
140
+ error_msg = f"Model loading failed: {str(e)}"
 
141
  logger.error(error_msg)
142
  return error_msg
143
  except Exception as e:
144
  self.models_loaded = False
145
+ error_msg = f"Unexpected error during model loading: {str(e)}"
 
146
  logger.error(error_msg)
147
  return error_msg
148
 
 
155
  use_two_stage: bool = False,
156
  chroma_preset: str = "standard",
157
  preview_mask: bool = False,
158
+ preview_greenscreen: bool = False
 
159
  ) -> Tuple[Optional[str], str]:
160
  """Process video with the specified parameters"""
161
 
162
  if not self.models_loaded or not self.core_processor:
163
+ return None, "Models not loaded. Please load models first."
164
 
165
  if self.cancel_event.is_set():
166
+ return None, "Processing cancelled"
167
 
168
  # Validate input file
169
  is_valid, validation_msg = validate_video_file(video_path)
170
  if not is_valid:
171
+ return None, f"Invalid video: {validation_msg}"
172
 
173
  try:
 
 
174
  # Route to appropriate processing method
175
  if use_two_stage and TWO_STAGE_AVAILABLE and self.two_stage_processor:
176
+ return self._process_two_stage(
177
  video_path, background_choice, custom_background_path,
178
  progress_callback, chroma_preset
179
  )
180
  else:
181
+ return self._process_single_stage(
182
  video_path, background_choice, custom_background_path,
183
  progress_callback, preview_mask, preview_greenscreen
184
  )
 
 
 
185
 
186
  except VideoProcessingError as e:
 
187
  logger.error(f"Video processing failed: {e}")
188
+ return None, f"Processing failed: {str(e)}"
189
  except Exception as e:
 
190
  logger.error(f"Unexpected error during video processing: {e}")
191
+ return None, f"Unexpected error: {str(e)}"
192
 
193
  def _process_single_stage(
194
  self,
 
225
  final_video_path = processed_video_path
226
 
227
  success_msg = (
228
+ f"{process_message}\n"
229
+ f"Background: {background_choice}\n"
230
+ f"Mode: Single-stage\n"
231
+ f"Device: {self.device_manager.get_optimal_device()}"
232
  )
233
 
234
  return final_video_path, success_msg
 
255
  background_choice, custom_background_path, frame_width, frame_height
256
  )
257
  if background is None:
258
+ return None, "Failed to prepare background"
259
 
260
  # Process with two-stage pipeline
261
  import time
 
278
  return None, message
279
 
280
  success_msg = (
281
+ f"Two-stage success!\n"
282
+ f"Background: {background_choice}\n"
283
+ f"Preset: {chroma_preset}\n"
284
+ f"Quality: Cinema-grade\n"
285
+ f"Device: {self.device_manager.get_optimal_device()}"
286
  )
287
 
288
  return result, success_msg
 
290
  def get_status(self) -> Dict[str, Any]:
291
  """Get comprehensive status of all components"""
292
  base_status = {
 
 
293
  'models_loaded': self.models_loaded,
294
  'two_stage_available': TWO_STAGE_AVAILABLE and self.two_stage_processor is not None,
295
  'device': str(self.device_manager.get_optimal_device()),
296
  'memory_usage': self.memory_manager.get_memory_usage(),
297
+ 'config': self.config.to_dict()
 
298
  }
299
 
300
  # Add model-specific status if available
 
305
  if self.core_processor:
306
  base_status.update(self.core_processor.get_status())
307
 
308
+ # Add progress tracking if available
309
+ if self.progress_tracker:
310
+ base_status['progress'] = self.progress_tracker.get_all_progress()
311
+
312
  return base_status
313
 
314
  def cancel_processing(self):
315
  """Cancel any ongoing processing"""
316
  self.cancel_event.set()
 
317
  logger.info("Processing cancellation requested")
318
 
319
  def cleanup_resources(self):
 
321
  self.memory_manager.cleanup_aggressive()
322
  if self.model_loader:
323
  self.model_loader.cleanup()
 
324
  logger.info("Resources cleaned up")
325
 
326
  # Global processor instance for application
327
+ processor = VideoProcessor()
 
 
 
 
 
 
 
 
328
 
329
+ # Backward compatibility functions for existing UI
330
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
331
  """Load models with validation - backward compatibility wrapper"""
332
  return processor.load_models(progress_callback)
 
362
  def main():
363
  """Main application entry point"""
364
  try:
365
+ logger.info("Starting Video Background Replacement application")
 
 
366
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
367
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
368
  logger.info("Modular architecture loaded successfully")
369
 
370
  # Import and create UI
371
+ from ui_components import create_interface
372
  demo = create_interface()
373
 
374
  # Launch application
375
+ demo.queue().launch(
376
  server_name="0.0.0.0",
377
  server_port=7860,
378
  share=True,
379
  show_error=True,
380
+ debug=False
 
381
  )
382
 
383
  except Exception as e: