"""
BackgroundFX Pro Configuration Module

Centralizes all application configuration and environment-variable handling.

Note: Named 'app_config.py' to avoid conflicts with the existing 'Configs/' folder.
"""

import json
import logging
import os
from dataclasses import dataclass, asdict, field
from pathlib import Path
from typing import Any, Dict, List, Optional

import yaml

logger = logging.getLogger(__name__)
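
# Example environment overrides (illustrative values; every variable is optional
# and falls back to the default shown in ProcessingConfig below):
#   export QUALITY_PRESET=high
#   export SAM2_MODEL_SIZE=base
#   export OUTPUT_FORMAT=webm
#   export GPU_MEMORY_FRACTION=0.6
#   export DEBUG_MODE=true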


@dataclass
class ProcessingConfig:
    """Main processing configuration with environment-variable defaults.

    Defaults are read from the environment each time an instance is created
    (via field(default_factory=...)), so reload_config() picks up changes made
    to the environment at runtime.
    """

    # Application identity
    app_name: str = "BackgroundFX Pro"
    version: str = "2.0.0"

    # Frame sampling
    keyframe_interval: int = field(default_factory=lambda: int(os.getenv('KEYFRAME_INTERVAL', '5')))
    frame_skip: int = field(default_factory=lambda: int(os.getenv('FRAME_SKIP', '1')))

    # Memory management
    memory_cleanup_interval: int = field(default_factory=lambda: int(os.getenv('MEMORY_CLEANUP_INTERVAL', '30')))
    memory_threshold_mb: int = field(default_factory=lambda: int(os.getenv('MEMORY_THRESHOLD_MB', '1024')))

    # Video input limits
    max_video_length: int = field(default_factory=lambda: int(os.getenv('MAX_VIDEO_LENGTH', '300')))
    max_video_resolution: str = field(default_factory=lambda: os.getenv('MAX_VIDEO_RESOLUTION', '1920x1080'))
    min_video_fps: int = field(default_factory=lambda: int(os.getenv('MIN_VIDEO_FPS', '15')))
    max_video_fps: int = field(default_factory=lambda: int(os.getenv('MAX_VIDEO_FPS', '60')))

    # Quality preset
    quality_preset: str = field(default_factory=lambda: os.getenv('QUALITY_PRESET', 'balanced'))

    # Model settings
    sam2_model_size: str = field(default_factory=lambda: os.getenv('SAM2_MODEL_SIZE', 'large'))
    matanyone_precision: str = field(default_factory=lambda: os.getenv('MATANYONE_PRECISION', 'fp32'))
    model_device: str = field(default_factory=lambda: os.getenv('MODEL_DEVICE', 'auto'))

    # Segmentation and matting
    temporal_consistency: bool = field(default_factory=lambda: os.getenv('TEMPORAL_CONSISTENCY', 'true').lower() == 'true')
    edge_refinement: bool = field(default_factory=lambda: os.getenv('EDGE_REFINEMENT', 'true').lower() == 'true')
    mask_blur_radius: int = field(default_factory=lambda: int(os.getenv('MASK_BLUR_RADIUS', '5')))
    confidence_threshold: float = field(default_factory=lambda: float(os.getenv('CONFIDENCE_THRESHOLD', '0.85')))
    background_preset: str = field(default_factory=lambda: os.getenv('BACKGROUND_PRESET', 'minimalist'))

    # Output settings
    output_dir: str = field(default_factory=lambda: os.getenv('OUTPUT_DIR', 'outputs'))
    output_format: str = field(default_factory=lambda: os.getenv('OUTPUT_FORMAT', 'mp4'))
    output_quality: str = field(default_factory=lambda: os.getenv('OUTPUT_QUALITY', 'high'))
    output_codec: str = field(default_factory=lambda: os.getenv('OUTPUT_CODEC', 'h264'))
    write_fps: Optional[float] = None
    preserve_audio: bool = field(default_factory=lambda: os.getenv('PRESERVE_AUDIO', 'true').lower() == 'true')

    # Storage and caching
    model_cache_dir: str = field(default_factory=lambda: os.getenv('MODEL_CACHE_DIR', 'models/cache'))
    temp_dir: str = field(default_factory=lambda: os.getenv('TEMP_DIR', 'temp'))
    cleanup_temp_files: bool = field(default_factory=lambda: os.getenv('CLEANUP_TEMP_FILES', 'true').lower() == 'true')
    cache_size_limit_gb: float = field(default_factory=lambda: float(os.getenv('CACHE_SIZE_LIMIT_GB', '10.0')))

    # Performance
    max_concurrent_processes: int = field(default_factory=lambda: int(os.getenv('MAX_CONCURRENT_PROCESSES', '1')))
    gpu_memory_fraction: float = field(default_factory=lambda: float(os.getenv('GPU_MEMORY_FRACTION', '0.8')))
    batch_size: int = field(default_factory=lambda: int(os.getenv('BATCH_SIZE', '4')))
    num_workers: int = field(default_factory=lambda: int(os.getenv('NUM_WORKERS', '4')))

    # API settings
    api_enabled: bool = field(default_factory=lambda: os.getenv('API_ENABLED', 'false').lower() == 'true')
    api_host: str = field(default_factory=lambda: os.getenv('API_HOST', '0.0.0.0'))
    api_port: int = field(default_factory=lambda: int(os.getenv('API_PORT', '8000')))
    api_key: Optional[str] = field(default_factory=lambda: os.getenv('API_KEY'))

    # Gradio UI settings
    gradio_server_name: str = field(default_factory=lambda: os.getenv('GRADIO_SERVER_NAME', '0.0.0.0'))
    gradio_server_port: int = field(default_factory=lambda: int(os.getenv('GRADIO_SERVER_PORT', '7860')))
    gradio_share: bool = field(default_factory=lambda: os.getenv('GRADIO_SHARE', 'false').lower() == 'true')
    gradio_auth: Optional[str] = field(default_factory=lambda: os.getenv('GRADIO_AUTH'))

    # Debugging and logging
    debug_mode: bool = field(default_factory=lambda: os.getenv('DEBUG_MODE', 'false').lower() == 'true')
    save_intermediate_results: bool = field(default_factory=lambda: os.getenv('SAVE_INTERMEDIATE_RESULTS', 'false').lower() == 'true')
    log_level: str = field(default_factory=lambda: os.getenv('LOG_LEVEL', 'INFO'))
    profile_performance: bool = field(default_factory=lambda: os.getenv('PROFILE_PERFORMANCE', 'false').lower() == 'true')

    # Feature flags
    enable_two_stage: bool = field(default_factory=lambda: os.getenv('ENABLE_TWO_STAGE', 'true').lower() == 'true')
    enable_preview_modes: bool = field(default_factory=lambda: os.getenv('ENABLE_PREVIEW_MODES', 'true').lower() == 'true')
    enable_batch_processing: bool = field(default_factory=lambda: os.getenv('ENABLE_BATCH_PROCESSING', 'false').lower() == 'true')

    # Legacy compatibility
    legacy_mode: bool = field(default_factory=lambda: os.getenv('LEGACY_MODE', 'true').lower() == 'true')
    legacy_configs_dir: str = field(default_factory=lambda: os.getenv('LEGACY_CONFIGS_DIR', 'Configs'))

    def __post_init__(self):
        """Validate configuration after initialization."""
        self._validate_config()
        self._create_directories()
        self._setup_logging()
        if self.debug_mode:
            self._log_config()

    def _validate_config(self):
        """Validate and clamp configuration values."""
        # Frame sampling
        self.keyframe_interval = max(1, self.keyframe_interval)
        self.frame_skip = max(1, self.frame_skip)

        # Memory management
        self.memory_cleanup_interval = max(1, self.memory_cleanup_interval)
        self.memory_threshold_mb = max(256, self.memory_threshold_mb)

        # Video input limits
        self.max_video_length = max(1, self.max_video_length)
        self.min_video_fps = max(1, min(self.min_video_fps, 60))
        self.max_video_fps = max(self.min_video_fps, min(self.max_video_fps, 120))

        # Resolution format
        if 'x' not in self.max_video_resolution:
            logger.warning(f"Invalid resolution format: {self.max_video_resolution}. Setting to 1920x1080.")
            self.max_video_resolution = '1920x1080'

        # Quality preset
        valid_presets = ['fast', 'balanced', 'high', 'ultra']
        if self.quality_preset not in valid_presets:
            logger.warning(f"Invalid quality preset: {self.quality_preset}. Setting to 'balanced'.")
            self.quality_preset = 'balanced'

        # Model settings
        valid_sam2_sizes = ['tiny', 'small', 'base', 'large']
        if self.sam2_model_size not in valid_sam2_sizes:
            logger.warning(f"Invalid SAM2 model size: {self.sam2_model_size}. Setting to 'large'.")
            self.sam2_model_size = 'large'

        valid_precisions = ['fp16', 'fp32']
        if self.matanyone_precision not in valid_precisions:
            logger.warning(f"Invalid precision: {self.matanyone_precision}. Setting to 'fp32'.")
            self.matanyone_precision = 'fp32'

        # Output settings
        valid_formats = ['mp4', 'avi', 'mov', 'webm', 'mkv']
        if self.output_format not in valid_formats:
            logger.warning(f"Invalid output format: {self.output_format}. Setting to 'mp4'.")
            self.output_format = 'mp4'

        valid_qualities = ['low', 'medium', 'high', 'ultra']
        if self.output_quality not in valid_qualities:
            logger.warning(f"Invalid output quality: {self.output_quality}. Setting to 'high'.")
            self.output_quality = 'high'

        # Performance
        self.max_concurrent_processes = max(1, self.max_concurrent_processes)
        self.gpu_memory_fraction = max(0.1, min(1.0, self.gpu_memory_fraction))
        self.batch_size = max(1, self.batch_size)
        self.num_workers = max(0, self.num_workers)

        # API
        self.api_port = max(1024, min(65535, self.api_port))

        # Segmentation
        self.confidence_threshold = max(0.0, min(1.0, self.confidence_threshold))

        # Cache
        self.cache_size_limit_gb = max(0.1, self.cache_size_limit_gb)

    def _create_directories(self):
        """Create necessary directories if they don't exist."""
        directories = [
            self.model_cache_dir,
            self.temp_dir,
            self.output_dir,
            Path(self.output_dir) / 'masks',
            Path(self.output_dir) / 'greenscreen',
            Path(self.output_dir) / 'final',
            Path(self.output_dir) / 'two_stage',
        ]

        for directory in directories:
            try:
                Path(directory).mkdir(parents=True, exist_ok=True)
                logger.debug(f"Ensured directory exists: {directory}")
            except Exception as e:
                logger.error(f"Failed to create directory {directory}: {e}")

    def _setup_logging(self):
        """Set up logging based on configuration."""
        log_levels = {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL,
        }

        level = log_levels.get(self.log_level.upper(), logging.INFO)
        logging.getLogger().setLevel(level)

    def _log_config(self):
        """Log current configuration in debug mode."""
        logger.info("=" * 60)
        logger.info(f"{self.app_name} v{self.version} Configuration")
        logger.info("=" * 60)
        config_dict = self.to_dict()

        # Mask sensitive values before logging
        if config_dict.get('api_key'):
            config_dict['api_key'] = '***hidden***'
        if config_dict.get('gradio_auth'):
            config_dict['gradio_auth'] = '***hidden***'

        for key, value in config_dict.items():
            logger.info(f"{key}: {value}")
        logger.info("=" * 60)

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to dictionary."""
        return asdict(self)

    def to_json(self, filepath: Optional[str] = None) -> str:
        """Export configuration to JSON."""
        config_dict = self.to_dict()
        if filepath:
            with open(filepath, 'w') as f:
                json.dump(config_dict, f, indent=2)
            logger.info(f"Configuration saved to {filepath}")
        return json.dumps(config_dict, indent=2)

    def to_yaml(self, filepath: Optional[str] = None) -> str:
        """Export configuration to YAML."""
        config_dict = self.to_dict()
        if filepath:
            with open(filepath, 'w') as f:
                yaml.dump(config_dict, f, default_flow_style=False)
            logger.info(f"Configuration saved to {filepath}")
        return yaml.dump(config_dict, default_flow_style=False)

    @classmethod
    def from_json(cls, filepath: str) -> 'ProcessingConfig':
        """Load configuration from JSON file."""
        with open(filepath, 'r') as f:
            config_dict = json.load(f)
        return cls(**config_dict)

    @classmethod
    def from_yaml(cls, filepath: str) -> 'ProcessingConfig':
        """Load configuration from YAML file."""
        with open(filepath, 'r') as f:
            config_dict = yaml.safe_load(f)
        return cls(**config_dict)
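
    # Since every field has a default, a partial override file is enough; a
    # hypothetical high-quality preset loaded via from_json() might contain:
    #   {"quality_preset": "high", "output_format": "webm", "gpu_memory_fraction": 0.6}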

    def get_quality_settings(self) -> Dict[str, Any]:
        """Get quality-specific settings based on preset."""
        quality_maps = {
            'fast': {
                'keyframe_interval': max(self.keyframe_interval, 10),
                'frame_skip': max(self.frame_skip, 2),
                'edge_refinement': False,
                'temporal_consistency': False,
                'model_precision': 'fp16',
                'batch_size': min(self.batch_size * 2, 16),
                'output_quality_params': '-preset ultrafast -crf 28'
            },
            'balanced': {
                'keyframe_interval': self.keyframe_interval,
                'frame_skip': self.frame_skip,
                'edge_refinement': True,
                'temporal_consistency': True,
                'model_precision': 'fp32',
                'batch_size': self.batch_size,
                'output_quality_params': '-preset medium -crf 23'
            },
            'high': {
                'keyframe_interval': max(self.keyframe_interval // 2, 1),
                'frame_skip': 1,
                'edge_refinement': True,
                'temporal_consistency': True,
                'model_precision': 'fp32',
                'batch_size': max(self.batch_size // 2, 1),
                'output_quality_params': '-preset slow -crf 18'
            },
            'ultra': {
                'keyframe_interval': 1,
                'frame_skip': 1,
                'edge_refinement': True,
                'temporal_consistency': True,
                'model_precision': 'fp32',
                'batch_size': 1,
                'output_quality_params': '-preset veryslow -crf 15'
            }
        }

        return quality_maps.get(self.quality_preset, quality_maps['balanced'])

    def get_resolution_limits(self) -> tuple[int, int]:
        """Get max width and height from resolution setting."""
        try:
            width, height = map(int, self.max_video_resolution.split('x'))
            return width, height
        except ValueError:
            logger.error(f"Invalid resolution format: {self.max_video_resolution}")
            return 1920, 1080

    def get_output_params(self) -> Dict[str, str]:
        """Get FFmpeg output parameters based on settings."""
        quality_settings = self.get_quality_settings()
        codec_map = {
            'h264': 'libx264',
            'h265': 'libx265',
            'vp9': 'libvpx-vp9',
            'av1': 'libaom-av1'
        }

        return {
            'codec': codec_map.get(self.output_codec, 'libx264'),
            'quality': quality_settings['output_quality_params'],
            'format': self.output_format,
            'audio': '-c:a copy' if self.preserve_audio else '-an'
        }
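
    # For reference, a hypothetical ffmpeg invocation assembled from these
    # parameters with the default settings would look roughly like:
    #   ffmpeg -i input.mp4 -c:v libx264 -preset medium -crf 23 -c:a copy output.mp4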

    def is_high_performance_mode(self) -> bool:
        """Check if configuration is set for high performance."""
        return (
            self.quality_preset in ['high', 'ultra'] and
            self.edge_refinement and
            self.temporal_consistency and
            self.keyframe_interval <= 3
        )

    def get_memory_limits(self) -> Dict[str, Any]:
        """Get memory-related limits."""
        return {
            'gpu_memory_fraction': self.gpu_memory_fraction,
            'cleanup_interval': self.memory_cleanup_interval,
            'max_concurrent': self.max_concurrent_processes,
            'threshold_mb': self.memory_threshold_mb,
            'cache_size_gb': self.cache_size_limit_gb
        }

    def validate_for_production(self) -> List[str]:
        """Validate configuration for production deployment and return warnings."""
        warnings = []

        if self.debug_mode:
            warnings.append("Debug mode is enabled in production")

        if self.save_intermediate_results:
            warnings.append("Intermediate results saving is enabled (disk usage)")

        if not self.cleanup_temp_files:
            warnings.append("Temp file cleanup is disabled (disk usage)")

        if self.gradio_share:
            warnings.append("Gradio share is enabled (security risk)")

        if self.api_enabled and not self.api_key:
            warnings.append("API is enabled without authentication")

        if self.gpu_memory_fraction > 0.9:
            warnings.append("GPU memory fraction is very high (>90%)")

        if self.max_concurrent_processes > 4:
            warnings.append("High concurrent processes may cause instability")

        return warnings


# Module-level configuration singleton
_config_instance: Optional[ProcessingConfig] = None


def get_config() -> ProcessingConfig:
    """Get global configuration instance."""
    global _config_instance
    if _config_instance is None:
        _config_instance = ProcessingConfig()
    return _config_instance


def reload_config() -> ProcessingConfig:
    """Reload configuration from environment variables."""
    global _config_instance
    _config_instance = ProcessingConfig()
    logger.info("Configuration reloaded from environment variables")
    return _config_instance


def update_config(**kwargs) -> ProcessingConfig:
    """Update configuration with new values."""
    global _config_instance
    if _config_instance is None:
        _config_instance = ProcessingConfig()

    for key, value in kwargs.items():
        if hasattr(_config_instance, key):
            setattr(_config_instance, key, value)
            logger.debug(f"Updated config: {key} = {value}")
        else:
            logger.warning(f"Unknown configuration key: {key}")

    # Re-validate after applying updates
    _config_instance._validate_config()
    return _config_instance


def load_config_from_file(filepath: str) -> ProcessingConfig:
    """Load configuration from file (JSON or YAML)."""
    global _config_instance

    file_path = Path(filepath)
    if not file_path.exists():
        raise FileNotFoundError(f"Configuration file not found: {filepath}")

    if file_path.suffix.lower() == '.json':
        _config_instance = ProcessingConfig.from_json(filepath)
    elif file_path.suffix.lower() in ['.yaml', '.yml']:
        _config_instance = ProcessingConfig.from_yaml(filepath)
    else:
        raise ValueError(f"Unsupported configuration file format: {file_path.suffix}")

    logger.info(f"Configuration loaded from {filepath}")
    return _config_instance
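

# Minimal usage sketch (illustrative only): running this module directly prints
# the active configuration and any production-readiness warnings, which is a
# quick way to sanity-check environment overrides.
if __name__ == "__main__":
    cfg = get_config()
    print(cfg.to_json())
    for warning in cfg.validate_for_production():
        print(f"WARNING: {warning}")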