|
|
#!/bin/bash
#
# BackgroundFX Pro — development environment setup.
#
# Checks system requirements, creates a virtualenv, installs dependencies,
# optionally downloads AI models, and writes default configuration files.

# Strict mode: abort on command errors, unset variables, and failed
# pipeline stages (upgraded from plain 'set -e').
set -euo pipefail

# ANSI escape sequences for colored status output; NC resets the color.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Pinned interpreter version and virtualenv directory name.
readonly PYTHON_VERSION="3.10"
readonly VENV_NAME="venv"

# Repository root = parent of the directory containing this script.
# BUG FIX: the nested command substitutions were unquoted and broke on
# paths containing whitespace (SC2046).
PROJECT_ROOT=$(dirname "$(dirname "$(realpath "$0")")")
readonly PROJECT_ROOT

# Per-user locations for downloaded models and configuration files.
readonly MODELS_DIR="$HOME/.backgroundfx/models"
readonly CONFIG_DIR="$HOME/.backgroundfx/config"
|
|
|
|
|
|
|
|
# Print a boxed section title: blank line, bar, title, bar, blank line.
print_header() {
    local bar="========================================"
    printf '%b\n' "\n${BLUE}${bar}${NC}" "${BLUE}$1${NC}" "${BLUE}${bar}${NC}\n"
}
|
|
|
|
|
# Green status line for a successful step.
print_success() {
    printf '%b\n' "${GREEN}β $1${NC}"
}
|
|
|
|
|
# Yellow status line for a non-fatal problem.
print_warning() {
    printf '%b\n' "${YELLOW}β $1${NC}"
}
|
|
|
|
|
# Red status line for a fatal problem (caller decides whether to exit).
print_error() {
    printf '%b\n' "${RED}β $1${NC}"
}
|
|
|
|
|
# Return 0 if the named command exists on PATH, non-zero otherwise.
# BUG FIX: $1 was unquoted (SC2086); also dropped the redundant if/else —
# 'command -v' already yields the status we want.
check_command() {
    command -v "$1" &> /dev/null
}
|
|
|
|
|
|
|
|
# Verify system prerequisites. Exits on missing hard requirements
# (Python, pip); warns on missing optional tools (git, ffmpeg).
# Sets globals: CUDA_AVAILABLE, MEM_GB, DISK_GB.
check_requirements() {
    print_header "Checking System Requirements"

    # Hard requirement: the pinned Python version.
    if check_command "python$PYTHON_VERSION"; then
        print_success "Python $PYTHON_VERSION found"
    else
        print_error "Python $PYTHON_VERSION not found"
        echo "Please install Python $PYTHON_VERSION or later"
        exit 1
    fi

    # Hard requirement: pip.
    if check_command pip3; then
        print_success "pip found"
    else
        print_error "pip not found"
        exit 1
    fi

    # Optional: git.
    if check_command git; then
        print_success "git found"
    else
        print_warning "git not found - some features may not work"
    fi

    # Optional: FFmpeg (needed only for video processing).
    if check_command ffmpeg; then
        print_success "FFmpeg found"
    else
        print_warning "FFmpeg not found - video processing will fail"
        echo "Install with:"
        echo "  Ubuntu/Debian: sudo apt-get install ffmpeg"
        echo "  macOS: brew install ffmpeg"
        echo "  Windows: Download from https://ffmpeg.org"
    fi

    # GPU detection drives which PyTorch build is installed later.
    if check_command nvidia-smi; then
        print_success "NVIDIA GPU detected"
        nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
        CUDA_AVAILABLE=true
    else
        print_warning "No NVIDIA GPU detected - will use CPU"
        CUDA_AVAILABLE=false
    fi

    # Total memory in GB; 0 on unrecognized platforms.
    if [[ "$OSTYPE" == "linux-gnu"* ]]; then
        MEM_GB=$(free -g | awk '/^Mem:/{print $2}')
    elif [[ "$OSTYPE" == "darwin"* ]]; then
        MEM_GB=$(($(sysctl -n hw.memsize) / 1024 / 1024 / 1024))
    else
        MEM_GB=0
    fi

    # BUG FIX: operand was unquoted and could be empty, turning the test
    # into a syntax error instead of a warning; quote and default to 0.
    if [ "${MEM_GB:-0}" -ge 8 ]; then
        print_success "Memory: ${MEM_GB}GB (recommended: 8GB+)"
    else
        print_warning "Memory: ${MEM_GB}GB (recommended: 8GB+)"
    fi

    # BUG FIX: 'df -BG' is GNU-only and fails on macOS/BSD — which the
    # memory check above explicitly supports. Use POSIX 'df -P -k' and
    # convert 1 KiB blocks to GB instead.
    local disk_kb
    disk_kb=$(df -P -k . | awk 'NR==2 {print $4}')
    DISK_GB=$(( ${disk_kb:-0} / 1024 / 1024 ))
    if [ "${DISK_GB:-0}" -ge 20 ]; then
        print_success "Disk space: ${DISK_GB}GB available"
    else
        print_warning "Disk space: ${DISK_GB}GB (recommended: 20GB+)"
    fi
}
|
|
|
|
|
|
|
|
# Create (or interactively recreate) the project virtualenv in
# $PROJECT_ROOT/$VENV_NAME, then upgrade the packaging toolchain.
setup_virtualenv() {
    print_header "Setting Up Virtual Environment"

    cd "$PROJECT_ROOT"

    # Existing venv: ask before wiping; keep it (and return) on anything
    # other than an explicit yes.
    if [ -d "$VENV_NAME" ]; then
        print_warning "Virtual environment already exists"
        read -p "Recreate? (y/n): " -n 1 -r
        echo
        [[ $REPLY =~ ^[Yy]$ ]] || return
        rm -rf "$VENV_NAME"
    fi

    python$PYTHON_VERSION -m venv "$VENV_NAME"
    print_success "Virtual environment created"

    source "$VENV_NAME/bin/activate"

    pip install --upgrade pip setuptools wheel
    print_success "pip upgraded"
}
|
|
|
|
|
|
|
|
# Install PyTorch (CUDA or CPU build, per the earlier GPU detection) plus
# the project's requirements files into the active virtualenv.
install_dependencies() {
    print_header "Installing Dependencies"

    source "$VENV_NAME/bin/activate"

    # Pick the wheel index matching the detected hardware.
    local torch_index
    if [[ "$CUDA_AVAILABLE" == true ]]; then
        print_success "Installing PyTorch with CUDA support..."
        torch_index="https://download.pytorch.org/whl/cu121"
    else
        print_success "Installing PyTorch (CPU only)..."
        torch_index="https://download.pytorch.org/whl/cpu"
    fi
    pip install torch torchvision torchaudio --index-url "$torch_index"

    # Runtime requirements, when present.
    if [[ -f requirements.txt ]]; then
        print_success "Installing requirements..."
        pip install -r requirements.txt
    fi

    # Development extras, when present.
    if [[ -f requirements-dev.txt ]]; then
        print_success "Installing development dependencies..."
        pip install -r requirements-dev.txt
    fi

    print_success "All dependencies installed"
}
|
|
|
|
|
|
|
|
# Create the per-user model/config directories and the project's working
# directories (all idempotent via mkdir -p).
setup_directories() {
    print_header "Creating Directory Structure"

    mkdir -p "$MODELS_DIR"
    print_success "Created models directory: $MODELS_DIR"

    mkdir -p "$CONFIG_DIR"
    print_success "Created config directory: $CONFIG_DIR"

    local sub
    for sub in uploads outputs logs temp; do
        mkdir -p "$PROJECT_ROOT/$sub"
    done

    print_success "Created project directories"
}
|
|
|
|
|
|
|
|
# Interactively offer to pre-download the AI models (~2-3GB) using a
# generated Python helper run inside the virtualenv.
download_models() {
    print_header "Model Download"

    echo "Would you like to download the AI models now? (Recommended)"
    echo "This will download approximately 2-3GB of data."
    read -p "Download models? (y/n): " -n 1 -r
    echo

    if [[ $REPLY =~ ^[Yy]$ ]]; then
        source "$VENV_NAME/bin/activate"

        # BUG FIX: the helper used to be written to a fixed, predictable
        # name in the current directory and was left behind whenever the
        # download failed (set -e aborted before 'rm'). Write it to a
        # mktemp path and remove it on every exit path instead.
        local helper status=0
        helper=$(mktemp)
        cat > "$helper" << 'EOF'
import sys
sys.path.append('.')
from models import create_model_manager

print("Downloading essential models...")
manager = create_model_manager()

models_to_download = ['rmbg-1.4', 'u2netp', 'modnet']

for model_id in models_to_download:
    print(f"Downloading {model_id}...")
    success = manager.downloader.download_model(model_id)
    if success:
        print(f"β {model_id} downloaded")
    else:
        print(f"β Failed to download {model_id}")

print("\nModel download complete!")
print(f"Models stored in: {manager.models_dir}")
EOF

        python "$helper" || status=$?
        rm -f -- "$helper"
        if [ "$status" -ne 0 ]; then
            return "$status"
        fi

        print_success "Models downloaded"
    else
        print_warning "Skipping model download - models will be downloaded on first use"
    fi
}
|
|
|
|
|
|
|
|
# Write the default runtime configuration (overwrites any existing
# config.json) and seed the project's .env file if one is absent.
create_config() {
    print_header "Creating Configuration"

    # Default app settings; unquoted heredoc so $MODELS_DIR expands.
    cat > "$CONFIG_DIR/config.json" << EOF
{
    "device": "auto",
    "quality_preset": "high",
    "models_dir": "$MODELS_DIR",
    "max_memory_gb": 8,
    "enable_cache": true,
    "default_background": "blur",
    "output_format": "mp4",
    "log_level": "INFO"
}
EOF

    print_success "Configuration created: $CONFIG_DIR/config.json"

    # Prefer copying the docker example env file; fall back to writing a
    # minimal one. NOTE(review): cp failure is deliberately suppressed so
    # the fallback heredoc runs instead.
    if [ ! -f "$PROJECT_ROOT/.env" ]; then
        cp "$PROJECT_ROOT/docker/.env.example" "$PROJECT_ROOT/.env" 2>/dev/null || \
        cat > "$PROJECT_ROOT/.env" << EOF
# BackgroundFX Pro Environment Configuration
DEVICE=auto
MODEL_CACHE_DIR=$MODELS_DIR
LOG_LEVEL=INFO
GRADIO_SERVER_NAME=0.0.0.0
GRADIO_SERVER_PORT=7860
EOF
        print_success "Environment file created: .env"
    fi
}
|
|
|
|
|
|
|
|
# Install the project in editable mode to expose the 'bgfx' CLI entry
# point, then verify that it landed on PATH.
setup_cli() {
    print_header "Setting Up CLI"

    source "$VENV_NAME/bin/activate"

    pip install -e .

    if ! check_command bgfx; then
        print_warning "CLI installation may have failed"
        return
    fi

    print_success "CLI installed successfully"
    echo "You can now use: bgfx --help"
}
|
|
|
|
|
|
|
|
# Smoke-test the finished installation from inside the virtualenv.
test_installation() {
    print_header "Testing Installation"

    source "$VENV_NAME/bin/activate"

    # Third-party imports must succeed; under set -e a failure here
    # aborts the whole setup.
    python -c "
import torch
import cv2
import numpy
print('β Core libraries imported successfully')
"

    # Project modules may be missing in partial checkouts — warn rather
    # than fail (stderr suppressed intentionally).
    python -c "
from api import ProcessingPipeline
from models import ModelRegistry
print('β Custom modules imported successfully')
" 2>/dev/null || print_warning "Some custom modules may not be available"

    # Only meaningful when check_requirements detected an NVIDIA GPU:
    # confirms the installed PyTorch build actually sees CUDA.
    if [ "$CUDA_AVAILABLE" = true ]; then
        python -c "
import torch
if torch.cuda.is_available():
    print(f'β CUDA available: {torch.cuda.get_device_name(0)}')
else:
    print('β CUDA not available in PyTorch')
"
    fi
}
|
|
|
|
|
|
|
|
# Orchestrate the full setup, then print usage hints.
main() {
    print_header "BackgroundFX Pro Setup"
    printf '%s\n' "This script will set up your development environment"
    printf '%s\n' "Project root: $PROJECT_ROOT"
    printf '\n'

    # Run every setup phase in order; set -e stops on the first failure.
    local step
    for step in check_requirements setup_virtualenv install_dependencies \
                setup_directories download_models create_config setup_cli \
                test_installation; do
        "$step"
    done

    print_header "Setup Complete!"
    printf '%b\n' "${GREEN}BackgroundFX Pro is ready to use!${NC}"
    printf '\n%s\n' "To activate the environment:"
    printf '%s\n' "  source $VENV_NAME/bin/activate"
    printf '\n%s\n' "To run the application:"
    printf '%s\n' "  python app.py"
    printf '\n%s\n' "To use the CLI:"
    printf '%s\n' "  bgfx --help"
    printf '\n%s\n' "To run tests:"
    printf '%s\n' "  pytest"
    printf '\n'
}
|
|
|
|
|
|
|
|
# BUG FIX: forward the script's positional arguments to main (they were
# silently dropped before).
main "$@"