Spaces:
Sleeping
Sleeping
Upload 13 files
Browse files — youtube automation studio
- .dockerignore +58 -0
- .gitignore +67 -0
- DEPLOYMENT_HF.md +143 -0
- Dockerfile +39 -0
- README.md +85 -12
- ai_genrator.py +270 -0
- app.py +1027 -0
- downloader.py +96 -0
- requirements.txt +30 -0
- runtime.txt +1 -0
- token.pickle +3 -0
- uploader.py +289 -0
- video_editor.py +467 -0
.dockerignore
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Git
|
| 2 |
+
.git
|
| 3 |
+
.gitignore
|
| 4 |
+
.gitattributes
|
| 5 |
+
|
| 6 |
+
# Python
|
| 7 |
+
__pycache__/
|
| 8 |
+
*.py[cod]
|
| 9 |
+
*$py.class
|
| 10 |
+
*.so
|
| 11 |
+
.Python
|
| 12 |
+
*.egg-info/
|
| 13 |
+
.installed.cfg
|
| 14 |
+
*.egg
|
| 15 |
+
venv/
|
| 16 |
+
env/
|
| 17 |
+
ENV/
|
| 18 |
+
|
| 19 |
+
# Documentation
|
| 20 |
+
*.md
|
| 21 |
+
!README.md
|
| 22 |
+
DEPLOYMENT.md
|
| 23 |
+
README_HF.md
|
| 24 |
+
|
| 25 |
+
# IDE
|
| 26 |
+
.vscode/
|
| 27 |
+
.idea/
|
| 28 |
+
*.swp
|
| 29 |
+
*.swo
|
| 30 |
+
|
| 31 |
+
# OS
|
| 32 |
+
.DS_Store
|
| 33 |
+
Thumbs.db
|
| 34 |
+
|
| 35 |
+
# Temporary files
|
| 36 |
+
downloads/*.mp4
|
| 37 |
+
downloads/*.avi
|
| 38 |
+
downloads/*.mov
|
| 39 |
+
user_tokens/*.json
|
| 40 |
+
temp_audio/*.mp3
|
| 41 |
+
temp_audio/*.m4a
|
| 42 |
+
flask_session/
|
| 43 |
+
*.log
|
| 44 |
+
|
| 45 |
+
# Environment
|
| 46 |
+
.env
|
| 47 |
+
.env.local
|
| 48 |
+
.env.example
|
| 49 |
+
|
| 50 |
+
# Build files
|
| 51 |
+
render.yaml
|
| 52 |
+
Procfile
|
| 53 |
+
runtime.txt
|
| 54 |
+
requirements_hf.txt
|
| 55 |
+
|
| 56 |
+
# Test files
|
| 57 |
+
test_*.py
|
| 58 |
+
tests/
|
.gitignore
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
env/
|
| 8 |
+
venv/
|
| 9 |
+
ENV/
|
| 10 |
+
build/
|
| 11 |
+
develop-eggs/
|
| 12 |
+
dist/
|
| 13 |
+
downloads/
|
| 14 |
+
eggs/
|
| 15 |
+
.eggs/
|
| 16 |
+
lib/
|
| 17 |
+
lib64/
|
| 18 |
+
parts/
|
| 19 |
+
sdist/
|
| 20 |
+
var/
|
| 21 |
+
wheels/
|
| 22 |
+
*.egg-info/
|
| 23 |
+
.installed.cfg
|
| 24 |
+
*.egg
|
| 25 |
+
|
| 26 |
+
# Flask
|
| 27 |
+
instance/
|
| 28 |
+
.webassets-cache
|
| 29 |
+
|
| 30 |
+
# Environment
|
| 31 |
+
.env
|
| 32 |
+
.env.local
|
| 33 |
+
.env.*.local
|
| 34 |
+
|
| 35 |
+
# IDE
|
| 36 |
+
.vscode/
|
| 37 |
+
.idea/
|
| 38 |
+
*.swp
|
| 39 |
+
*.swo
|
| 40 |
+
*~
|
| 41 |
+
|
| 42 |
+
# OS
|
| 43 |
+
.DS_Store
|
| 44 |
+
Thumbs.db
|
| 45 |
+
|
| 46 |
+
# Project specific
|
| 47 |
+
downloads/*.mp4
|
| 48 |
+
downloads/*.avi
|
| 49 |
+
downloads/*.mov
|
| 50 |
+
temp_audio/
|
| 51 |
+
user_tokens/
|
| 52 |
+
flask_session/
|
| 53 |
+
client_secret.json
|
| 54 |
+
token.json
|
| 55 |
+
token_*.json
|
| 56 |
+
|
| 57 |
+
# Logs
|
| 58 |
+
*.log
|
| 59 |
+
logs/
|
| 60 |
+
|
| 61 |
+
# Testing
|
| 62 |
+
.pytest_cache/
|
| 63 |
+
.coverage
|
| 64 |
+
htmlcov/
|
| 65 |
+
|
| 66 |
+
# Hugging Face
|
| 67 |
+
.huggingface/
|
DEPLOYMENT_HF.md
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deployment Guide for Hugging Face Spaces
|
| 2 |
+
|
| 3 |
+
## Prerequisites
|
| 4 |
+
|
| 5 |
+
1. Hugging Face account (free)
|
| 6 |
+
2. GitHub repository with your code
|
| 7 |
+
3. Google Cloud OAuth credentials (`client_secret.json`)
|
| 8 |
+
4. Required API keys:
|
| 9 |
+
- Google Gemini API key
|
| 10 |
+
- Instagram session ID
|
| 11 |
+
|
| 12 |
+
## Step 1: Prepare Google Cloud OAuth
|
| 13 |
+
|
| 14 |
+
1. Go to [Google Cloud Console](https://console.cloud.google.com)
|
| 15 |
+
2. Create a new project or select existing
|
| 16 |
+
3. Enable **YouTube Data API v3**
|
| 17 |
+
4. Go to **Credentials** β Create OAuth 2.0 Client ID
|
| 18 |
+
5. Choose **Web application**
|
| 19 |
+
6. Add authorized redirect URI:
|
| 20 |
+
```
|
| 21 |
+
https://YOUR_USERNAME-youtube-automation-studio.hf.space/auth/callback
|
| 22 |
+
```
|
| 23 |
+
7. Download `client_secret.json`
|
| 24 |
+
|
| 25 |
+
## Step 2: Get Instagram Session ID
|
| 26 |
+
|
| 27 |
+
1. Open Instagram in Chrome browser
|
| 28 |
+
2. Login to your Instagram account
|
| 29 |
+
3. Press `F12` (Developer Tools)
|
| 30 |
+
4. Go to **Application** tab β **Cookies** β `https://instagram.com`
|
| 31 |
+
5. Find cookie named `sessionid`
|
| 32 |
+
6. Copy the entire value (should be 40+ characters)
|
| 33 |
+
|
| 34 |
+
## Step 3: Generate Secret Key
|
| 35 |
+
|
| 36 |
+
Run this Python command:
|
| 37 |
+
```bash
|
| 38 |
+
python -c "import secrets; print(secrets.token_hex(32))"
|
| 39 |
+
```
|
| 40 |
+
Copy the output.
|
| 41 |
+
|
| 42 |
+
## Step 4: Create Hugging Face Space
|
| 43 |
+
|
| 44 |
+
1. Go to https://huggingface.co/spaces
|
| 45 |
+
2. Click **Create new Space**
|
| 46 |
+
3. Fill in details:
|
| 47 |
+
- **Space name**: `youtube-automation-studio`
|
| 48 |
+
- **License**: MIT
|
| 49 |
+
- **Select SDK**: Docker
|
| 50 |
+
- **Space hardware**: CPU basic (free)
|
| 51 |
+
4. Click **Create Space**
|
| 52 |
+
|
| 53 |
+
## Step 5: Configure Environment Variables
|
| 54 |
+
|
| 55 |
+
In your Space settings β **Variables and secrets**:
|
| 56 |
+
|
| 57 |
+
Add these **secrets**:
|
| 58 |
+
|
| 59 |
+
```env
|
| 60 |
+
ENVIRONMENT=production
|
| 61 |
+
GEMINI_API_KEY=your-gemini-api-key-from-google-ai-studio
|
| 62 |
+
IG_SESSIONID=your-instagram-sessionid-from-cookies
|
| 63 |
+
SECRET_KEY=your-generated-secret-key-from-step3
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
## Step 6: Upload client_secret.json
|
| 67 |
+
|
| 68 |
+
In your Space β **Files** tab:
|
| 69 |
+
1. Click **Add file** β **Upload files**
|
| 70 |
+
2. Upload your `client_secret.json`
|
| 71 |
+
3. Mark it as **secret file** if option available
|
| 72 |
+
|
| 73 |
+
## Step 7: Push Code to Hugging Face
|
| 74 |
+
|
| 75 |
+
### Option A: Direct Git Push
|
| 76 |
+
|
| 77 |
+
```bash
|
| 78 |
+
# Clone your HF Space repository
|
| 79 |
+
git clone https://huggingface.co/spaces/YOUR_USERNAME/youtube-automation-studio
|
| 80 |
+
cd youtube-automation-studio
|
| 81 |
+
|
| 82 |
+
# Copy all your project files
|
| 83 |
+
cp -r /path/to/your/project/* .
|
| 84 |
+
|
| 85 |
+
# Make sure these files are present:
|
| 86 |
+
# - Dockerfile
|
| 87 |
+
# - requirements.txt
|
| 88 |
+
# - app.py
|
| 89 |
+
# - All other Python files
|
| 90 |
+
# - templates/ folder
|
| 91 |
+
# - static/ folder
|
| 92 |
+
# - client_secret.json (if not uploaded via UI)
|
| 93 |
+
|
| 94 |
+
# Commit and push
|
| 95 |
+
git add .
|
| 96 |
+
git commit -m "Initial deployment"
|
| 97 |
+
git push
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
### Option B: Via GitHub
|
| 101 |
+
|
| 102 |
+
If your code is on GitHub:
|
| 103 |
+
|
| 104 |
+
1. In your HF Space settings
|
| 105 |
+
2. Go to **Files and versions** β **Settings**
|
| 106 |
+
3. Connect to GitHub repository
|
| 107 |
+
4. Select branch to deploy from
|
| 108 |
+
|
| 109 |
+
## Step 8: Wait for Build
|
| 110 |
+
|
| 111 |
+
- Build typically takes 5-10 minutes
|
| 112 |
+
- Monitor logs in **Logs** tab
|
| 113 |
+
- Check for any errors
|
| 114 |
+
|
| 115 |
+
## Step 9: Test Your Deployment
|
| 116 |
+
|
| 117 |
+
1. Once build completes, visit your Space URL:
|
| 118 |
+
```
|
| 119 |
+
https://YOUR_USERNAME-youtube-automation-studio.hf.space
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
2. Test each feature:
|
| 123 |
+
- β
Homepage loads
|
| 124 |
+
- β
Sign in with YouTube works
|
| 125 |
+
- β
Download Instagram Reel
|
| 126 |
+
- β
AI metadata generation
|
| 127 |
+
- β
Video upload to YouTube
|
| 128 |
+
|
| 129 |
+
## Step 10: Update OAuth Redirect URI (If Needed)
|
| 130 |
+
|
| 131 |
+
If authentication fails:
|
| 132 |
+
|
| 133 |
+
1. Go back to Google Cloud Console
|
| 134 |
+
2. Edit your OAuth 2.0 Client
|
| 135 |
+
3. Verify redirect URI matches exactly:
|
| 136 |
+
```
|
| 137 |
+
https://YOUR_USERNAME-youtube-automation-studio.hf.space/auth/callback
|
| 138 |
+
```
|
| 139 |
+
4. Save and wait 5 minutes for changes to propagate
|
| 140 |
+
|
| 141 |
+
## Monitoring & Maintenance
|
| 142 |
+
|
| 143 |
+
### Check Logs
|
Dockerfile
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim
|
| 2 |
+
|
| 3 |
+
# Install system dependencies including ffmpeg
|
| 4 |
+
RUN apt-get update && apt-get install -y \
|
| 5 |
+
ffmpeg \
|
| 6 |
+
libsm6 \
|
| 7 |
+
libxext6 \
|
| 8 |
+
libxrender-dev \
|
| 9 |
+
libgomp1 \
|
| 10 |
+
libgl1-mesa-glx \
|
| 11 |
+
git \
|
| 12 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 13 |
+
|
| 14 |
+
# Set working directory
|
| 15 |
+
WORKDIR /app
|
| 16 |
+
|
| 17 |
+
# Copy requirements
|
| 18 |
+
COPY requirements.txt .
|
| 19 |
+
|
| 20 |
+
# Install Python dependencies
|
| 21 |
+
RUN pip install --no-cache-dir --upgrade pip setuptools wheel && \
|
| 22 |
+
pip install --no-cache-dir -r requirements.txt
|
| 23 |
+
|
| 24 |
+
# Copy application files
|
| 25 |
+
COPY . .
|
| 26 |
+
|
| 27 |
+
# Create necessary directories
|
| 28 |
+
RUN mkdir -p downloads user_tokens /tmp/flask_session temp_audio
|
| 29 |
+
|
| 30 |
+
# Set environment variables
|
| 31 |
+
ENV PYTHONUNBUFFERED=1
|
| 32 |
+
ENV ENVIRONMENT=production
|
| 33 |
+
ENV PORT=7860
|
| 34 |
+
|
| 35 |
+
# Expose port
|
| 36 |
+
EXPOSE 7860
|
| 37 |
+
|
| 38 |
+
# Run the application
|
| 39 |
+
CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:7860", "--workers", "2", "--timeout", "600", "--log-level", "info"]
|
README.md
CHANGED
|
@@ -1,12 +1,85 @@
|
|
| 1 |
-
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
-
|
| 10 |
-
---
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: YouTube Automation Studio
|
| 3 |
+
emoji: π¬
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
license: mit
|
| 9 |
+
app_port: 7860
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# π¬ YouTube Automation Studio
|
| 13 |
+
|
| 14 |
+
AI-powered automation tool for downloading Instagram Reels, generating metadata with Google Gemini AI, and uploading to YouTube automatically.
|
| 15 |
+
|
| 16 |
+
## β¨ Features
|
| 17 |
+
|
| 18 |
+
- π₯ **Instagram Reel Downloader**: Download Reels in HD quality with original audio
|
| 19 |
+
- π€ **AI Metadata Generator**: Generate SEO-optimized titles, descriptions, tags using Google Gemini 2.0
|
| 20 |
+
- π€ **YouTube Auto Uploader**: Upload directly to YouTube with automated metadata
|
| 21 |
+
- π΅ **Video Editor**: Add background music and text overlays to videos
|
| 22 |
+
- π **Multi-User Support**: Each user has their own isolated YouTube authentication
|
| 23 |
+
|
| 24 |
+
## π Quick Start
|
| 25 |
+
|
| 26 |
+
1. **Set Environment Variables** (Required):
|
| 27 |
+
- `GEMINI_API_KEY`: Get from [Google AI Studio](https://makersuite.google.com/app/apikey)
|
| 28 |
+
- `IG_SESSIONID`: Instagram session cookie (see below)
|
| 29 |
+
- `SECRET_KEY`: Generate with `python -c "import secrets; print(secrets.token_hex(32))"`
|
| 30 |
+
|
| 31 |
+
2. **Upload client_secret.json**:
|
| 32 |
+
- Create OAuth credentials in [Google Cloud Console](https://console.cloud.google.com)
|
| 33 |
+
- Enable YouTube Data API v3
|
| 34 |
+
- Download and upload `client_secret.json` as a secret file
|
| 35 |
+
|
| 36 |
+
3. **Access the app** and start automating!
|
| 37 |
+
|
| 38 |
+
## π Getting Instagram Session ID
|
| 39 |
+
|
| 40 |
+
1. Open Instagram in Chrome
|
| 41 |
+
2. Press `F12` β Application β Cookies β instagram.com
|
| 42 |
+
3. Find `sessionid` cookie and copy its value
|
| 43 |
+
4. Add to Space secrets as `IG_SESSIONID`
|
| 44 |
+
|
| 45 |
+
## π― Usage
|
| 46 |
+
|
| 47 |
+
1. **Sign in with YouTube** (OAuth 2.0 - secure & one-time)
|
| 48 |
+
2. **Paste Instagram Reel URL**
|
| 49 |
+
3. **(Optional) Add video editing**:
|
| 50 |
+
- Background music from YouTube or local file
|
| 51 |
+
- Text overlays with custom positioning
|
| 52 |
+
4. **AI generates metadata** (title, description, tags, hashtags)
|
| 53 |
+
5. **Video uploads to YouTube** automatically
|
| 54 |
+
|
| 55 |
+
## π οΈ Tech Stack
|
| 56 |
+
|
| 57 |
+
- **Backend**: Flask, Python 3.11, Gunicorn
|
| 58 |
+
- **AI**: Google Gemini 2.0 Flash
|
| 59 |
+
- **APIs**: YouTube Data API v3, Instagram
|
| 60 |
+
- **Video Processing**: MoviePy, OpenCV, FFmpeg
|
| 61 |
+
- **Auth**: OAuth 2.0, Session-based authentication
|
| 62 |
+
|
| 63 |
+
## π Environment Variables
|
| 64 |
+
|
| 65 |
+
Set these in your Hugging Face Space settings:
|
| 66 |
+
|
| 67 |
+
```env
|
| 68 |
+
ENVIRONMENT=production
|
| 69 |
+
GEMINI_API_KEY=your-gemini-api-key-here
|
| 70 |
+
IG_SESSIONID=your-instagram-sessionid-here
|
| 71 |
+
SECRET_KEY=your-secret-key-here
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
## π Security
|
| 75 |
+
|
| 76 |
+
- OAuth 2.0 for YouTube authentication
|
| 77 |
+
- Session-based user isolation
|
| 78 |
+
- Secure cookie handling
|
| 79 |
+
- No password storage
|
| 80 |
+
- Automatic credential refresh
|
| 81 |
+
|
| 82 |
+
## π OAuth Redirect URI
|
| 83 |
+
|
| 84 |
+
Add this to your Google Cloud OAuth credentials:
|
| 85 |
+
```
|
ai_genrator.py
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import google.generativeai as genai
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
from typing import Dict, List, Optional
|
| 5 |
+
import cv2
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
import base64
|
| 9 |
+
import tempfile
|
| 10 |
+
from PIL import Image
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
# Load environment variables from .env file
|
| 14 |
+
load_dotenv()
|
| 15 |
+
|
| 16 |
+
class AIMetadataGenerator:
    """Generate viral-oriented YouTube Shorts metadata from video frames using Gemini.

    Samples frames from a video, asks Gemini 2.0 Flash to describe them, then
    derives a title, description, tags, keywords and hashtags from that analysis.
    """

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the AI Metadata Generator with Gemini 2.0 Flash.

        Args:
            api_key: Gemini API key. Falls back to the GEMINI_API_KEY env var
                (loaded from .env at module import time).

        Raises:
            ValueError: If no API key is available from either source.
        """
        self.api_key = api_key or os.getenv('GEMINI_API_KEY')
        if not self.api_key:
            raise ValueError("Gemini API key not found. Please set GEMINI_API_KEY in .env file or pass it as parameter.")

        genai.configure(api_key=self.api_key)  # type: ignore
        self.model = genai.GenerativeModel('gemini-2.0-flash-exp')  # type: ignore

    def extract_video_frames(self, video_path: str, num_frames: int = 3) -> 'List[Image.Image]':
        """Extract representative frames from a video for AI analysis.

        Frames are sampled at evenly spaced interior positions (start and end
        excluded), converted BGR->RGB, and resized to 512x512 for efficient
        model input.

        Note: the original annotation said ``List[str]`` but the method has
        always returned PIL images; the annotation is corrected here.

        Returns:
            A list of PIL images; empty on any failure (unopenable file,
            zero frame count, read error).
        """
        cap = None
        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                print(f"Error extracting frames: cannot open video {video_path}")
                return []

            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            if frame_count <= 0 or num_frames <= 0:
                # Nothing to sample (corrupt container or bad request).
                return []

            frames = []
            # Extract frames at regular intervals
            for i in range(num_frames):
                frame_number = int((i + 1) * frame_count / (num_frames + 1))
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
                ret, frame = cap.read()

                if ret:
                    # Convert BGR to RGB
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    # Convert to PIL Image
                    pil_image = Image.fromarray(frame_rgb)
                    # Resize for efficient processing
                    pil_image = pil_image.resize((512, 512), Image.Resampling.LANCZOS)
                    frames.append(pil_image)

            return frames

        except Exception as e:
            print(f"Error extracting frames: {e}")
            return []
        finally:
            # Always release the capture handle, even on error (the original
            # leaked it when an exception fired before cap.release()).
            if cap is not None:
                cap.release()

    def analyze_video_content(self, video_path: str) -> str:
        """Describe what the video shows by running Gemini vision on sampled frames.

        Each frame is analyzed individually (text, subject, setting, mood),
        then a second model call condenses the per-frame analyses into one
        summary, with emphasis on any on-screen text.

        Returns:
            A textual summary, or a short fallback message on failure.
        """
        try:
            # Extract video frames
            frames = self.extract_video_frames(video_path, 3)  # 3 frames for better coverage
            if not frames:
                return "Unable to analyze video content"

            # Analyze frames for text and visual content
            combined_analysis = []

            for i, frame in enumerate(frames):
                prompt = f"""
                Analyze this video frame {i+1} and describe what you see in detail. Focus on:
                1. Any visible text in the frame
                2. Main subject/person and their actions
                3. Setting/location
                4. Key objects or activities visible
                5. Overall mood and style

                Extract any text that appears in the image if present.
                Provide detailed analysis of what's shown in the frame.
                """

                response = self.model.generate_content([prompt, frame])
                combined_analysis.append(response.text.strip())

            # Combine analyses from all frames into one coherent summary.
            final_prompt = f"""
            Based on the following analyses of different frames from a video, provide a comprehensive understanding of what the video is about:

            FRAME ANALYSES:
            {' '.join(combined_analysis)}

            Create a concise summary that captures the essence of this video, focusing especially on any text that appears in the frames.
            """

            final_response = self.model.generate_content(final_prompt)
            return final_response.text.strip()

        except Exception as e:
            print(f"Error analyzing video content: {e}")
            return "Video content analysis unavailable"

    def generate_title(self, video_analysis: str) -> str:
        """Generate an engaging YouTube Shorts title (with trending hashtags) from the analysis.

        Returns:
            The title string (quotes stripped), or a canned fallback on error.
        """
        # NOTE: "tranding" typo fixed in the prompt below — it was sent to the
        # model verbatim and degraded instruction quality.
        prompt = f"""
        Based on this video analysis, create a catchy YouTube Shorts title:

        VIDEO CONTENT: {video_analysis}

        Requirements:
        - Make it extremely engaging, click-worthy and optimized for high CTR
        - Keep it under 99 characters including hashtags
        - Include trending relevant hashtags related to short video directly in the title
        - Focus on the most intriguing aspect of the video, especially any text that appears in the video
        - Use trending language patterns popular in viral shorts
        - Consider using emojis strategically
        - Make it provocative but not clickbait

        The title should follow formats that are proven to work for viral shorts.
        Return only the title with hashtags, nothing else.
        """

        try:
            response = self.model.generate_content(prompt)
            return response.text.strip().replace('"', '').replace("'", "")
        except Exception as e:
            print(f"Error generating title: {e}")
            return "🔥 Viral Moment You Won't Believe! #shorts #viral #trending"

    def generate_description(self, video_analysis: str) -> str:
        """Generate a YouTube Shorts description optimized for virality.

        The prompt enforces a fixed structure (hook lines, "Keywords:" line,
        "Hashtags:" line, CTA, copyright disclaimer) which
        generate_complete_metadata() later parses line-by-line.

        Returns:
            The generated description, or _fallback_description() on error.
        """
        prompt = f"""
        Create a YouTube Shorts description based on this video analysis:

        VIDEO CONTENT: {video_analysis}

        Structure the description exactly like this:

        Write exactly 3-4 lines of engaging, high-emotion description that creates curiosity
        Add a line break then write "Keywords:" followed by exactly 40-50 trending keywords line by line
        Add a line break then write "Hashtags:" followed by exactly 60-70 viral hashtags (MUST include #shorts, #viral, #trending #shortvideo #shortsfeed #ytshorts #viralvideo more like this and other relevant ones)
        Add a line break then add this call-to-action: "👍 subscribe for more content like this! 🔔 Turn on notifications!"
        Add a line break then make copyright disclaimer to protect from strikes And copyright holder.

        Make the description extremely engaging and optimized for Shorts algorithm with high-emotion language patterns.
        Focus on keywords and hashtags that are currently trending for short-form viral content.
        If there was any text in the video, incorporate it into the description.
        """

        try:
            response = self.model.generate_content(prompt)
            return response.text.strip()
        except Exception as e:
            print(f"Error generating description: {e}")
            return self._fallback_description()

    @staticmethod
    def _extract_json(text: str) -> Dict:
        """Parse JSON from a model response, tolerating markdown code fences.

        Gemini frequently wraps JSON answers in ```json ... ``` fences, which
        made the original bare json.loads() fail and silently fall back to
        stub tags. Strips an opening fence (with optional language tag) and a
        closing fence before parsing.

        Raises:
            json.JSONDecodeError: If the cleaned text still isn't valid JSON.
        """
        cleaned = text.strip()
        if cleaned.startswith("```"):
            newline = cleaned.find("\n")
            # Drop the whole first line ("```" or "```json"); if the fence is
            # on a single line, just strip the backticks.
            cleaned = cleaned[newline + 1:] if newline != -1 else cleaned.lstrip("`")
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]
        return json.loads(cleaned.strip())

    def generate_tags_and_keywords(self, video_analysis: str) -> Dict:
        """Generate optimized tags and keywords for a viral Short.

        Returns:
            Dict with "tags", "keywords" and "trending_keywords" lists; a
            minimal stub dict if the model call or JSON parsing fails.
        """
        prompt = f"""
        Based on this video analysis, generate optimized tags and keywords for a viral YouTube Short:

        VIDEO CONTENT: {video_analysis}

        Provide your response as JSON with these exact fields:
        1. "tags": List of 30 tags optimized for YouTube search algorithm (keep each under 30 characters)
        2. "keywords": List of 40 relevant keywords (single words or short phrases) related to the content
        3. "trending_keywords": List of 15 currently trending keywords related to the content

        Tags should include general category terms, specific content descriptors, and trending terms.
        Format the response as valid JSON only - no explanation or other text.
        """

        try:
            response = self.model.generate_content(prompt)
            # Tolerate markdown-fenced JSON (common Gemini behavior).
            return self._extract_json(response.text)
        except Exception as e:
            print(f"Error generating tags and keywords: {e}")
            return {
                "tags": ["shorts", "viral", "trending"],
                "keywords": ["viral video", "trending content", "shorts"],
                "trending_keywords": ["viral shorts", "trending now"]
            }

    def _fallback_description(self) -> str:
        """Fallback description when AI generation fails."""
        return """Amazing video content that will keep you entertained!
Watch this incredible moment captured on video.
Perfect for sharing with friends and family.
Don't forget to like and subscribe for more content.
This video showcases some really cool stuff.
You won't believe what happens in this video.
Make sure to watch until the end.
Comment below what you think about this.
Share this video if you enjoyed it.
Thanks for watching our content!

Keywords: viral video, trending content, amazing moments, entertainment, social media, short video, funny clips, must watch, incredible, awesome, cool stuff, viral clips, trending now, popular video, engaging content, shareable, entertaining, video content, social sharing, watch now

Hashtags: #shorts #viral #trending #amazing #entertainment #video #content #socialmedia #funny #cool #awesome #mustwatch #incredible #popular #engaging #shareable #entertaining #videooftheday #trend #viral2024 #shortsvideo #viralshorts #trendingshorts #amazingvideo #viralcontent #shortsfeed #explore #fyp #foryou #viralmoment

⚠️ Copyright Disclaimer: This content is used for educational and entertainment purposes. All rights belong to their respective owners. If you are the owner and want this removed, please contact us."""

    def generate_complete_metadata(self, video_path: str, **kwargs) -> Dict:
        """Generate the complete metadata package from video frame analysis.

        Pipeline: analyze frames -> title -> description -> tags/keywords,
        then parse "Hashtags:" and "Keywords:" lines out of the description
        (relies on the line format requested in generate_description()).

        Returns:
            Dict with video_analysis, title, description, tags, hashtags,
            keywords, trending_keywords and an ISO generated_at timestamp.
        """

        print("🤖 Analyzing video frames with AI...")
        video_analysis = self.analyze_video_content(video_path)
        print(f"📹 Video analysis complete")

        print("🎯 Generating viral shorts title with hashtags...")
        title = self.generate_title(video_analysis)

        print("📝 Generating description optimized for shorts...")
        description = self.generate_description(video_analysis)

        print("🏷️ Generating optimized tags and keywords...")
        tags_keywords = self.generate_tags_and_keywords(video_analysis)

        # Extract hashtags/keywords from the structured description lines.
        description_lines = description.split('\n')
        hashtags = []
        keywords = []

        for line in description_lines:
            if line.startswith('Hashtags:'):
                hashtags_text = line.replace('Hashtags:', '').strip()
                hashtags = [tag.strip() for tag in hashtags_text.split() if tag.startswith('#')]
            elif line.startswith('Keywords:'):
                keywords_text = line.replace('Keywords:', '').strip()
                keywords = [kw.strip() for kw in keywords_text.split(',')]

        metadata = {
            "video_analysis": video_analysis,
            "title": title,
            "description": description,
            "tags": tags_keywords.get("tags", []),
            "hashtags": hashtags,
            "keywords": tags_keywords.get("keywords", []),
            "trending_keywords": tags_keywords.get("trending_keywords", []),
            "generated_at": datetime.now().isoformat()
        }

        return metadata

    def save_metadata(self, metadata: Dict, output_path: str):
        """Save metadata to a JSON file (UTF-8, pretty-printed).

        Errors are reported to stdout rather than raised, matching the
        best-effort style of the rest of this module.
        """
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)
            print(f"Metadata saved to: {output_path}")
        except Exception as e:
            print(f"Error saving metadata: {e}")
|
| 250 |
+
|
| 251 |
+
# Example usage
def _run_example() -> None:
    """Demo: generate metadata for a sample video and save it to a JSON file."""
    # The constructor picks the API key up from the environment (.env).
    generator = AIMetadataGenerator()

    # Sample video to process.
    video_path = r"c:\Users\DELL\OneDrive\Desktop\youtube automation ai\sample_video.mp4"

    # Run the full analysis -> title -> description -> tags pipeline.
    metadata = generator.generate_complete_metadata(video_path=video_path)

    # Show the headline results.
    print("Generated Metadata:")
    print("-" * 50)
    print(f"Title: {metadata['title']}")
    print(f"\nDescription:\n{metadata['description']}")

    # Persist everything for later inspection.
    output_file = r"c:\Users\DELL\OneDrive\Desktop\youtube automation ai\metadata_output.json"
    generator.save_metadata(metadata, output_file)


if __name__ == "__main__":
    _run_example()
|
app.py
ADDED
|
@@ -0,0 +1,1027 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, render_template, request, jsonify, send_file, redirect, url_for, session
|
| 2 |
+
import os
|
| 3 |
+
import uuid
|
| 4 |
+
import threading
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from typing import Dict, Any, Optional
|
| 7 |
+
import logging
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
import secrets
|
| 10 |
+
|
| 11 |
+
# Load environment variables from .env file
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
# β
FIX: Allow insecure transport for local OAuth (development only)
|
| 15 |
+
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
|
| 16 |
+
|
| 17 |
+
# Import our modules
|
| 18 |
+
from downloader import download_reel_with_audio # Ensure downloader.py defines this function
|
| 19 |
+
from uploader import upload_to_youtube, check_authentication, authenticate_youtube, get_youtube_service, get_channel_info, logout_youtube
|
| 20 |
+
from ai_genrator import AIMetadataGenerator # Fixed import to use the correct class
|
| 21 |
+
from video_editor import VideoEditor # Import video editor
|
| 22 |
+
|
| 23 |
+
# Configure logging
|
| 24 |
+
logging.basicConfig(level=logging.INFO)
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
app = Flask(__name__)

# Configuration
DOWNLOAD_FOLDER = 'downloads'  # staging area for downloaded/edited videos
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')  # may be None; AI generation then falls back
# ✅ NEW: Store user tokens in a separate folder (one token file per browser session)
USER_TOKENS_FOLDER = 'user_tokens'
os.makedirs(USER_TOKENS_FOLDER, exist_ok=True)

# Ensure download folder exists
os.makedirs(DOWNLOAD_FOLDER, exist_ok=True)

# Task storage (in production, use Redis or database)
# task_id -> TaskStatus; in-memory only, so progress is lost on restart
tasks = {}
|
| 41 |
+
|
| 42 |
+
class TaskStatus:
    """Mutable progress record for one background download/edit/upload job."""

    def __init__(self, task_id: str):
        # Identity and lifecycle bookkeeping
        self.task_id = task_id
        self.status = 'started'
        self.progress = 0
        self.message = 'Task started'
        # Outcome fields, filled in as the job advances
        self.error = None
        self.result = None
        self.metadata = None
        self.youtube_url = None
        self.created_at = datetime.now()
|
| 53 |
+
|
| 54 |
+
def update_task_status(task_id: str, status: str, message: str = '', progress: int = 0, **kwargs):
    """Update the stored TaskStatus record for a running background job.

    Unknown task ids are silently ignored (the task may have been evicted).
    Extra keyword arguments (e.g. metadata=, youtube_url=, error=, result=)
    are set as attributes on the task object.
    """
    if task_id in tasks:
        task = tasks[task_id]
        task.status = status
        task.message = message
        task.progress = progress

        # Update additional fields supplied by the caller
        for key, value in kwargs.items():
            setattr(task, key, value)

        logger.info(f"Task {task_id}: {status} - {message}")
|
| 67 |
+
|
| 68 |
+
# β
NEW: Helper function to get user-specific token path
|
| 69 |
+
def get_user_token_path():
    """Return the YouTube OAuth token file path for the current session.

    Lazily assigns an anonymous per-browser ``user_id`` so each visitor
    gets an isolated token file under USER_TOKENS_FOLDER.
    """
    if 'user_id' not in session:
        session['user_id'] = str(uuid.uuid4())
    return os.path.join(USER_TOKENS_FOLDER, f"token_{session['user_id']}.json")
|
| 76 |
+
|
| 77 |
+
def background_upload_task(task_id: str, reel_url: str, editing_options: Optional[dict] = None, token_path: str = 'token.json'):
    """Run the full reel pipeline in a background (daemon) thread.

    Pipeline: download the Instagram reel -> optionally edit it (background
    music and/or text overlays) -> generate AI metadata -> upload to YouTube.
    Progress is reported through ``update_task_status``; temporary files are
    always removed in the ``finally`` block.

    Args:
        task_id: Key into the module-level ``tasks`` registry.
        reel_url: Instagram reel URL to download.
        editing_options: Optional dict with keys 'enabled', 'music_url' or
            'music_file', 'music_volume', 'text_overlays'.
        token_path: Per-user YouTube OAuth token file to upload with.
    """
    video_path = None
    edited_video_path = None
    uploaded_music_path = None  # track user-uploaded music for cleanup

    try:
        update_task_status(task_id, 'downloading', 'Downloading reel from Instagram...', 10)

        # Instagram access requires a valid sessionid cookie from .env
        sessionid = os.getenv("IG_SESSIONID")
        if not sessionid:
            raise Exception("IG_SESSIONID not configured in .env file")

        # Validate sessionid format (basic sanity check, not a real login test)
        if len(sessionid) < 20 or not sessionid.replace('%', '').replace(':', '').isalnum():
            raise Exception("IG_SESSIONID appears to be invalid. Please update it in the .env file.")

        # Download the reel with sessionid
        try:
            video_path = download_reel_with_audio(reel_url, DOWNLOAD_FOLDER, sessionid)
        except Exception as download_error:
            error_msg = str(download_error)
            # Map common Instagram auth/permission failures to actionable messages
            if "403" in error_msg or "Forbidden" in error_msg or "metadata failed" in error_msg.lower():
                raise Exception(
                    "Instagram session expired or invalid. Please update your IG_SESSIONID in the .env file"
                )
            elif "Login required" in error_msg or "Not logged in" in error_msg:
                raise Exception(
                    "Instagram login required. Your session has expired. Please update IG_SESSIONID in .env file."
                )
            else:
                raise Exception(f"Failed to download reel: {error_msg}")

        if not video_path or not os.path.exists(video_path):
            raise Exception("Failed to download video file")

        logger.info(f"✅ Video downloaded: {video_path}")

        # Determine final video path (may be replaced by the edited version)
        final_video_path = video_path

        # Optional editing step — supports YouTube music URLs AND uploaded local files
        if editing_options and editing_options.get('enabled'):
            try:
                update_task_status(task_id, 'editing', 'Editing video (adding music & text overlays)...', 30)

                logger.info(f"🎬 Starting video editing with options: {editing_options}")

                editor = VideoEditor()

                # Derive the edited filename from the original
                base_name = os.path.splitext(os.path.basename(video_path))[0]
                edited_video_path = os.path.join(DOWNLOAD_FOLDER, f"{base_name}_edited.mp4")

                # Music source may be either a YouTube URL or a local file path
                music_source = editing_options.get('music_url') or editing_options.get('music_file')
                uploaded_music_path = editing_options.get('music_file')  # track for cleanup

                editor.edit_video(
                    video_path=video_path,
                    output_path=edited_video_path,
                    music_url=music_source,  # can be either YouTube URL or local file path
                    music_volume=editing_options.get('music_volume', 0.3),
                    text_overlays=editing_options.get('text_overlays')
                )

                logger.info(f"✅ Video editing completed: {edited_video_path}")

                # Use edited video for upload
                final_video_path = edited_video_path

            except Exception as edit_error:
                error_msg = str(edit_error)
                logger.error(f"❌ Video editing failed: {error_msg}")

                # Editing is best-effort: fall back to the original download
                logger.warning("⚠️ Using original video due to editing failure")
                final_video_path = video_path

                # Surface the skipped-edit condition to the UI
                update_task_status(
                    task_id,
                    'generating_metadata',
                    f'Skipped editing (error: {error_msg[:50]}...). Using original video...',
                    50
                )

        update_task_status(task_id, 'generating_metadata', 'AI analyzing video content and generating metadata...', 60)

        # AI metadata generation based on the actual video content, with a
        # static fallback if the AI service is unavailable.
        try:
            ai_generator = AIMetadataGenerator(GEMINI_API_KEY)

            generated_metadata = ai_generator.generate_complete_metadata(
                video_path=final_video_path,
                target_audience="social media users"
            )

            # Extract only the fields the YouTube upload needs
            metadata = {
                'title': generated_metadata['title'],
                'description': generated_metadata['description'],
                'tags': generated_metadata['tags'],
                'keywords': generated_metadata['keywords'],
                'hashtags': generated_metadata['hashtags'],
                'video_analysis': generated_metadata.get('video_analysis', 'Content analysis unavailable')
            }

            logger.info("✅ AI metadata generated successfully")
            logger.info(f"📝 Title: {metadata['title']}")

        except Exception as e:
            logger.warning(f"AI metadata generation failed: {str(e)}. Using fallback metadata.")
            # Fallback metadata built from the filename (was a dead variable before)
            filename = os.path.basename(final_video_path)
            metadata = {
                'title': f'Amazing Social Media Content - {filename}',
                'description': f'Check out this amazing content!\n\nOriginal source: {reel_url}\n\n#SocialMedia #Viral #Content #Entertainment',
                'tags': ['social media', 'viral', 'entertainment', 'content', 'video'],
                'keywords': ['social media video', 'viral content', 'entertainment'],
                'hashtags': ['#SocialMedia', '#Viral', '#Content']
            }

        update_task_status(task_id, 'uploading', 'Uploading to YouTube...', 85, metadata=metadata)

        # Upload with the caller-supplied per-user token
        try:
            video_id = upload_to_youtube(
                video_path=final_video_path,
                title=metadata['title'],
                description=metadata['description'],
                tags=metadata['tags'],
                privacy_status="public",
                token_path=token_path
            )

            youtube_url = f"https://www.youtube.com/watch?v={video_id}"

            update_task_status(
                task_id,
                'completed',
                'Upload completed successfully!',
                100,
                result={'video_id': video_id},
                youtube_url=youtube_url,
                metadata=metadata
            )

        except Exception as upload_error:
            raise Exception(f"YouTube upload failed: {str(upload_error)}")

    except Exception as e:
        logger.error(f"Task {task_id} failed: {str(e)}")
        update_task_status(task_id, 'failed', str(e), error=str(e))
    finally:
        # Clean up temporary artifacts regardless of success or failure
        for path in [video_path, edited_video_path, uploaded_music_path]:
            if path and os.path.exists(path):
                try:
                    os.remove(path)
                    logger.info(f"🧹 Cleaned up temporary file: {path}")
                except Exception as cleanup_error:
                    logger.warning(f"⚠️ Could not clean up file {path}: {cleanup_error}")
|
| 246 |
+
|
| 247 |
+
@app.route('/')
def index():
    """Serve the main landing page."""
    page = render_template('index.html')
    return page
|
| 251 |
+
|
| 252 |
+
@app.route('/downloader')
def downloader_page():
    """Serve the reel downloader page."""
    page = render_template('downloader.html')
    return page
|
| 256 |
+
|
| 257 |
+
@app.route('/metadata-generator')
def metadata_generator_page():
    """Serve the AI metadata generator page."""
    page = render_template('metadata_generator.html')
    return page
|
| 261 |
+
|
| 262 |
+
@app.route('/uploader')
def uploader_page():
    """Serve the uploader page, flagging whether YouTube auth already exists."""
    return render_template('uploader.html', authenticated=check_authentication())
|
| 267 |
+
|
| 268 |
+
@app.route('/check-auth')
def check_auth():
    """Report YouTube authentication status (and channel info) for this session."""
    try:
        # Each browser session has its own token file
        token_path = get_user_token_path()
        authed = check_authentication(token_path)
        # Only fetch channel details when the token is actually valid
        channel = get_channel_info(token_path) if authed else None
        return jsonify({
            'authenticated': authed,
            'channel': channel
        })
    except Exception as e:
        logger.error(f"Error checking authentication: {str(e)}")
        return jsonify({'authenticated': False, 'error': str(e)})
|
| 285 |
+
|
| 286 |
+
@app.route('/authenticate', methods=['POST'])
def authenticate():
    """Run the YouTube OAuth flow for the current session's token file."""
    try:
        creds = authenticate_youtube(get_user_token_path())
        if not creds:
            return jsonify({'success': False, 'error': 'Authentication failed'})
        return jsonify({'success': True, 'message': 'Authentication successful'})
    except Exception as e:
        logger.error(f"Authentication error: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 299 |
+
|
| 300 |
+
@app.route('/auth/start')
def start_auth():
    """Begin the Google OAuth flow and return the authorization URL.

    Stores just enough state in the Flask session for /auth/callback to
    reconstruct the flow object later.
    """
    try:
        import google_auth_oauthlib.flow

        scopes = ["https://www.googleapis.com/auth/youtube.upload"]
        client_secrets_file = "client_secret.json"

        if not os.path.exists(client_secrets_file):
            return jsonify({'error': 'client_secret.json not found'})

        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)

        # Build the callback URL from the incoming request's host
        redirect_uri = request.url_root.rstrip('/') + '/auth/callback'

        # Outside production, localhost callbacks must use plain http
        if os.getenv('ENVIRONMENT') != 'production':
            if 'localhost' in redirect_uri or '127.0.0.1' in redirect_uri:
                redirect_uri = redirect_uri.replace('https://', 'http://')

        flow.redirect_uri = redirect_uri

        auth_url, state = flow.authorization_url(
            prompt='consent',
            access_type='offline',
            include_granted_scopes='true'
        )

        # Persist per-user flow state in the session (not a shared global)
        session['oauth_flow_state'] = state
        session['oauth_flow_data'] = {
            'client_secrets_file': client_secrets_file,
            'redirect_uri': redirect_uri,
            'scopes': scopes
        }

        return jsonify({'auth_url': auth_url})

    except Exception as e:
        logger.error(f"Error starting auth: {str(e)}")
        return jsonify({'error': str(e)})
|
| 344 |
+
|
| 345 |
+
@app.route('/auth/callback')
def auth_callback():
    """Handle the Google OAuth redirect for the current user.

    Rebuilds the OAuth flow from session data saved by /auth/start,
    exchanges the authorization code for credentials, writes them to the
    session's token file, and returns a small self-closing HTML page.
    (Fixes mojibake-corrupted status icons in the returned HTML.)
    """
    try:
        # Reconstruct flow from session data
        flow_data = session.get('oauth_flow_data')
        state = session.get('oauth_flow_state')

        if not flow_data:
            return "OAuth flow not found. Please restart the authentication process.", 400

        import google_auth_oauthlib.flow
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            flow_data['client_secrets_file'], flow_data['scopes'])
        flow.redirect_uri = flow_data['redirect_uri']

        auth_code = request.args.get('code')
        error = request.args.get('error')

        if error:
            return f"Authentication failed: {error}", 400

        if not auth_code:
            return "No authorization code received", 400

        try:
            flow.fetch_token(code=auth_code)
            credentials = flow.credentials
        except Exception as token_error:
            logger.error(f"Token exchange failed: {str(token_error)}")
            return f"Token exchange failed: {str(token_error)}", 500

        # Save credentials to this user's own token file
        token_path = get_user_token_path()
        with open(token_path, 'w') as token:
            token.write(credentials.to_json())

        # Clean up session
        session.pop('oauth_flow_state', None)
        session.pop('oauth_flow_data', None)

        return """
        <html>
        <head>
            <style>
                body {
                    font-family: 'Inter', sans-serif;
                    background: linear-gradient(135deg, #0f0f23 0%, #1a1a3e 100%);
                    color: white;
                    display: flex;
                    justify-content: center;
                    align-items: center;
                    height: 100vh;
                    margin: 0;
                }
                .container {
                    text-align: center;
                    background: rgba(255, 255, 255, 0.05);
                    backdrop-filter: blur(20px);
                    border: 1px solid rgba(255, 255, 255, 0.1);
                    border-radius: 20px;
                    padding: 50px;
                    box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5);
                }
                h2 {
                    color: #00E676;
                    font-size: 32px;
                    margin-bottom: 20px;
                }
                p {
                    font-size: 18px;
                    color: rgba(255, 255, 255, 0.7);
                }
                .icon {
                    font-size: 80px;
                    margin-bottom: 20px;
                }
            </style>
        </head>
        <body>
            <div class="container">
                <div class="icon">✅</div>
                <h2>Authentication Successful!</h2>
                <p>You can now close this tab and return to the application.</p>
            </div>
            <script>
                setTimeout(function() {
                    window.close();
                }, 3000);
            </script>
        </body>
        </html>
        """

    except Exception as e:
        logger.error(f"Error in auth callback: {str(e)}")
        return f"""
        <html>
        <head>
            <style>
                body {{
                    font-family: 'Inter', sans-serif;
                    background: linear-gradient(135deg, #0f0f23 0%, #1a1a3e 100%);
                    color: white;
                    display: flex;
                    justify-content: center;
                    align-items: center;
                    height: 100vh;
                    margin: 0;
                }}
                .container {{
                    text-align: center;
                    background: rgba(255, 255, 255, 0.05);
                    backdrop-filter: blur(20px);
                    border: 1px solid rgba(255, 255, 255, 0.1);
                    border-radius: 20px;
                    padding: 50px;
                    box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5);
                }}
                h2 {{
                    color: #FF1744;
                    font-size: 32px;
                    margin-bottom: 20px;
                }}
                p {{
                    font-size: 16px;
                    color: rgba(255, 255, 255, 0.7);
                }}
                .icon {{
                    font-size: 80px;
                    margin-bottom: 20px;
                }}
            </style>
        </head>
        <body>
            <div class="container">
                <div class="icon">❌</div>
                <h2>Authentication Failed</h2>
                <p>{str(e)}</p>
            </div>
        </body>
        </html>
        """, 500
|
| 488 |
+
|
| 489 |
+
@app.route('/download', methods=['POST'])
def download_reel():
    """Download an Instagram reel to the downloads folder (no upload)."""
    try:
        payload = request.get_json(silent=True) or {}
        reel_url = payload.get('url') or request.form.get('url') or request.args.get('url')

        if not reel_url:
            return jsonify({'success': False, 'error': 'URL is required'})

        # Instagram access requires a sessionid cookie from the environment
        sessionid = os.getenv("IG_SESSIONID")
        if not sessionid:
            return jsonify({
                'success': False,
                'error': 'IG_SESSIONID not configured. Please add it to your .env file.'
            })

        # Basic sanity check on the cookie value
        if len(sessionid) < 20 or not sessionid.replace('%', '').replace(':', '').isalnum():
            return jsonify({
                'success': False,
                'error': 'IG_SESSIONID appears to be invalid. Please update it in the .env file.'
            })

        try:
            video_path = download_reel_with_audio(reel_url, DOWNLOAD_FOLDER, sessionid)
        except Exception as download_error:
            error_msg = str(download_error)
            # Translate common Instagram auth failures into actionable responses
            if "403" in error_msg or "Forbidden" in error_msg or "metadata failed" in error_msg.lower():
                return jsonify({
                    'success': False,
                    'error': 'Instagram session expired or invalid',
                    'details': 'Please update your IG_SESSIONID:\n1. Log into Instagram\n2. Copy sessionid cookie from browser\n3. Update .env file\n4. Restart app'
                })
            if "Login required" in error_msg or "Not logged in" in error_msg:
                return jsonify({
                    'success': False,
                    'error': 'Instagram login required. Session expired.',
                    'details': 'Please update IG_SESSIONID in .env file'
                })
            return jsonify({'success': False, 'error': f'Download failed: {error_msg}'})

        if not video_path or not os.path.exists(video_path):
            return jsonify({'success': False, 'error': 'Failed to download video'})

        return jsonify({
            'success': True,
            'message': 'Download completed',
            'filename': os.path.basename(video_path),
            'filepath': video_path
        })

    except Exception as e:
        logger.error(f"Download error: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 550 |
+
|
| 551 |
+
@app.route('/auto-upload-async', methods=['POST'])
def auto_upload_async():
    """Kick off the download/edit/upload pipeline in a background thread."""
    try:
        # Require a valid per-user YouTube token before starting any work
        token_path = get_user_token_path()
        if not check_authentication(token_path):
            return jsonify({'success': False, 'error': 'Not authenticated with YouTube'}), 401

        payload = request.get_json(silent=True) or {}
        reel_url = payload.get('url') or request.form.get('url') or request.args.get('url')
        editing_options = payload.get('editing')

        if not reel_url:
            return jsonify({'success': False, 'error': 'URL is required'})

        task_id = str(uuid.uuid4())
        tasks[task_id] = TaskStatus(task_id)

        # Daemon worker: progress is polled via /task-status/<task_id>
        worker = threading.Thread(
            target=background_upload_task,
            args=(task_id, reel_url, editing_options, token_path),
            daemon=True
        )
        worker.start()

        return jsonify({
            'success': True,
            'task_id': task_id,
            'message': 'Upload process started'
        })

    except Exception as e:
        logger.error(f"Auto upload error: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 587 |
+
|
| 588 |
+
@app.route('/task-status/<task_id>')
def get_task_status(task_id):
    """Return the current progress snapshot for a background task."""
    try:
        task = tasks.get(task_id)
        if task is None:
            return jsonify({'success': False, 'error': 'Task not found'})

        snapshot = {
            'id': task.task_id,
            'status': task.status,
            'message': task.message,
            'progress': task.progress,
            'error': task.error,
            'result': task.result,
            'metadata': task.metadata,
            'youtube_url': task.youtube_url
        }
        return jsonify({'success': True, 'task': snapshot})

    except Exception as e:
        logger.error(f"Error getting task status: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 614 |
+
|
| 615 |
+
@app.route('/get-video/<filename>')
def get_video(filename):
    """Serve a downloaded video file as an attachment."""
    try:
        # Security: strip path components so only files inside downloads/ are served
        safe_filename = os.path.basename(filename)
        file_path = os.path.join(DOWNLOAD_FOLDER, safe_filename)

        logger.info(f"Serving video request for: {safe_filename}")
        logger.info(f"Full path: {file_path}")
        logger.info(f"File exists: {os.path.exists(file_path)}")

        if not os.path.exists(file_path):
            logger.error(f"File not found: {file_path}")
            # Include the directory listing to simplify debugging
            available_files = os.listdir(DOWNLOAD_FOLDER) if os.path.exists(DOWNLOAD_FOLDER) else []
            logger.error(f"Available files: {available_files}")
            return jsonify({
                'error': 'File not found',
                'requested': safe_filename,
                'available': available_files
            }), 404

        # Only recognised video extensions may be served
        if not file_path.lower().endswith(('.mp4', '.mov', '.avi', '.mkv', '.webm')):
            logger.error(f"Invalid file type: {file_path}")
            return jsonify({'error': 'Invalid file type'}), 400

        file_size = os.path.getsize(file_path)
        logger.info(f"Serving file: {safe_filename} ({file_size} bytes)")

        return send_file(
            file_path,
            mimetype='video/mp4',
            as_attachment=True,
            download_name=safe_filename
        )

    except Exception as e:
        logger.error(f"Error serving video: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return jsonify({'error': str(e)}), 500
|
| 660 |
+
|
| 661 |
+
@app.route('/cleanup/<filename>', methods=['POST'])
def cleanup_file(filename):
    """Delete a previously downloaded file once the user has fetched it."""
    try:
        # Basename-only lookup keeps deletion confined to DOWNLOAD_FOLDER.
        target = os.path.join(DOWNLOAD_FOLDER, os.path.basename(filename))

        if not os.path.exists(target):
            return jsonify({'success': False, 'error': 'File not found'})

        os.remove(target)
        logger.info(f"Cleaned up file: {target}")
        return jsonify({'success': True, 'message': 'File cleaned up'})
    except Exception as e:
        logger.error(f"Error cleaning up file: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 678 |
+
|
| 679 |
+
@app.route('/list-downloads')
def list_downloads():
    """List all files in downloads folder (for debugging).

    Returns JSON ``{'files': [{'name', 'size', 'exists'}, ...]}``.
    FIX: a file can be removed by /cleanup between ``listdir`` and
    ``getsize`` — guard the stat call instead of letting the whole
    endpoint 500 on that race.
    """
    try:
        files_info = []
        for name in os.listdir(DOWNLOAD_FOLDER):
            filepath = os.path.join(DOWNLOAD_FOLDER, name)
            try:
                size = os.path.getsize(filepath)
                exists = True
            except OSError:
                # Vanished between listdir() and getsize() (cleanup race)
                size = 0
                exists = False
            files_info.append({
                'name': name,
                'size': size,
                'exists': exists
            })
        return jsonify({'files': files_info})
    except Exception as e:
        return jsonify({'error': str(e)})
|
| 695 |
+
|
| 696 |
+
@app.route('/generate-preview', methods=['POST'])
def generate_preview():
    """Generate a metadata preview by downloading and analyzing the video.

    Accepts the reel URL ('url') from JSON body, form data, or query args.
    Downloads the reel to DOWNLOAD_FOLDER, runs the Gemini-based metadata
    generator on it, and returns title/description/tags/hashtags. If the AI
    step fails, responds with canned fallback metadata (still success=True).
    The temporary video file is always removed in the ``finally`` block.
    """
    try:
        # URL may arrive as JSON, form field, or query string
        data = request.get_json(silent=True) or {}
        reel_url = data.get('url') or request.form.get('url') or request.args.get('url')

        if not reel_url:
            return jsonify({'success': False, 'error': 'URL is required'})

        # Get sessionid from environment
        sessionid = os.getenv("IG_SESSIONID")
        if not sessionid:
            return jsonify({
                'success': False,
                'error': 'IG_SESSIONID not configured in .env file'
            })

        # Validate sessionid format — real Instagram session cookies are long
        if len(sessionid) < 20:
            return jsonify({
                'success': False,
                'error': 'IG_SESSIONID appears to be invalid. Please update it in .env file.'
            })

        # Download video temporarily for analysis
        temp_video_path = None
        try:
            # Download the video for analysis
            try:
                temp_video_path = download_reel_with_audio(reel_url, DOWNLOAD_FOLDER, sessionid)
            except Exception as download_error:
                error_msg = str(download_error)
                # 403/Forbidden/metadata errors usually mean the cookie expired
                if "403" in error_msg or "Forbidden" in error_msg or "metadata failed" in error_msg.lower():
                    return jsonify({
                        'success': False,
                        'error': 'Instagram session expired',
                        'details': 'Please update your IG_SESSIONID in the .env file and restart the app'
                    })
                else:
                    raise download_error

            # Create AI Metadata Generator instance
            ai_generator = AIMetadataGenerator(GEMINI_API_KEY)

            # Generate metadata based on actual video content
            generated_metadata = ai_generator.generate_complete_metadata(
                video_path=temp_video_path,
                target_audience="social media users"
            )

            return jsonify({
                'success': True,
                'title': generated_metadata['title'],
                'description': generated_metadata['description'],
                'tags': generated_metadata['tags'],
                'hashtags': generated_metadata['hashtags'],
                'video_analysis': generated_metadata.get('video_analysis', 'Analysis unavailable')
            })

        except Exception as e:
            # AI path failed — still report success with generic fallback metadata
            logger.warning(f"AI metadata generation preview failed: {str(e)}")
            # Fallback metadata
            return jsonify({
                'success': True,
                'title': 'Amazing Social Media Content',
                'description': f'Check out this amazing content from social media!\n\nSource: {reel_url}\n\n#SocialMedia #Viral #Content',
                'tags': ['social media', 'viral', 'entertainment', 'content'],
                'hashtags': ['#SocialMedia', '#Viral', '#Content', '#Entertainment']
            })
        finally:
            # Clean up temporary file regardless of outcome; best-effort only
            if temp_video_path and os.path.exists(temp_video_path):
                try:
                    os.remove(temp_video_path)
                except Exception:
                    pass

    except Exception as e:
        logger.error(f"Preview generation error: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 777 |
+
|
| 778 |
+
@app.route('/get-channel-info')
def channel_info():
    """Report YouTube authentication state and channel details for the
    current session user."""
    try:
        token_path = get_user_token_path()

        # Not logged in at all — short-circuit
        if not check_authentication(token_path):
            return jsonify({'authenticated': False})

        channel_data = get_channel_info(token_path)
        if channel_data is None:
            # Authenticated but channel lookup returned nothing
            return jsonify({'authenticated': True, 'channel': None})

        return jsonify({
            'authenticated': True,
            'channel': channel_data
        })
    except Exception as e:
        logger.error(f"Error getting channel info: {str(e)}")
        return jsonify({'authenticated': False, 'error': str(e)})
|
| 797 |
+
|
| 798 |
+
@app.route('/logout', methods=['POST'])
def logout():
    """Log the current user out of YouTube and clear their session."""
    try:
        token_path = get_user_token_path()
        revoked = logout_youtube(token_path)

        # On success, remove the stored token file and drop the user session
        if revoked and os.path.exists(token_path):
            os.remove(token_path)
            session.pop('user_id', None)

        return jsonify({'success': revoked})
    except Exception as e:
        logger.error(f"Error during logout: {str(e)}")
        return jsonify({'success': False, 'error': str(e)})
|
| 814 |
+
|
| 815 |
+
@app.route('/privacy-policy')
def privacy_policy():
    """Render the privacy policy page with today's date."""
    from datetime import datetime
    today = datetime.now().strftime('%B %d, %Y')
    return render_template('privacy_policy.html', current_date=today)
|
| 820 |
+
|
| 821 |
+
@app.route('/terms')
def terms():
    """Render the static terms-of-service page."""
    template_name = 'terms.html'
    return render_template(template_name)
|
| 825 |
+
|
| 826 |
+
# β
Production configuration with better error handling
|
| 827 |
+
# Deployment-specific configuration, chosen once at import time.
# Three environments: Hugging Face Spaces, generic production (Render),
# and local development.
if os.getenv('SPACE_ID'):  # Running on Hugging Face Spaces
    print("π Running on Hugging Face Spaces")
    os.environ['ENVIRONMENT'] = 'production'

    # Allow insecure transport for Hugging Face internal routing
    # NOTE(review): this relaxes OAuth HTTPS enforcement process-wide —
    # acceptable only because HF terminates TLS at its edge proxy.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'

    # Hugging Face specific settings
    app.config['SESSION_COOKIE_SECURE'] = False  # HF uses internal HTTP routing
    app.config['SESSION_COOKIE_HTTPONLY'] = True
    app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'

    # Server-side filesystem sessions (ephemeral — lost on Space restart)
    app.config['SESSION_TYPE'] = 'filesystem'
    app.config['SESSION_FILE_DIR'] = '/tmp/flask_session'
    app.config['SESSION_PERMANENT'] = False
    app.config['SESSION_USE_SIGNER'] = True
    app.config['PERMANENT_SESSION_LIFETIME'] = 3600  # seconds (1 hour)

    # Create session directory
    os.makedirs('/tmp/flask_session', exist_ok=True)

    PORT = 7860  # Hugging Face Spaces port
    app.debug = False

    print(f"π‘ Port: {PORT}")
    print("π Secure sessions enabled")

elif os.getenv('ENVIRONMENT') == 'production':
    # Disable insecure transport in production — OAuth must use HTTPS here
    if 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ:
        del os.environ['OAUTHLIB_INSECURE_TRANSPORT']

    # Set secure session — cookies only over HTTPS
    app.config['SESSION_COOKIE_SECURE'] = True
    app.config['SESSION_COOKIE_HTTPONLY'] = True
    app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'

    # Session configuration (same filesystem backend as above)
    app.config['SESSION_TYPE'] = 'filesystem'
    app.config['SESSION_FILE_DIR'] = '/tmp/flask_session'
    app.config['SESSION_PERMANENT'] = False
    app.config['SESSION_USE_SIGNER'] = True
    app.config['PERMANENT_SESSION_LIFETIME'] = 3600

    # Create session directory
    os.makedirs('/tmp/flask_session', exist_ok=True)

    # Use PORT from environment (Render provides this; defaults to 10000)
    PORT = int(os.getenv('PORT', 5000))

    # Disable debug mode
    app.debug = False
else:
    # Development mode — insecure OAuth transport is fine on localhost
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    PORT = int(os.getenv('PORT', 5000))

    # Session configuration for development
    app.config['SESSION_TYPE'] = 'filesystem'
    app.config['SESSION_PERMANENT'] = False
    app.config['SESSION_USE_SIGNER'] = True
    app.config['PERMANENT_SESSION_LIFETIME'] = 3600

# Secret key for sessions - MUST be set for production: the generated
# fallback invalidates every existing session on each process restart.
app.secret_key = os.getenv('SECRET_KEY', secrets.token_hex(32))
|
| 894 |
+
|
| 895 |
+
# β
Global error handlers
|
| 896 |
+
@app.errorhandler(404)
def not_found(error):
    """404 handler: JSON for API paths, HTML error page otherwise."""
    wants_json = request.path.startswith('/api/')
    if wants_json:
        return jsonify({'error': 'Endpoint not found'}), 404
    return render_template('404.html'), 404
|
| 901 |
+
|
| 902 |
+
@app.errorhandler(500)
def internal_error(error):
    """500 handler: log, then respond in the format the client expects."""
    logger.error(f"Internal server error: {str(error)}")
    if not request.path.startswith('/api/'):
        return render_template('500.html'), 500
    return jsonify({'error': 'Internal server error', 'message': 'Please try again later'}), 500
|
| 908 |
+
|
| 909 |
+
@app.errorhandler(Exception)
def handle_exception(e):
    """Last-resort handler: log the full traceback, then respond per path."""
    import traceback
    logger.error(f"Unhandled exception: {str(e)}")
    logger.error(traceback.format_exc())

    if not request.path.startswith('/api/'):
        return render_template('500.html'), 500

    # API clients get detail only when running with debug enabled
    return jsonify({
        'error': 'An unexpected error occurred',
        'message': str(e) if app.debug else 'Please try again later'
    }), 500
|
| 922 |
+
|
| 923 |
+
# β
Health check with detailed status
|
| 924 |
+
@app.route('/health')
def health_check():
    """Health check endpoint for monitoring.

    Returns 200 when every dependency check passes, 503 with
    status 'degraded' otherwise, 503 'unhealthy' on internal failure.
    """
    try:
        checks = {
            'downloads_folder': os.path.exists(DOWNLOAD_FOLDER),
            'user_tokens_folder': os.path.exists(USER_TOKENS_FOLDER),
            'gemini_configured': bool(GEMINI_API_KEY and GEMINI_API_KEY != 'your-gemini-api-key-here'),
            'ig_session_configured': bool(os.getenv('IG_SESSIONID') and len(os.getenv('IG_SESSIONID', '')) > 20)
        }

        all_ok = all(checks.values())
        health_status = {
            'status': 'healthy' if all_ok else 'degraded',
            'version': '1.0.0',
            'environment': os.getenv('ENVIRONMENT', 'development'),
            'checks': checks
        }

        return jsonify(health_status), (200 if all_ok else 503)
    except Exception as e:
        logger.error(f"Health check failed: {str(e)}")
        return jsonify({
            'status': 'unhealthy',
            'error': str(e)
        }), 503
|
| 952 |
+
|
| 953 |
+
# β
Request logging middleware
|
| 954 |
+
@app.before_request
def log_request():
    """Log every incoming request except static-asset fetches."""
    if request.path.startswith('/static/'):
        return
    logger.info(f"{request.method} {request.path} - {request.remote_addr}")
|
| 959 |
+
|
| 960 |
+
@app.after_request
def log_response(response):
    """Log the status code of each non-static response, then pass it on."""
    should_log = not request.path.startswith('/static/')
    if should_log:
        logger.info(f"{request.method} {request.path} - {response.status_code}")
    return response
|
| 966 |
+
|
| 967 |
+
# β
Startup validation
|
| 968 |
+
def validate_environment():
    """Validate critical environment variables and report problems.

    Logs a warning per issue found and an info line when everything is OK.

    Returns:
        list[str]: the issue messages found (empty when validation passed).
        Returning the list is backward-compatible (previous callers ignored
        the ``None`` return) and makes the check inspectable by callers.
    """
    issues = []

    if not os.getenv('SECRET_KEY') and os.getenv('ENVIRONMENT') == 'production':
        issues.append("SECRET_KEY not set (will use generated key)")

    if not os.getenv('GEMINI_API_KEY'):
        issues.append("GEMINI_API_KEY not set - AI features will use fallback")

    if not os.getenv('IG_SESSIONID'):
        issues.append("IG_SESSIONID not set - Instagram downloads will fail")

    if not os.path.exists('client_secret.json'):
        issues.append("client_secret.json not found - YouTube authentication will fail")

    if issues:
        logger.warning("β οΈ Environment validation issues:")
        for issue in issues:
            logger.warning(f" - {issue}")
    else:
        logger.info("β Environment validation passed")

    return issues
|
| 990 |
+
|
| 991 |
+
# Script entry point: print a startup banner, validate configuration,
# then start the Flask development/production server on the PORT chosen
# by the environment-configuration block above.
if __name__ == "__main__":
    print(f"π YouTube Automation Machine Starting...")
    print(f"π Downloads folder: {DOWNLOAD_FOLDER}")
    print(f"π User tokens folder: {USER_TOKENS_FOLDER}")
    print(f"π€ Gemini AI: {'Configured' if GEMINI_API_KEY and GEMINI_API_KEY != 'your-gemini-api-key-here' else 'Not configured (using fallback)'}")

    # Validate Instagram session (length check only — not verified live)
    ig_session = os.getenv("IG_SESSIONID")
    if ig_session and len(ig_session) > 20:
        print(f"πΈ Instagram Session: Configured (length: {len(ig_session)})")
    else:
        print("β οΈ Instagram Session: NOT CONFIGURED or INVALID")
        print(" Please add IG_SESSIONID to your environment variables")

    # Check environment
    environment = os.getenv('ENVIRONMENT', 'development')
    if environment == 'production':
        print("π Running in PRODUCTION mode (Render)")
        print(f"π‘ Port: {PORT}")
        print("π HTTPS enabled")
        print("π Secure sessions enabled")
    else:
        print("β οΈ Running in DEVELOPMENT mode")
        print(f"π‘ Port: {PORT}")

    print()

    # Validate environment (logs warnings; does not abort startup)
    validate_environment()

    # Debug only outside production
    debug = environment != 'production'

    # Bind to 0.0.0.0 so the container/proxy can reach the server;
    # PORT was configured by the environment block above.
    app.run(host='0.0.0.0', port=PORT, debug=debug, threaded=True)
|
| 1026 |
+
|
| 1027 |
+
|
downloader.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import instaloader
|
| 2 |
+
import os
|
| 3 |
+
import requests
|
| 4 |
+
from urllib.parse import urlparse
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
# Load environment variables from .env file
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
def extract_shortcode(reel_url: str) -> str:
    """Extract the shortcode segment from an Instagram post/reel URL.

    For ``/reel/<code>/`` or ``/p/<code>/`` paths the second segment is
    the shortcode; otherwise the last path segment is returned, or an
    empty string for a bare domain.
    """
    segments = [seg for seg in urlparse(reel_url).path.strip("/").split("/") if seg]
    if len(segments) >= 2 and segments[0] in {"reel", "p"}:
        return segments[1]
    return segments[-1] if segments else ""
|
| 18 |
+
|
| 19 |
+
def download_reel_with_audio(reel_url: str, output_folder: str, sessionid: str) -> str:
    """
    Download an Instagram reel (with audio) into *output_folder*.

    Args:
        reel_url: URL of the Instagram reel/post.
        output_folder: Existing directory to save the .mp4 into.
        sessionid: Instagram ``sessionid`` cookie value for authentication.

    Returns:
        Full path to the downloaded video file.

    Raises:
        Exception: wrapping any metadata, HTTP, or file-system failure.
    """
    try:
        L = instaloader.Instaloader()

        # Authenticate by injecting the sessionid cookie directly
        # (uses a private attribute of instaloader's context — fragile
        # across instaloader versions)
        L.context._session.cookies.set("sessionid", sessionid, domain=".instagram.com")

        shortcode = extract_shortcode(reel_url)
        post = instaloader.Post.from_shortcode(L.context, shortcode)

        video_url = post.video_url
        # Sidecar (carousel) posts keep the video on a child node
        if not video_url and post.typename == "GraphSidecar":
            for node in post.get_sidecar_nodes():
                if node.is_video:
                    video_url = node.video_url
                    break

        if not video_url:
            raise Exception("No video URL found for this post")

        # Timestamped name avoids collisions when the same reel is fetched twice
        video_filename = f"reel_{int(time.time())}_{shortcode}.mp4"
        video_path = os.path.join(output_folder, video_filename)

        print("β¬οΈ Downloading video with audio...")
        # FIX: add a timeout and raise on HTTP errors so a 403/404 error
        # page is never silently written to disk as a corrupt .mp4
        with requests.get(video_url, stream=True, timeout=60) as r:
            r.raise_for_status()
            with open(video_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

        # Verify file was written correctly
        if not os.path.exists(video_path):
            raise Exception(f"Failed to save video to {video_path}")

        file_size = os.path.getsize(video_path)
        if file_size == 0:
            raise Exception("Downloaded file is empty")

        print(f"β Video saved successfully: {video_path} ({file_size} bytes)")
        return video_path

    except Exception as e:
        # Re-wrap so callers can match on the "Download failed:" prefix
        raise Exception(f"Download failed: {str(e)}") from e
|
| 67 |
+
|
| 68 |
+
def main():
    """Interactive CLI: prompt for a reel URL and download it to ./downloads."""
    print("Instagram Reel Downloader with Audio")
    print("=" * 40)

    try:
        url = input("Enter Instagram reel URL: ").strip()
        if not url:
            print("No URL provided")
            return

        # Session cookie comes from the .env file loaded at import time
        ig_session = os.getenv("IG_SESSIONID")
        if not ig_session:
            raise Exception("No IG_SESSIONID found! Please set in .env file.")

        saved_path = download_reel_with_audio(url, "downloads", ig_session)
        size_mb = os.path.getsize(saved_path) / 1024 / 1024

        print(f"\nβ Successfully downloaded with audio!")
        print(f"π File location: {saved_path}")
        print(f"π File size: {size_mb:.2f} MB")

    except Exception as e:
        print(f"\nβ Error: {str(e)}")
        print("\nTips:")
        print("- Make sure IG_SESSIONID is set in the .env file")
        print("- If expired, grab a new sessionid from Chrome cookies")


if __name__ == "__main__":
    main()
|
requirements.txt
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Flask Web Framework
|
| 2 |
+
Flask==3.1.2
|
| 3 |
+
Flask-Session==0.8.0
|
| 4 |
+
gunicorn==23.0.0
|
| 5 |
+
python-dotenv==1.1.1
|
| 6 |
+
|
| 7 |
+
# Video Processing (CRITICAL - MoviePy and dependencies)
|
| 8 |
+
moviepy==2.2.1
|
| 9 |
+
imageio==2.37.0
|
| 10 |
+
imageio-ffmpeg==0.6.0
|
| 11 |
+
opencv-python-headless==4.12.0.88
|
| 12 |
+
Pillow==11.3.0
|
| 13 |
+
numpy==2.2.6
|
| 14 |
+
proglog==0.1.12
|
| 15 |
+
decorator==5.2.1
|
| 16 |
+
|
| 17 |
+
# Instagram Downloader
|
| 18 |
+
instaloader==4.14.2
|
| 19 |
+
yt-dlp==2025.9.26
|
| 20 |
+
|
| 21 |
+
# Google APIs & AI
|
| 22 |
+
google-api-python-client==2.184.0
|
| 23 |
+
google-auth==2.41.1
|
| 24 |
+
google-auth-httplib2==0.2.0
|
| 25 |
+
google-auth-oauthlib==1.2.2
|
| 26 |
+
google-generativeai==0.8.5
|
| 27 |
+
|
| 28 |
+
# Utilities
|
| 29 |
+
requests==2.32.5
|
| 30 |
+
tqdm==4.67.1
|
runtime.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
python-3.11.7
|
token.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e9ba428bdff7d2fbfa5d935dce7f974a0650b838922b99d26371d86307fbef96
|
| 3 |
+
size 1038
|
uploader.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
import google_auth_oauthlib.flow
|
| 4 |
+
import googleapiclient.discovery
|
| 5 |
+
import googleapiclient.errors
|
| 6 |
+
from googleapiclient.http import MediaFileUpload
|
| 7 |
+
from google.auth.transport.requests import Request
|
| 8 |
+
from google.oauth2.credentials import Credentials
|
| 9 |
+
import logging
|
| 10 |
+
|
| 11 |
+
# Configure logging
|
| 12 |
+
# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Allow insecure transport for local OAuth (development only)
# NOTE(review): this executes at import time and relaxes OAuth HTTPS
# enforcement process-wide. If this module is imported in production,
# it re-enables insecure transport — confirm app.py's production branch
# (which deletes this variable) runs AFTER this import.
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
|
| 17 |
+
|
| 18 |
+
def get_credentials(token_path='token.json'):
    """Load stored YouTube API credentials for a user, refreshing if expired.

    Returns the credentials object when valid (refreshing and re-saving
    expired ones), or ``None`` when no usable token exists. A corrupt or
    unrefreshable token file is deleted so the user re-authenticates.
    """
    creds = None

    # token file stores the user's access and refresh tokens
    if os.path.exists(token_path):
        try:
            creds = Credentials.from_authorized_user_file(
                token_path, ["https://www.googleapis.com/auth/youtube.upload"])
        except Exception as e:
            logger.error(f"Failed to load credentials: {e}")
            if os.path.exists(token_path):
                os.remove(token_path)
            return None

    # Already valid — nothing more to do
    if creds and creds.valid:
        return creds

    # Expired but refreshable — refresh and persist
    if creds and creds.expired and creds.refresh_token:
        try:
            logger.info("Refreshing expired credentials...")
            creds.refresh(Request())
            with open(token_path, 'w') as token:
                token.write(creds.to_json())
            logger.info("β Credentials refreshed successfully")
            return creds
        except Exception as e:
            logger.error(f"Token refresh failed: {e}")
            if os.path.exists(token_path):
                os.remove(token_path)
            return None

    # No token, or not refreshable
    return None
|
| 52 |
+
|
| 53 |
+
def authenticate_youtube(token_path='token.json'):
    """Run the OAuth consent flow and persist credentials to *token_path*.

    Tries the local-server redirect flow first (browser opened, callback on
    localhost:8080); on failure (headless host, port in use) falls back to
    the console/OOB flow where the user pastes an authorization code.

    Args:
        token_path: File to write the authorized-user JSON to.

    Returns:
        The obtained credentials object.

    Raises:
        Exception: when client_secret.json is missing or the flow fails.
    """
    try:
        scopes = ["https://www.googleapis.com/auth/youtube.upload"]
        client_secrets_file = "client_secret.json"

        if not os.path.exists(client_secrets_file):
            raise Exception("client_secret.json file not found. Please download it from Google Cloud Console.")

        logger.info("Starting OAuth flow...")
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)

        # Try local server auth first
        try:
            logger.info("Attempting local server authentication...")
            credentials = flow.run_local_server(
                host='localhost',
                port=8080,
                prompt='consent',
                authorization_prompt_message='Please visit this URL to authorize: {url}',
                success_message='β Authentication successful! You may close this window.',
                open_browser=True
            )
            logger.info("β Local server authentication successful")
        except Exception as local_error:
            # Fall back to manual copy-paste (OOB) flow
            # NOTE(review): Google has deprecated the OOB redirect URI —
            # confirm this fallback still works with current client configs.
            logger.warning(f"Local server auth failed: {local_error}")
            logger.info("Falling back to console-based authentication...")
            flow.redirect_uri = 'urn:ietf:wg:oauth:2.0:oob'
            auth_url, _ = flow.authorization_url(prompt='consent')

            print(f"\nπ Please visit this URL to authorize:\n{auth_url}\n")
            auth_code = input("Enter the authorization code: ").strip()

            if not auth_code:
                raise Exception("No authorization code provided")

            flow.fetch_token(code=auth_code)
            credentials = flow.credentials
            logger.info("β Console-based authentication successful")

        # Persist so subsequent runs can skip the interactive flow
        with open(token_path, 'w') as token:
            token.write(credentials.to_json())

        logger.info(f"β Authentication complete and credentials saved to {token_path}!")
        return credentials

    except Exception as e:
        logger.error(f"Authentication failed: {str(e)}")
        raise Exception(f"Authentication failed: {str(e)}")
|
| 104 |
+
|
| 105 |
+
def check_authentication(token_path='token.json'):
    """Return True when a valid stored credential exists for this user."""
    try:
        credentials = get_credentials(token_path)
        return credentials is not None and credentials.valid
    except Exception:
        # Any failure (corrupt token, refresh error) means not authenticated
        return False
|
| 112 |
+
|
| 113 |
+
def get_youtube_service(token_path='token.json'):
    """Build an authenticated YouTube Data API v3 client for this user.

    Raises:
        Exception: when no valid credentials are stored.
    """
    credentials = get_credentials(token_path)
    if not credentials:
        raise Exception("Not authenticated. Please run authentication first.")
    return googleapiclient.discovery.build("youtube", "v3", credentials=credentials)
|
| 120 |
+
|
| 121 |
+
def upload_to_youtube(video_path, title, description, tags, privacy_status="public", category_id="22", token_path='token.json'):
    """Upload a video to YouTube for the user identified by *token_path*.

    Args:
        video_path: Path to the video file (must exist and be non-empty).
        title: Video title; truncated to 100 chars (YouTube limit).
        description: Description; truncated to 5000 chars (YouTube limit).
        tags: List of tag strings (non-lists are replaced with []).
        privacy_status: 'public', 'unlisted', or 'private'.
        category_id: YouTube category id (default "22" = People & Blogs).
        token_path: Per-user credential file.

    Returns:
        The uploaded video's YouTube id.

    Raises:
        Exception: on missing/empty file, auth failure, or upload failure
        after 3 attempts.
    """
    try:
        # Verify file exists and is accessible
        if not os.path.exists(video_path):
            raise Exception(f"Video file not found: {video_path}")

        # Get file size for validation
        file_size = os.path.getsize(video_path)
        if file_size == 0:
            raise Exception("Video file is empty")

        print(f"Uploading video: {os.path.basename(video_path)} ({file_size / 1024 / 1024:.2f} MB)")
        print(f"Privacy status: {privacy_status}")

        # Get YouTube service for specific user
        youtube = get_youtube_service(token_path)

        # Prepare video metadata, clamped to YouTube's documented limits
        video_metadata = {
            "snippet": {
                "title": title[:100],  # YouTube title limit
                "description": description[:5000],  # YouTube description limit
                # NOTE(review): [:500] caps the tag COUNT; YouTube's limit
                # is 500 characters across all tags — confirm intent.
                "tags": tags[:500] if isinstance(tags, list) else [],  # YouTube tags limit
                "categoryId": str(category_id)
            },
            "status": {
                "privacyStatus": privacy_status,
                "selfDeclaredMadeForKids": False
            }
        }

        # Create media upload object (resumable, 1MB chunks)
        media = MediaFileUpload(
            video_path,
            chunksize=1024*1024,  # 1MB chunks
            resumable=True,
            mimetype="video/*"
        )

        # Create upload request
        request = youtube.videos().insert(
            part="snippet,status",
            body=video_metadata,
            media_body=media
        )

        # Execute upload with retry logic (up to 3 immediate retries)
        response = None
        max_retries = 3
        retry_count = 0

        while retry_count < max_retries:
            try:
                print(f"Upload attempt {retry_count + 1}/{max_retries}")
                response = request.execute()
                break
            except Exception as upload_error:
                retry_count += 1
                if retry_count >= max_retries:
                    raise upload_error
                print(f"Upload failed, retrying... Error: {str(upload_error)}")

        if not response or 'id' not in response:
            raise Exception("Upload completed but no video ID returned")

        video_id = response["id"]
        print(f"β Upload successful! Video ID: {video_id}")
        print(f"π Video URL: https://www.youtube.com/watch?v={video_id}")

        return video_id

    except Exception as e:
        print(f"β Upload failed: {str(e)}")
        raise Exception(f"Failed to upload video: {str(e)}")
|
| 196 |
+
|
| 197 |
+
def get_channel_info(token_path='token.json'):
    """Return basic metadata for the authenticated user's YouTube channel.

    Queries the YouTube Data API (channels.list with mine=True) using the
    credentials stored at ``token_path``.

    Args:
        token_path: Path to the per-user OAuth token file.

    Returns:
        dict with keys id, title, description, thumbnail, subscriberCount,
        videoCount and viewCount, or None when no channel is found or any
        error occurs (errors are printed, never raised).
    """
    try:
        service = get_youtube_service(token_path)

        # mine=True resolves the channel owned by the authorized user.
        api_response = service.channels().list(
            part="snippet,statistics",
            mine=True
        ).execute()

        items = api_response.get('items')
        if not items:
            return None

        channel = items[0]
        snippet = channel['snippet']
        stats = channel['statistics']

        # Counts may be hidden by the channel owner, hence the '0' fallbacks.
        return {
            'id': channel['id'],
            'title': snippet['title'],
            'description': snippet.get('description', ''),
            'thumbnail': snippet['thumbnails'].get('default', {}).get('url', ''),
            'subscriberCount': stats.get('subscriberCount', '0'),
            'videoCount': stats.get('videoCount', '0'),
            'viewCount': stats.get('viewCount', '0'),
        }
    except Exception as e:
        print(f"Error getting channel info: {e}")
        return None
|
| 229 |
+
|
| 230 |
+
def logout_youtube(token_path='token.json'):
    """Revoke the stored OAuth token and log the user out of YouTube.

    Best-effort revocation: the token is POSTed to Google's revocation
    endpoint (failures there are only logged), then the local token file
    is deleted.

    Args:
        token_path: Path to the per-user OAuth token file.

    Returns:
        True if the token file was removed, False if there was no token
        file to begin with or any step of the logout failed.
    """
    try:
        # Explicit False instead of silently falling through (the previous
        # version returned None when no token file existed).
        if not os.path.exists(token_path):
            return False

        creds = Credentials.from_authorized_user_file(token_path)

        # Best-effort server-side revocation; a network/HTTP failure must
        # not prevent removal of the local token file.
        import requests
        try:
            requests.post(
                'https://oauth2.googleapis.com/revoke',
                params={'token': creds.token},
                headers={'content-type': 'application/x-www-form-urlencoded'}
            )
        except Exception as e:
            print(f"Error revoking token: {e}")

        # Remove the local token so the user is logged out regardless of
        # whether revocation succeeded.
        os.remove(token_path)
        return True
    except Exception as e:
        print(f"Error during logout: {e}")
        return False
|
| 260 |
+
|
| 261 |
+
def main():
    """Command-line entry point: parse arguments and upload one video."""
    parser = argparse.ArgumentParser(description='Upload a video to YouTube')
    parser.add_argument('--video-path', required=True, help='Path to the video file')
    parser.add_argument('--title', required=True, help='Title of the video')
    parser.add_argument('--description', default='', help='Description of the video')
    parser.add_argument('--tags', nargs='*', default=[], help='Tags for the video')
    parser.add_argument('--privacy', default='public', choices=['private', 'unlisted', 'public'],
                        help='Privacy setting (default: public)')

    options = parser.parse_args()

    # Bail out early when the input file is missing.
    if not os.path.exists(options.video_path):
        print(f"Error: Video file not found at {options.video_path}")
        return

    try:
        uploaded_id = upload_to_youtube(
            video_path=options.video_path,
            title=options.title,
            description=options.description,
            tags=options.tags,
            privacy_status=options.privacy,
        )
    except Exception as e:
        print(f"Error uploading video: {str(e)}")
    else:
        print(f"Video uploaded successfully! Video ID: {uploaded_id}")


if __name__ == "__main__":
    main()
|
video_editor.py
ADDED
|
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
# Configure logging
|
| 6 |
+
logging.basicConfig(level=logging.INFO)
|
| 7 |
+
logger = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
# Suppress MoviePy warnings about frame reading
|
| 10 |
+
warnings.filterwarnings('ignore', category=UserWarning, module='moviepy')
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip, CompositeVideoClip, concatenate_audioclips
|
| 14 |
+
from moviepy.audio.AudioClip import CompositeAudioClip
|
| 15 |
+
from moviepy.audio.fx.volumex import volumex
|
| 16 |
+
MOVIEPY_AVAILABLE = True
|
| 17 |
+
logger.info("β
MoviePy loaded successfully")
|
| 18 |
+
except ImportError as e:
|
| 19 |
+
logger.error(f"β MoviePy not available: {e}")
|
| 20 |
+
MOVIEPY_AVAILABLE = False
|
| 21 |
+
|
| 22 |
+
try:
|
| 23 |
+
import yt_dlp
|
| 24 |
+
YT_DLP_AVAILABLE = True
|
| 25 |
+
logger.info("β
yt-dlp loaded successfully")
|
| 26 |
+
except ImportError as e:
|
| 27 |
+
logger.error(f"β yt-dlp not available: {e}")
|
| 28 |
+
YT_DLP_AVAILABLE = False
|
| 29 |
+
|
| 30 |
+
try:
|
| 31 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 32 |
+
import numpy as np
|
| 33 |
+
PIL_AVAILABLE = True
|
| 34 |
+
logger.info("β
PIL (Pillow) loaded successfully")
|
| 35 |
+
except ImportError as e:
|
| 36 |
+
logger.error(f"β PIL not available: {e}")
|
| 37 |
+
PIL_AVAILABLE = False
|
| 38 |
+
|
| 39 |
+
class VideoEditor:
    """Edit local video files with MoviePy.

    Capabilities:
      * replace the original soundtrack with background music taken from a
        local audio file or downloaded from a YouTube URL (via yt-dlp);
      * burn text overlays rendered with PIL (no ImageMagick dependency);
      * re-encode the result with libx264/aac.

    MoviePy is mandatory; yt-dlp and Pillow are only needed for the
    corresponding features and are checked lazily.
    """

    def __init__(self, temp_folder: str = 'temp_audio') -> None:
        # Fail fast: every editing method relies on MoviePy clip classes.
        if not MOVIEPY_AVAILABLE:
            raise ImportError("MoviePy is required for video editing. Install with: pip install moviepy")

        # Scratch directory for downloaded audio; created eagerly so later
        # downloads never race on a missing directory.
        self.temp_folder = temp_folder
        os.makedirs(temp_folder, exist_ok=True)
        logger.info(f"✅ VideoEditor initialized with temp folder: {temp_folder}")

    def download_youtube_audio(self, youtube_url: str) -> str:
        """Download the best audio stream of a YouTube video.

        The file is written as ``background_music.<ext>`` into the temp
        folder; any previous m4a/mp3 leftovers are removed first so the
        existence checks below cannot match a stale file.

        Returns the path of the downloaded audio file.
        Raises Exception if yt-dlp is missing or the download produced no file.
        """
        if not YT_DLP_AVAILABLE:
            raise ImportError("yt-dlp is required. Install with: pip install yt-dlp")

        try:
            audio_path = os.path.join(self.temp_folder, 'background_music.m4a')

            # Remove old file if exists
            if os.path.exists(audio_path):
                os.remove(audio_path)

            # Also check for mp3 version
            mp3_path = os.path.join(self.temp_folder, 'background_music.mp3')
            if os.path.exists(mp3_path):
                os.remove(mp3_path)

            ydl_opts = {
                'format': 'bestaudio/best',
                # %(ext)s lets yt-dlp pick the container of the chosen stream.
                'outtmpl': os.path.join(self.temp_folder, 'background_music.%(ext)s'),
                'quiet': True,
                'no_warnings': True
            }

            logger.info(f"📥 Downloading audio from: {youtube_url}")
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:  # type: ignore
                info = ydl.extract_info(youtube_url, download=True)
                ext = info.get('ext', 'm4a')
                actual_path = os.path.join(self.temp_folder, f'background_music.{ext}')

            # Check which file was created: the extension reported by yt-dlp
            # first, then the m4a/mp3 fallbacks.
            if os.path.exists(actual_path):
                logger.info(f"✅ Downloaded audio from YouTube: {actual_path}")
                return actual_path
            elif os.path.exists(audio_path):
                logger.info(f"✅ Downloaded audio from YouTube: {audio_path}")
                return audio_path
            elif os.path.exists(mp3_path):
                logger.info(f"✅ Downloaded audio from YouTube: {mp3_path}")
                return mp3_path
            else:
                raise Exception("Audio file was not created")

        except Exception as e:
            logger.error(f"❌ Failed to download YouTube audio: {str(e)}")
            raise Exception(f"Failed to download music: {str(e)}")

    def add_background_music(self, video, audio_path: str, volume: float = 0.3):
        """Replace the video's soundtrack with background music.

        The music is trimmed to the video length (or looped if shorter),
        scaled to ``volume`` (0.0–1.0), and set as the ONLY audio track —
        the original audio is muted, not mixed.

        Returns the video clip with the new audio attached.
        """
        background_audio = None
        try:
            logger.info(f"🎵 Adding background music to video from: {audio_path}")

            # Check if audio_path is a local file or needs to be downloaded
            if not os.path.exists(audio_path):
                raise Exception(f"Audio file not found: {audio_path}")

            background_audio = AudioFileClip(audio_path)

            # Adjust background music duration to match video
            if background_audio.duration > video.duration:
                background_audio = background_audio.subclip(0, video.duration)
            else:
                # Loop if music is shorter than video (+1 loop to guarantee
                # coverage before trimming to the exact video duration).
                loops_needed = int(video.duration / background_audio.duration) + 1
                background_audio = concatenate_audioclips([background_audio] * loops_needed).subclip(0, video.duration)
            # Set background music volume
            background_audio = volumex(background_audio, volume)

            # Replace original audio with background music (mute original)
            video = video.set_audio(background_audio)

            logger.info("✅ Added background music to video (original audio muted)")
            return video

        except Exception as e:
            logger.error(f"❌ Failed to add background music: {str(e)}")
            raise Exception(f"Failed to add music: {str(e)}")

    def add_text_overlay(self, video, text: str, position: str = 'top', fontsize: int = 50, color: str = 'white',
                         bg_color: str = 'black', duration=None):
        """Add text overlay to video using PIL (no ImageMagick needed).

        Renders ``text`` onto a transparent full-frame RGBA image with a
        3px outline in ``bg_color``, turns it into an ImageClip with 0.5s
        fade in/out, and composites it over the video.

        Args:
            position: one of top/bottom/center/top-left/top-right
                      (anything else falls back to top).
            duration: overlay duration in seconds; None means the full
                      video duration.
        """
        try:
            if not PIL_AVAILABLE:
                raise ImportError("PIL (Pillow) is required. Install with: pip install Pillow")

            if duration is None:
                duration = video.duration

            logger.info(f"📝 Adding text overlay: {text[:30]}...")

            # Create an image with text using PIL (full frame, transparent).
            img_width, img_height = video.size
            img = Image.new('RGBA', (img_width, img_height), (0, 0, 0, 0))
            draw = ImageDraw.Draw(img)

            # Try to use a default font, fall back to default if not available.
            # Broad excepts are deliberate: font lookup differs per OS and any
            # failure should degrade to PIL's built-in bitmap font.
            try:
                font = ImageFont.truetype("arial.ttf", fontsize)
            except:
                try:
                    font = ImageFont.truetype("Arial.ttf", fontsize)
                except:
                    try:
                        # For Windows
                        font = ImageFont.truetype("C:/Windows/Fonts/arial.ttf", fontsize)
                    except:
                        logger.warning("⚠️ Could not load Arial font, using default")
                        font = ImageFont.load_default()

            # Get text bounding box
            bbox = draw.textbbox((0, 0), text, font=font)
            text_width = bbox[2] - bbox[0]
            text_height = bbox[3] - bbox[1]

            # Position mapping (50px margin from the chosen edges).
            if position == 'top':
                x = (img_width - text_width) // 2
                y = 50
            elif position == 'bottom':
                x = (img_width - text_width) // 2
                y = img_height - text_height - 50
            elif position == 'center':
                x = (img_width - text_width) // 2
                y = (img_height - text_height) // 2
            elif position == 'top-left':
                x = 50
                y = 50
            elif position == 'top-right':
                x = img_width - text_width - 50
                y = 50
            else:
                x = (img_width - text_width) // 2
                y = 50

            # Parse colors (unknown names fall back to white text / black stroke).
            color_map = {
                'white': (255, 255, 255),
                'black': (0, 0, 0),
                'red': (255, 0, 0),
                'blue': (0, 0, 255),
                'green': (0, 255, 0),
                'yellow': (255, 255, 0)
            }
            text_color = color_map.get(color.lower(), (255, 255, 255))
            stroke_color = color_map.get(bg_color.lower(), (0, 0, 0))

            # Draw text with stroke (outline): draw the text shifted in every
            # direction within the stroke radius, then the main text on top.
            stroke_width = 3
            for adj_x in range(-stroke_width, stroke_width + 1):
                for adj_y in range(-stroke_width, stroke_width + 1):
                    draw.text((x + adj_x, y + adj_y), text, font=font, fill=stroke_color)

            # Draw main text
            draw.text((x, y), text, font=font, fill=text_color)

            # Convert PIL image to numpy array
            img_array = np.array(img)

            # Create ImageClip from the array
            txt_clip = ImageClip(img_array, transparent=True)
            txt_clip = txt_clip.set_duration(duration)

            # Add fade in/out effects
            txt_clip = txt_clip.fadein(0.5).fadeout(0.5)

            # Composite video with text
            video = CompositeVideoClip([video, txt_clip])

            logger.info(f"✅ Added text overlay: {text}")
            return video

        except Exception as e:
            logger.error(f"❌ Failed to add text overlay: {str(e)}")
            raise Exception(f"Failed to add text: {str(e)}")

    def edit_video(self, video_path: str, output_path: str, music_url=None, music_volume: float = 0.3,
                   text_overlays=None) -> str:
        """
        Complete video editing with music and text overlays.

        Args:
            video_path: Path to input video
            output_path: Path for output video (a directory is accepted and
                turns into ``<dir>/<input-stem>_edited.mp4``)
            music_url: YouTube URL OR local file path for background music
            music_volume: Volume of background music (0.0 to 1.0)
            text_overlays: List of dict with keys: text, position, duration
                (optional keys fontsize, color are honored too)

        Returns:
            Path of the written output file.

        Raises:
            FileNotFoundError: if the input video does not exist.
            Exception: wrapping any editing/encoding failure.
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"Video file not found: {video_path}")

        # Ensure output path is a file, not a directory
        if os.path.isdir(output_path):
            video_filename = os.path.basename(video_path)
            output_path = os.path.join(output_path, os.path.splitext(video_filename)[0] + "_edited.mp4")
            logger.info(f"Output is a directory, saving to: {output_path}")

        video = None
        audio_path = None
        try:
            logger.info(f"🎬 Starting video editing for: {video_path}")

            # MoviePy emits noisy frame-read warnings while probing the file.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                video = VideoFileClip(video_path)

            logger.info(f"📊 Video info: {video.duration:.2f}s, {video.size[0]}x{video.size[1]}")

            # ✅ NEW: Handle both YouTube URLs and local files
            if music_url:
                # Check if it's a local file or YouTube URL
                if os.path.exists(music_url):
                    # It's a local file
                    logger.info(f"🎵 Using local music file: {music_url}")
                    audio_path = music_url
                elif music_url.startswith('http'):
                    # It's a YouTube URL
                    logger.info(f"🎵 Downloading music from YouTube: {music_url}")
                    audio_path = self.download_youtube_audio(music_url)
                else:
                    raise Exception(f"Invalid music source: {music_url}")

                video = self.add_background_music(video, audio_path, music_volume)

            # Add text overlays if provided (each overlay re-composites the clip).
            if text_overlays and len(text_overlays) > 0:
                logger.info(f"📝 Adding {len(text_overlays)} text overlays")
                for i, overlay in enumerate(text_overlays):
                    logger.info(f"Adding overlay {i+1}/{len(text_overlays)}: {overlay.get('text', 'N/A')[:30]}")
                    video = self.add_text_overlay(
                        video,
                        text=overlay.get('text', 'Subscribe!'),
                        position=overlay.get('position', 'top'),
                        fontsize=overlay.get('fontsize', 50),
                        color=overlay.get('color', 'white'),
                        # NOTE(review): a dict with an explicit 'duration': None
                        # bypasses this default and shows the text full-length.
                        duration=overlay.get('duration', min(5, video.duration))
                    )

            # Write output video
            logger.info(f"💾 Writing edited video to: {output_path}")
            logger.info(f"⏳ This may take a few minutes... Please wait.")

            # Suppress warnings during video writing
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                video.write_videofile(
                    output_path,
                    codec='libx264',
                    audio_codec='aac',
                    temp_audiofile='temp-audio.m4a',
                    remove_temp=True,
                    fps=30,
                    preset='medium',
                    threads=4,
                    logger=None  # Suppress moviepy's verbose logging
                )

            # Verify output file was created
            if not os.path.exists(output_path):
                raise Exception(f"Output video file was not created: {output_path}")

            file_size = os.path.getsize(output_path)
            if file_size == 0:
                raise Exception(f"Output video file is empty (0 bytes): {output_path}")

            file_size_mb = file_size / (1024 * 1024)
            logger.info(f"✅ Video editing completed: {output_path}")
            logger.info(f"📊 Output file size: {file_size_mb:.2f} MB")
            return output_path

        except Exception as e:
            logger.error(f"❌ Video editing failed: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            raise Exception(f"Video editing failed: {str(e)}")
        finally:
            # Cleanup - close video and wait a bit for file handles to release
            # (on Windows the encoder may still hold the file briefly).
            if video:
                try:
                    video.close()
                    import time
                    time.sleep(0.5)  # Give time for file handles to release
                except:
                    pass

            # Clean up temp files
            self.cleanup_temp_files()

    def cleanup_temp_files(self) -> None:
        """Clean up temporary files.

        Best effort: forces a GC pass to drop lingering clip file handles,
        then deletes every file in the temp folder, skipping files still
        held open (PermissionError). Never raises.
        """
        try:
            import time
            import gc

            # Force garbage collection to release file handles
            gc.collect()
            time.sleep(0.5)

            if os.path.exists(self.temp_folder):
                for file in os.listdir(self.temp_folder):
                    file_path = os.path.join(self.temp_folder, file)
                    try:
                        if os.path.isfile(file_path):
                            os.remove(file_path)
                            logger.debug(f"🧹 Removed temp file: {file_path}")
                    except PermissionError:
                        # File still in use, skip it
                        logger.debug(f"⏭️ Skipping temp file (still in use): {file_path}")
                    except Exception as e:
                        logger.warning(f"⚠️ Failed to remove {file_path}: {e}")
        except Exception as e:
            logger.warning(f"⚠️ Failed to cleanup temp files: {str(e)}")
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def main():
    """Interactive/CLI entry point: collect options and run VideoEditor.

    Every option can come from a command-line flag; anything missing is
    asked for interactively (Enter skips optional features).
    """
    import argparse

    parser = argparse.ArgumentParser(description='Edit videos with background music and text overlays')
    parser.add_argument('--video', type=str, help='Path to input video file')
    parser.add_argument('--music', type=str, help='YouTube URL for background music')
    parser.add_argument('--output', type=str, help='Path for output video file')
    parser.add_argument('--volume', type=float, default=0.3, help='Music volume (0.0 to 1.0, default: 0.3)')
    parser.add_argument('--text', type=str, help='Text overlay (optional)')
    parser.add_argument('--text-position', type=str, default='top',
                        choices=['top', 'bottom', 'center', 'top-left', 'top-right'],
                        help='Text position (default: top)')

    args = parser.parse_args()

    banner = "=" * 60
    print("\n" + banner)
    print("🎬 VIDEO EDITOR - Add Background Music & Text")
    print(banner + "\n")

    # Input video: flag wins, otherwise prompt (quotes stripped so
    # drag-and-dropped Windows paths work).
    video_path = args.video if args.video else input("📹 Enter path to your video file: ").strip().strip('"')
    if not os.path.exists(video_path):
        print(f"❌ Error: Video file not found: {video_path}")
        return

    # Background music source; an empty answer means no music.
    if args.music:
        music_url = args.music
    else:
        music_url = input("🎵 Enter YouTube URL for background music (or press Enter to skip): ").strip() or None

    # Output path, defaulting to "<input>_edited.mp4".
    if args.output:
        output_path = args.output
    else:
        default_output = os.path.splitext(video_path)[0] + "_edited.mp4"
        answer = input(f"💾 Enter output path (press Enter for '{default_output}'): ").strip().strip('"')
        output_path = answer or default_output

    # Optional single text overlay (duration None = full video length).
    overlays = None
    if args.text:
        overlays = [{'text': args.text, 'position': args.text_position, 'duration': None}]
    elif input("📝 Add text overlay? (y/n, press Enter to skip): ").strip().lower() == 'y':
        overlay_text = input("Enter text: ").strip()
        overlay_pos = input("Position (top/bottom/center, default: top): ").strip() or 'top'
        overlays = [{'text': overlay_text, 'position': overlay_pos, 'duration': None}]

    try:
        print("\n" + banner)
        print("🚀 Starting video editing...")
        print(banner + "\n")

        result = VideoEditor().edit_video(
            video_path=video_path,
            output_path=output_path,
            music_url=music_url,
            music_volume=args.volume,
            text_overlays=overlays,
        )

        print("\n" + banner)
        print(f"✅ SUCCESS! Video saved to: {result}")

        # Verify and display file info.
        if os.path.exists(result):
            size_mb = os.path.getsize(result) / (1024 * 1024)
            print(f"📊 File size: {size_mb:.2f} MB")
            print(f"📁 Open folder: {os.path.dirname(result)}")
        else:
            print("⚠️ Warning: Output file path shown but file not found!")

        print(banner + "\n")

    except Exception as e:
        print(f"\n❌ Error: {str(e)}\n")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
|