Spaces:
Sleeping
Sleeping
Fix build errors: Update Gradio to 5.8.0, remove problematic dependencies, add graceful error handling
Browse files- requirements.txt +7 -4
- wgp.py +24 -4
requirements.txt
CHANGED
|
@@ -15,10 +15,11 @@ ftfy
|
|
| 15 |
dashscope
|
| 16 |
imageio-ffmpeg
|
| 17 |
|
|
|
|
| 18 |
# flash_attn
|
| 19 | - flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
|
| 20 |
|
| 21 | - gradio==5.  (exact prior version number truncated in extraction)
|
| 22 |
numpy>=1.23.5,<2
|
| 23 |
einops
|
| 24 |
moviepy==1.0.3
|
|
@@ -27,8 +28,10 @@ peft==0.14.0
|
|
| 27 |
mutagen
|
| 28 |
pydantic==2.10.6
|
| 29 |
decord
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
| 32 |
matplotlib
|
| 33 |
timm
|
| 34 |
segment-anything
|
|
|
|
| 15 |
dashscope
|
| 16 |
imageio-ffmpeg
|
| 17 |
|
| 18 | + # Remove flash-attn for now as it causes build issues on HF Spaces
|
| 19 |
# flash_attn
|
| 20 | + # flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
|
| 21 |
|
| 22 | + gradio==5.8.0
|
| 23 |
numpy>=1.23.5,<2
|
| 24 |
einops
|
| 25 |
moviepy==1.0.3
|
|
|
|
| 28 |
mutagen
|
| 29 |
pydantic==2.10.6
|
| 30 |
decord
|
| 31 | + # Use CPU version for initial testing
| 32 | + onnxruntime
| 33 | + # rembg[gpu]==2.0.65
| 34 | + rembg==2.0.65
|
| 35 |
matplotlib
|
| 36 |
timm
|
| 37 |
segment-anything
|
wgp.py
CHANGED
|
@@ -17,7 +17,17 @@ from mmgp import offload, safetensors2, profile_type
|
|
| 17 |
try:
|
| 18 |
import triton
|
| 19 |
except ImportError:
|
|
|
|
|
|
|
| 20 |
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
from pathlib import Path
|
| 22 |
from datetime import datetime
|
| 23 |
import gradio as gr
|
|
@@ -56,11 +66,21 @@ target_mmgp_version = "3.4.8"
|
|
| 56 |
WanGP_version = "5.41"
|
| 57 |
prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
from importlib.metadata import version
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
lock = threading.Lock()
|
| 65 |
current_task_id = None
|
| 66 |
task_id = 0
|
|
|
|
| 17 |
try:
|
| 18 |
import triton
|
| 19 |
except ImportError:
|
| 20 | +     triton = None
| 21 | +     print("Triton not available, will use fallback implementations")
| 22 |       pass
|
| 23 | +
| 24 | + # Handle flash_attn import gracefully
| 25 | + try:
| 26 | +     import flash_attn
| 27 | +     FLASH_ATTN_AVAILABLE = True
| 28 | + except ImportError:
| 29 | +     FLASH_ATTN_AVAILABLE = False
| 30 | +     print("Flash Attention not available, will use standard attention")
|
| 31 |
from pathlib import Path
|
| 32 |
from datetime import datetime
|
| 33 |
import gradio as gr
|
|
|
|
| 66 |
WanGP_version = "5.41"
|
| 67 |
prompt_enhancer_image_caption_model, prompt_enhancer_image_caption_processor, prompt_enhancer_llm_model, prompt_enhancer_llm_tokenizer = None, None, None, None
|
| 68 |
|
| 69 | + # Check for GPU availability
| 70 | + CUDA_AVAILABLE = torch.cuda.is_available()
| 71 | + print(f"CUDA available: {CUDA_AVAILABLE}")
| 72 | + if not CUDA_AVAILABLE:
| 73 | +     print("Running on CPU mode - video generation will be significantly slower")
| 74 | +
|
| 75 |
from importlib.metadata import version
|
| 76 | + try:
| 77 | +     mmgp_version = version("mmgp")
| 78 | +     if mmgp_version != target_mmgp_version:
| 79 | +         print(f"Warning: mmgp version {mmgp_version} detected, version {target_mmgp_version} is recommended.")
| 80 | +         print("Some features may not work correctly. Consider upgrading with 'pip install -r requirements.txt'")
| 81 | + except Exception as e:
| 82 | +     print(f"Warning: Could not check mmgp version: {e}")
| 83 | +     print("This may cause issues with some features.")
|
| 84 |
lock = threading.Lock()
|
| 85 |
current_task_id = None
|
| 86 |
task_id = 0
|