futurespyhi committed on
Commit
4b92fef
·
1 Parent(s): 15389e6

1. Remove the transformers patch-application step for YuE; 2. Rename YuEGP to YuE

Browse files
Files changed (2) hide show
  1. app.py +9 -125
  2. download_models.py +2 -2
app.py CHANGED
@@ -78,7 +78,7 @@ def install_flash_attn() -> bool:
78
  # Setup environment first
79
  setup_spaces_environment()
80
 
81
- # Download required models for YuEGP inference
82
  def download_required_models():
83
  """Download required model files at startup"""
84
  try:
@@ -103,116 +103,6 @@ models_ready = download_required_models()
103
  # Install flash-attn if needed
104
  flash_attn_available = install_flash_attn()
105
 
106
- # Apply transformers patches for performance optimization
107
- def apply_transformers_patch():
108
- """
109
- Apply YuEGP transformers patches for high-performance generation.
110
-
111
- This function applies optimized transformers patches that provide:
112
- - 2x speed improvement for low VRAM profiles
113
- - 3x speed improvement for Stage 1 generation (16GB+ VRAM)
114
- - 2x speed improvement for Stage 2 generation (all profiles)
115
-
116
- The patches replace two key files in the transformers library:
117
- - models/llama/modeling_llama.py (LLaMA model optimizations)
118
- - generation/utils.py (generation utilities optimizations)
119
-
120
- Includes smart detection to avoid re-applying patches on restart.
121
- """
122
- try:
123
- import shutil
124
- import site
125
- import hashlib
126
-
127
- # Define source and target directories
128
- source_dir = os.path.join(project_root, "YuEGP", "transformers")
129
-
130
- # Get the site-packages directory where transformers is installed
131
- site_packages = site.getsitepackages()
132
- if not site_packages:
133
- # Fallback for some environments
134
- import transformers
135
- transformers_path = os.path.dirname(transformers.__file__)
136
- target_base = os.path.dirname(transformers_path)
137
- else:
138
- target_base = site_packages[0]
139
-
140
- target_dir = os.path.join(target_base, "transformers")
141
-
142
- # Check if source patches exist
143
- if not os.path.exists(source_dir):
144
- print("⚠️ YuEGP transformers patches not found, skipping optimization")
145
- return False
146
-
147
- if not os.path.exists(target_dir):
148
- print("⚠️ Transformers library not found, skipping patches")
149
- return False
150
-
151
- # Check if patches are already applied by comparing file hashes
152
- def get_file_hash(filepath):
153
- """Get MD5 hash of file content"""
154
- if not os.path.exists(filepath):
155
- return None
156
- with open(filepath, 'rb') as f:
157
- return hashlib.md5(f.read()).hexdigest()
158
-
159
- # Key files to check for patch status
160
- key_patches = [
161
- "models/llama/modeling_llama.py",
162
- "generation/utils.py"
163
- ]
164
-
165
- patches_needed = False
166
- for patch_file in key_patches:
167
- source_file = os.path.join(source_dir, patch_file)
168
- target_file = os.path.join(target_dir, patch_file)
169
-
170
- if os.path.exists(source_file):
171
- source_hash = get_file_hash(source_file)
172
- target_hash = get_file_hash(target_file)
173
-
174
- if source_hash != target_hash:
175
- patches_needed = True
176
- break
177
-
178
- if not patches_needed:
179
- print("✅ YuEGP transformers patches already applied, skipping re-installation")
180
- print(" 📈 High-performance optimizations are active:")
181
- print(" • Stage 1 generation: 3x faster (16GB+ VRAM)")
182
- print(" • Stage 2 generation: 2x faster (all profiles)")
183
- return True
184
-
185
- # Apply patches by copying optimized files
186
- print("🔧 Applying YuEGP transformers patches for high-performance generation...")
187
-
188
- # Copy the patched files, preserving directory structure
189
- for root, dirs, files in os.walk(source_dir):
190
- # Calculate relative path from source_dir
191
- rel_path = os.path.relpath(root, source_dir)
192
- target_subdir = os.path.join(target_dir, rel_path) if rel_path != '.' else target_dir
193
-
194
- # Ensure target subdirectory exists
195
- os.makedirs(target_subdir, exist_ok=True)
196
-
197
- # Copy all Python files in this directory
198
- for file in files:
199
- if file.endswith('.py'):
200
- src_file = os.path.join(root, file)
201
- dst_file = os.path.join(target_subdir, file)
202
-
203
- shutil.copy2(src_file, dst_file)
204
- print(f" ✅ Patched: {os.path.relpath(dst_file, target_base)}")
205
-
206
- print("🚀 Transformers patches applied successfully!")
207
- print(" 📈 Expected performance gains:")
208
- print(" • Stage 1 generation: 3x faster (16GB+ VRAM)")
209
- print(" • Stage 2 generation: 2x faster (all profiles)")
210
- return True
211
-
212
- except Exception as e:
213
- print(f"❌ Error applying transformers patches: {e}")
214
- print(" Continuing without patches - performance may be reduced")
215
- return False
216
 
217
  # Now import the rest of the dependencies
218
  # Add project root to Python path for imports
@@ -224,9 +114,6 @@ from tools.groq_client import client as groq_client
224
  from openai import OpenAI
225
  from tools.generate_lyrics import generate_structured_lyrics, format_lyrics
226
 
227
- # Apply patches after all imports are set up
228
- patch_applied = apply_transformers_patch()
229
-
230
  # Import CUDA info after flash-attn setup
231
  import torch
232
  if torch.cuda.is_available():
@@ -267,9 +154,9 @@ def validate_api_keys():
267
  def validate_file_structure():
268
  """Validate that required files and directories exist"""
269
  required_paths = [
270
- "YuEGP/inference/infer.py",
271
- "YuEGP/inference/codecmanipulator.py",
272
- "YuEGP/inference/mmtokenizer.py",
273
  "tools/generate_lyrics.py",
274
  "tools/groq_client.py",
275
  "schemas/lyrics.py" # Required for lyrics structure models
@@ -306,8 +193,8 @@ def generate_music_spaces(lyrics: str, genre: str, mood: str, progress=gr.Progre
306
  genre_file.write(f"instrumental,{genre},{mood},male vocals")
307
  genre_file_path = genre_file.name
308
 
309
- # Convert lyrics format for YuEGP compatibility
310
- # YuEGP expects [VERSE], [CHORUS] format, but our AI generates **VERSE**, **CHORUS**
311
  import re
312
 
313
  # Extract only the actual lyrics content, removing AI commentary
@@ -352,7 +239,7 @@ def generate_music_spaces(lyrics: str, genre: str, mood: str, progress=gr.Progre
352
 
353
  # High-performance command based on Spaces GPU resources
354
  # In Spaces, working directory is /app
355
- infer_script_path = os.path.join(os.getcwd(), "YuEGP", "inference", "infer.py")
356
  cmd = [
357
  sys.executable,
358
  infer_script_path,
@@ -365,9 +252,6 @@ def generate_music_spaces(lyrics: str, genre: str, mood: str, progress=gr.Progre
365
  "--stage2_batch_size", "4", # Higher batch size for speed
366
  "--output_dir", output_dir,
367
  "--max_new_tokens", "3000", # Full token count
368
- "--profile", "1", # Highest performance profile
369
- "--verbose", "3",
370
- "--rescale", # Enable audio rescaling to proper volume
371
  "--prompt_start_time", "0",
372
  "--prompt_end_time", "30", # Full 30-second clips
373
  ]
@@ -391,9 +275,9 @@ def generate_music_spaces(lyrics: str, genre: str, mood: str, progress=gr.Progre
391
  print(f"Working directory: {os.getcwd()}")
392
  print(f"Command: {' '.join(cmd)}")
393
 
394
- # Change to YuEGP/inference directory for execution
395
  original_cwd = os.getcwd()
396
- inference_dir = os.path.join(os.getcwd(), "YuEGP", "inference")
397
 
398
  try:
399
  os.chdir(inference_dir)
 
78
  # Setup environment first
79
  setup_spaces_environment()
80
 
81
+ # Download required models for YuE inference
82
  def download_required_models():
83
  """Download required model files at startup"""
84
  try:
 
103
  # Install flash-attn if needed
104
  flash_attn_available = install_flash_attn()
105
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
  # Now import the rest of the dependencies
108
  # Add project root to Python path for imports
 
114
  from openai import OpenAI
115
  from tools.generate_lyrics import generate_structured_lyrics, format_lyrics
116
 
 
 
 
117
  # Import CUDA info after flash-attn setup
118
  import torch
119
  if torch.cuda.is_available():
 
154
  def validate_file_structure():
155
  """Validate that required files and directories exist"""
156
  required_paths = [
157
+ "YuE/inference/infer.py",
158
+ "YuE/inference/codecmanipulator.py",
159
+ "YuE/inference/mmtokenizer.py",
160
  "tools/generate_lyrics.py",
161
  "tools/groq_client.py",
162
  "schemas/lyrics.py" # Required for lyrics structure models
 
193
  genre_file.write(f"instrumental,{genre},{mood},male vocals")
194
  genre_file_path = genre_file.name
195
 
196
+ # Convert lyrics format for YuE compatibility
197
+ # YuE expects [VERSE], [CHORUS] format, but our AI generates **VERSE**, **CHORUS**
198
  import re
199
 
200
  # Extract only the actual lyrics content, removing AI commentary
 
239
 
240
  # High-performance command based on Spaces GPU resources
241
  # In Spaces, working directory is /app
242
+ infer_script_path = os.path.join(os.getcwd(), "YuE", "inference", "infer.py")
243
  cmd = [
244
  sys.executable,
245
  infer_script_path,
 
252
  "--stage2_batch_size", "4", # Higher batch size for speed
253
  "--output_dir", output_dir,
254
  "--max_new_tokens", "3000", # Full token count
 
 
 
255
  "--prompt_start_time", "0",
256
  "--prompt_end_time", "30", # Full 30-second clips
257
  ]
 
275
  print(f"Working directory: {os.getcwd()}")
276
  print(f"Command: {' '.join(cmd)}")
277
 
278
+ # Change to YuE/inference directory for execution
279
  original_cwd = os.getcwd()
280
+ inference_dir = os.path.join(os.getcwd(), "YuE", "inference")
281
 
282
  try:
283
  os.chdir(inference_dir)
download_models.py CHANGED
@@ -14,7 +14,7 @@ def download_xcodec_models():
14
  """Download xcodec_mini_infer using git clone (no LFS) + wget for large files"""
15
 
16
  # Base path for xcodec models - convert to absolute path to avoid working directory issues
17
- xcodec_base = Path("YuEGP/inference/xcodec_mini_infer").resolve()
18
 
19
  print("📥 Downloading xcodec_mini_infer using git clone + wget strategy...")
20
 
@@ -183,7 +183,7 @@ def ensure_model_availability():
183
  Download them if they don't exist.
184
  """
185
 
186
- xcodec_base = Path("YuEGP/inference/xcodec_mini_infer")
187
 
188
  # Check if critical files exist (both for recons and vocoder stages)
189
  critical_files = [
 
14
  """Download xcodec_mini_infer using git clone (no LFS) + wget for large files"""
15
 
16
  # Base path for xcodec models - convert to absolute path to avoid working directory issues
17
+ xcodec_base = Path("YuE/inference/xcodec_mini_infer").resolve()
18
 
19
  print("📥 Downloading xcodec_mini_infer using git clone + wget strategy...")
20
 
 
183
  Download them if they don't exist.
184
  """
185
 
186
+ xcodec_base = Path("YuE/inference/xcodec_mini_infer")
187
 
188
  # Check if critical files exist (both for recons and vocoder stages)
189
  critical_files = [