Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -9,6 +9,13 @@ from scipy.io import wavfile
 from bark import generate_audio, SAMPLE_RATE
 from bark.generation import preload_models, load_model, generate_text_semantic
 
+# Add device detection
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print("CUDA available:", torch.cuda.is_available())
+print("CUDA device count:", torch.cuda.device_count())
+if torch.cuda.is_available():
+    print("CUDA device name:", torch.cuda.get_device_name(0))
+
 class VoiceCloningApp:
     def __init__(self):
         # Create working directory
@@ -19,8 +26,8 @@
         # Explicit model loading with error handling
         try:
             print("Attempting to load Bark models...")
-            preload_models()
-            print("Bark models loaded successfully")
+            preload_models(device=device)
+            print(f"Bark models loaded successfully on {device}")
         except Exception as e:
             print(f"Error loading Bark models: {e}")
             raise RuntimeError(f"Could not load Bark models: {e}")
@@ -62,13 +69,15 @@
                 history_prompt=None,
                 temp=0.7,
                 min_eos_p=0.05,
+                device=device
             )
 
             # Generate audio from semantic tokens
             audio_array = generate_audio(
                 semantic_tokens,
                 history_prompt=None,
-                temp=0.7
+                temp=0.7,
+                device=device
             )
 
             # Save generated audio