Update app.py
app.py CHANGED
@@ -12,18 +12,18 @@ from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, WhisperTokeni
 from kokoro import KPipeline
 import soundfile as sf
 import subprocess
-subprocess.run(
-    "pip install flash-attn --no-build-isolation",
-    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
-    shell=True,
-)
+# subprocess.run(
+#     "pip install flash-attn --no-build-isolation",
+#     env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+#     shell=True,
+# )
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16
 MODEL_NAME = "openai/whisper-large-v3-turbo"
-
+# attn_implementation="flash_attention_2"
 model = AutoModelForSpeechSeq2Seq.from_pretrained(
-    MODEL_NAME, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True,
+    MODEL_NAME, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True,
 )
 model.to(device)
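
The commit comments out the runtime flash-attn install and leaves the flash_attention_2 hint as a comment, so from_pretrained falls back to the default attention implementation. As a rough sketch (not part of this commit, and assuming the rest of app.py is unchanged), the same setup could enable flash attention only when the package is actually importable:

import torch
from transformers import AutoModelForSpeechSeq2Seq

MODEL_NAME = "openai/whisper-large-v3-turbo"
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16

# Hypothetical guard (not in the original app.py): request flash_attention_2
# only if flash-attn is installed, otherwise use the default attention.
try:
    import flash_attn  # noqa: F401
    attn_kwargs = {"attn_implementation": "flash_attention_2"}
except ImportError:
    attn_kwargs = {}

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    **attn_kwargs,
).to(device)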