Commit 3e600ab
Parent(s): f117d73
try making mp4 compatible
app.py
CHANGED
@@ -39,11 +39,18 @@ def get_cached_model(model_name: str):
 # Separation Logic (all stems)
 def separate_all_stems(audio_file_path: str, model_name: str):
     model = get_cached_model(model_name)
-    waveform, sr = torchaudio.load(audio_file_path)
-    is_mono = waveform.shape[0] == 1
-    if is_mono:
-        waveform = waveform.repeat(2, 1)
-
+    #waveform, sr = torchaudio.load(audio_file_path)
+    #is_mono = waveform.shape[0] == 1
+    #if is_mono:
+        #waveform = waveform.repeat(2, 1)
+
+    signal = AudioSignal(audio_file_path)
+    signal = signal.resample(44100)  # expects 44.1kHz
+    if signal.num_channels == 1:
+        signal = signal.convert_to(stereo=True)
+
+    waveform = torch.tensor(signal.audio_data).to(torch.float32)
+    sr = signal.sample_rate
     with torch.no_grad():
         stems_batch = apply_model(
             model,
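For context, here is a minimal runnable sketch of the loading path this commit moves toward: decode with audiotools' AudioSignal, resample to 44.1 kHz, force a stereo mix, and hand the tensor to Demucs. It is an assumption-laden illustration, not the app's code: `separate_all_stems_sketch`, the `htdemucs` default, and the use of `demucs.pretrained.get_model` in place of the app's `get_cached_model` cache are made up for the example, and the mono-to-stereo step uses a plain tensor repeat instead of the diff's `signal.convert_to(stereo=True)`.

import torch
from audiotools import AudioSignal
from demucs.apply import apply_model
from demucs.pretrained import get_model

TARGET_SR = 44100  # Demucs models expect 44.1 kHz input


def separate_all_stems_sketch(audio_file_path: str, model_name: str = "htdemucs"):
    # Stand-in for the app's get_cached_model(model_name) cache.
    model = get_model(model_name)

    # AudioSignal decodes the file (the commit relies on it accepting mp4/m4a
    # containers) and is resampled to the model's expected rate.
    signal = AudioSignal(audio_file_path)
    signal = signal.resample(TARGET_SR)

    # audio_data is (batch, channels, samples); take the first item and make
    # the mix stereo by duplicating a mono channel.
    waveform = signal.audio_data[0].to(torch.float32)
    if waveform.shape[0] == 1:
        waveform = waveform.repeat(2, 1)

    with torch.no_grad():
        # apply_model takes a batched (batch, channels, samples) tensor and
        # returns one waveform per stem source.
        stems = apply_model(model, waveform.unsqueeze(0))[0]

    return dict(zip(model.sources, stems))

The only behavioral difference from the committed diff is the stereo conversion: duplicating the single channel with `repeat(2, 1)` mirrors what the removed torchaudio code did, so it avoids depending on a `convert_to` helper that this sketch does not verify.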