import gradio as gr
import numpy as np
import resampy
import torch
import torchaudio
from huggingface_hub import hf_hub_download

from deepafx_st.system import System
from deepafx_st.utils import DSPMode

system_speech = System.load_from_checkpoint(
    hf_hub_download("nateraw/deepafx-st-libritts-autodiff", "lit_model.ckpt"), batch_size=1
).eval()
system_music = System.load_from_checkpoint(
    hf_hub_download("nateraw/deepafx-st-jamendo-autodiff", "lit_model.ckpt"), batch_size=1
).eval()
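# Note: hf_hub_download caches files locally, so the checkpoint downloads
# above only hit the network on the first run.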

gpu = torch.cuda.is_available()
if gpu:
    system_speech.to("cuda")
    system_music.to("cuda")


def process(input_path, reference_path, model):
    system = system_speech if model == "speech" else system_music

    # load audio data
    x, x_sr = torchaudio.load(input_path)
    r, r_sr = torchaudio.load(reference_path)

    # resample to 24 kHz if needed
    if x_sr != 24000:
        print("Resampling to 24000 Hz...")
        x_24000 = torch.tensor(resampy.resample(x.view(-1).numpy(), x_sr, 24000))
        x_24000 = x_24000.view(1, -1)
    else:
        x_24000 = x
    if r_sr != 24000:
        print("Resampling to 24000 Hz...")
        r_24000 = torch.tensor(resampy.resample(r.view(-1).numpy(), r_sr, 24000))
        r_24000 = r_24000.view(1, -1)
    else:
        r_24000 = r
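    # A torch-native alternative to the resampy calls above (a sketch; the two
    # libraries use slightly different filter designs, so output differs a bit):
    #
    #   import torchaudio.functional as AF
    #   x_24000 = AF.resample(x, orig_freq=x_sr, new_freq=24000).view(1, -1)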

    # take the first channel, crop to 5 seconds, and peak normalize to -12 dBFS
    x_24000 = x_24000[0:1, : 24000 * 5]
    x_24000 /= x_24000.abs().max()
    x_24000 *= 10 ** (-12 / 20.0)
    x_24000 = x_24000.view(1, 1, -1)

    # same crop and -12 dBFS peak normalization for the reference
    r_24000 = r_24000[0:1, : 24000 * 5]
    r_24000 /= r_24000.abs().max()
    r_24000 *= 10 ** (-12 / 20.0)
    r_24000 = r_24000.view(1, 1, -1)
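    # 10 ** (-12 / 20) ≈ 0.251, so the peak sample now sits at about a quarter
    # of full scale; .view(1, 1, -1) gives the (batch, channels, samples)
    # layout the model consumes below.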

    if gpu:
        x_24000 = x_24000.to("cuda")
        r_24000 = r_24000.to("cuda")

    with torch.no_grad():
        y_hat, p, e = system(x_24000, r_24000)
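        # y_hat is the style-transferred audio; p and e (presumably the
        # predicted processor parameters and an internal embedding) are
        # unused in this demo.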

    # peak normalize the output (the renormalized x_24000 is not used again)
    y_hat = y_hat.view(1, -1)
    y_hat /= y_hat.abs().max()
    x_24000 /= x_24000.abs().max()

    # squeeze to (T,), move to CPU, and convert to int16 for Gradio
    out_audio = (32767 * y_hat).squeeze(0).detach().cpu().numpy().astype(np.int16)
    return 24000, out_audio
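
# Calling process() directly, outside the Gradio UI (a sketch; the .wav paths
# are hypothetical placeholders):
#
#   import scipy.io.wavfile
#   sr, audio = process("input.wav", "reference.wav", "speech")
#   scipy.io.wavfile.write("styled.wav", sr, audio)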


gr.Interface(
    fn=process,
    inputs=[gr.Audio(type="filepath"), gr.Audio(type="filepath"), gr.Dropdown(["speech", "music"], value="speech")],
    outputs="audio",
    examples=[
        [
            hf_hub_download("nateraw/examples", "voice_raw.wav", repo_type="dataset", cache_dir="./data"),
            hf_hub_download("nateraw/examples", "voice_produced.wav", repo_type="dataset", cache_dir="./data"),
            "speech",
        ],
        [
            hf_hub_download("nateraw/examples", "nys_of_mind.wav", repo_type="dataset", cache_dir="./data"),
            hf_hub_download("nateraw/examples", "world_is_yours_highpass.wav", repo_type="dataset", cache_dir="./data"),
            "music",
        ],
    ],
    title="DeepAFx-ST",
    description=(
        "Gradio demo for DeepAFx-ST: style transfer of audio effects with differentiable signal processing. To use it,"
        " simply upload your audio files or choose one of the examples. Read more at the links below."
    ),
    article=(
        "<div style='text-align: center;'><a href='https://github.com/adobe-research/DeepAFx-ST' target='_blank'>GitHub Repo</a>"
        " <center><img src='https://visitor-badge.glitch.me/badge?page_id=nateraw_deepafx-st' alt='visitor"
        " badge'></center></div>"
    ),
    allow_flagging="never",
    cache_examples=False,
).launch()