import sys
import os
import torch
from omegaconf import OmegaConf
from pitch import load_csv_pitch
from grad.utils import fix_len_compatibility
from grad.model import GradTTS
from bigvgan.model.generator import Generator
import gradio as gr
import numpy as np
import soundfile
import librosa
import logging
# Assumed source of the Separator used in svc_main (python-audio-separator package)
from audio_separator.separator import Separator

# Set logging levels to suppress noisy third-party warnings
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('markdown_it').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)

# Make the local packages (grad, bigvgan, pitch, hubert) importable from the
# current working directory
sys.path.append(os.getcwd())
# Load the Grad-TTS (GVC) acoustic model checkpoint; weights missing from the
# checkpoint keep their current (initialized) values
def load_gvc_model(checkpoint_path, model):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    saved_state_dict = checkpoint_dict["model"]
    state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
        except KeyError:
            print(f"{k} is not in the checkpoint")
            new_state_dict[k] = v
    model.load_state_dict(new_state_dict)
    return model
# Load the NSF-BigVGAN vocoder checkpoint (generator weights only)
def load_bigv_model(checkpoint_path, model):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    saved_state_dict = checkpoint_dict["model_g"]
    state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
        except KeyError:
            print(f"{k} is not in the checkpoint")
            new_state_dict[k] = v
    model.load_state_dict(new_state_dict)
    return model
# Run Grad-TTS inference on one chunk of content vectors and pitch
def gvc_main(device, model, _vec, _pit, spk, temperature=1.015):
    l_vec = _vec.shape[0]
    d_vec = _vec.shape[1]
    # Pad the sequence to a length compatible with the decoder's downsampling,
    # then crop the output back to the true length
    lengths_fix = fix_len_compatibility(l_vec)
    lengths = torch.LongTensor([l_vec]).to(device)
    vec = torch.zeros((1, lengths_fix, d_vec), dtype=torch.float32).to(device)
    pit = torch.zeros((1, lengths_fix), dtype=torch.float32).to(device)
    vec[0, :l_vec, :] = _vec
    pit[0, :l_vec] = _pit
    y_enc, y_dec = model(lengths, vec, pit, spk, n_timesteps=10, temperature=temperature)
    y_dec = y_dec.squeeze(0)
    y_dec = y_dec[:, :l_vec]
    return y_dec
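
# Expected inputs for gvc_main (shapes inferred from svc_change below; the
# content-vector dimension is whatever configs/base.yaml sets for hps.grad.n_vecs):
#   _vec: FloatTensor [T, n_vecs]  content vectors from hubert/inference.py
#   _pit: FloatTensor [T]          frame-level F0 from pitch/inference.py
#   spk:  FloatTensor [1, D_spk]   speaker embedding, already moved to device
# The returned mel spectrogram has shape [n_mels, T].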
# Extract features from the input wave and synthesize the converted mel and audio
def svc_change(argswav, argsspk):
    # Content vectors (PPG) and pitch are extracted by the repo's own
    # command-line tools and written to temporary .npy files
    argsvec = "svc_tmp.ppg.npy"
    os.system(f"python hubert/inference.py -w {argswav} -v {argsvec}")
    argspit = "svc_tmp.pit.npy"
    os.system(f"python pitch/inference.py -w {argswav} -p {argspit}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    hps = OmegaConf.load('configs/base.yaml')
    print('Initializing Grad-TTS...')
    model = GradTTS(
        hps.grad.n_mels, hps.grad.n_vecs, hps.grad.n_pits, hps.grad.n_spks,
        hps.grad.n_embs, hps.grad.n_enc_channels, hps.grad.filter_channels,
        hps.grad.dec_dim, hps.grad.beta_min, hps.grad.beta_max, hps.grad.pe_scale
    )
    print(f'Number of encoder parameters = {model.encoder.nparams/1e6:.2f}m')
    print(f'Number of decoder parameters = {model.decoder.nparams/1e6:.2f}m')
    load_gvc_model('grad_pretrain/gvc.pretrain.pth', model)
    model.eval()
    model.to(device)

    spk = np.load(argsspk)
    spk = torch.FloatTensor(spk)

    vec = np.load(argsvec)
    vec = np.repeat(vec, 2, 0)  # duplicate each content frame to match the pitch frame rate
    vec = torch.FloatTensor(vec)

    pit = load_csv_pitch(argspit)
    pit = np.array(pit)
    pit = torch.FloatTensor(pit)

    # Trim both feature streams to the same number of frames
    len_pit = pit.size()[0]
    len_vec = vec.size()[0]
    len_min = min(len_pit, len_vec)
    pit = pit[:len_min]
    vec = vec[:len_min, :]
    with torch.no_grad():
        spk = spk.unsqueeze(0).to(device)
        all_frame = len_min
        hop_frame = 8
        out_chunk = 2400  # 24 seconds
        out_index = 0
        mel = None
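
        # Chunked inference: each window covers out_chunk frames plus
        # hop_frame frames of extra context on either side; the context frames
        # are cut away from every window's output before concatenation so that
        # chunk boundaries do not produce audible seams.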
        while out_index < all_frame:
            if out_index == 0:  # first chunk: no left context
                cut_s = 0
                cut_s_out = 0
            else:
                cut_s = out_index - hop_frame
                cut_s_out = hop_frame
            if out_index + out_chunk + hop_frame > all_frame:  # last chunk: no right context
                cut_e = all_frame
                cut_e_out = -1
            else:
                cut_e = out_index + out_chunk + hop_frame
                cut_e_out = -1 * hop_frame
            sub_vec = vec[cut_s:cut_e, :].to(device)
            sub_pit = pit[cut_s:cut_e].to(device)
            sub_out = gvc_main(device, model, sub_vec, sub_pit, spk, 0.95)
            sub_out = sub_out[:, cut_s_out:cut_e_out]
            out_index = out_index + out_chunk
            if mel is None:
                mel = sub_out
            else:
                mel = torch.cat((mel, sub_out), -1)
            if cut_e == all_frame:
                break
    del model
    del hps
    del spk
    del vec
    del sub_vec
    del sub_pit
    del sub_out
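
    # The Grad-TTS acoustic model is no longer needed; the generated mel is
    # now vocoded with NSF-BigVGAN, conditioned on the same F0 contour.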
    hps = OmegaConf.load('./bigvgan/configs/nsf_bigvgan.yaml')
    model = Generator(hps)
    load_bigv_model('./bigvgan_pretrain/nsf_bigvgan_pretrain_32K.pth', model)
    model.eval()
    model.to(device)

    # Align pitch and mel to the same length before vocoding
    len_pit = pit.size()[0]
    len_mel = mel.size()[1]
    len_min = min(len_pit, len_mel)
    pit = pit[:len_min]
    mel = mel[:, :len_min]

    with torch.no_grad():
        mel = mel.unsqueeze(0).to(device)
        pit = pit.unsqueeze(0).to(device)
        audio = model.inference(mel, pit)
        audio = audio.cpu().detach().numpy()
        pitwav = model.pitch2wav(pit)  # F0-derived source waveform; computed but unused here
        pitwav = pitwav.cpu().detach().numpy()

    return audio
# Gradio callback: normalize the uploaded audio, separate the vocals, and run the conversion
def svc_main(sid, input_audio):
    if input_audio is None:
        return "You need to upload an audio file", None
    sampling_rate, audio = input_audio
    # gr.Audio delivers integer PCM; scale to float32 in [-1, 1]
    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    if len(audio.shape) > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    if sampling_rate != 16000:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
    if len(audio) > 16000 * 100:  # limit the input to 100 seconds
        audio = audio[:16000 * 100]

    # Write the mix to disk, then split it into vocal and instrumental stems;
    # only the vocal stem is converted. This call pattern assumes the
    # audio-separator API, which separates files rather than in-memory arrays.
    mix_path = "svc_tmp.wav"
    soundfile.write(mix_path, audio, 16000, format="wav")
    separator = Separator()
    separator.load_model()
    output_names = {
        "Vocals": "vocals_output",
        "Instrumental": "instrumental_output",
    }
    separator.separate(mix_path, output_names)
    wav_path = "vocals_output.wav"

    out_audio = svc_change(wav_path, f"configs/singers/singer00{sid}.npy")
    return "Conversion Successful", (32000, out_audio)
# Gradio WebUI setup
app = gr.Blocks()
with app:
    with gr.Tabs():
        with gr.TabItem("Grad-SVC"):
            gr.Markdown(
                """
                Based on Grad-TTS from HUAWEI Noah's Ark Lab.
                This project is named Grad-SVC, or GVC for short. Its core technology is diffusion,
                but it is very different from other diffusion-based SVC models.
                <video id='video' controls='' preload='yes'>
                <source id='mp4' src='https://github.com/PlayVoice/Grad-SVC/assets/16432329/f9b66af7-b5b5-4efb-b73d-adb0dc84a0ae' type='video/mp4'>
                </video>
                """
            )
            sid = gr.Dropdown(label="Voice Tone", choices=["22", "33", "47", "51"], value="47")
            vc_input3 = gr.Audio(label="Upload Audio")
            vc_submit = gr.Button("Convert", variant="primary")
            vc_output1 = gr.Textbox(label="Status Information")
            vc_output2 = gr.Audio(label="Converted Audio")
            vc_submit.click(svc_main, [sid, vc_input3], [vc_output1, vc_output2])
# Launch the Gradio app
app.launch(share=True)
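
# Note: share=True additionally requests a temporary public gradio.live URL;
# pass share=False to keep the demo reachable only on the local machine.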