# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import os
import json
from pathlib import Path
from typing import Optional, Union, Dict

import torch
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm

from stepvocoder.cosyvoice2.bigvgan import activations
from stepvocoder.cosyvoice2.bigvgan.bigvgan_utils import init_weights, get_padding
from stepvocoder.cosyvoice2.bigvgan.alias_free_activation.torch.act import Activation1d as TorchActivation1d


class AMPBlock1(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    AMPBlock1 adds a second set of Conv1d layers (self.convs2) with a fixed dilation=1, one applied after each layer in self.convs1.
    Args:
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
        activation (str): Activation function type. Must be either 'snake' or 'snakebeta'. Default is None.
        use_cuda_kernel (bool): If True, use the fused CUDA Activation1d kernel instead of the PyTorch implementation. Default is False.
        snake_logscale (bool): Whether the Snake / SnakeBeta parameters are defined in log scale. Default is True.
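    Example:
        A minimal usage sketch (tensor sizes are illustrative; assumes the package imports above resolve):

        >>> block = AMPBlock1(channels=128, activation="snakebeta")
        >>> x = torch.randn(1, 128, 200)  # (batch, channels, frames)
        >>> y = block(x)                  # residual output with the same shape as x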
| """ | |

    def __init__(
        self,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: str = None,
        use_cuda_kernel: bool = False,
        snake_logscale: bool = True,
    ):
        super().__init__()

        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=d,
                        padding=get_padding(kernel_size, d),
                    )
                )
                for d in dilation
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                )
                for _ in range(len(dilation))
            ]
        )
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(
            self.convs2
        )  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if use_cuda_kernel:
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # Activation functions
        if activation == "snake":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.Snake(
                            channels, alpha_logscale=snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        elif activation == "snakebeta":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.SnakeBeta(
                            channels, alpha_logscale=snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class AMPBlock2(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    Unlike AMPBlock1, AMPBlock2 does not contain the extra Conv1d layers with fixed dilation=1.
    Args:
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has one convolution. Default is (1, 3, 5).
        activation (str): Activation function type. Must be either 'snake' or 'snakebeta'. Default is None.
        use_cuda_kernel (bool): If True, use the fused CUDA Activation1d kernel instead of the PyTorch implementation. Default is False.
        snake_logscale (bool): Whether the Snake / SnakeBeta parameters are defined in log scale. Default is True.
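    Example:
        A minimal usage sketch mirroring AMPBlock1 (tensor sizes are illustrative):

        >>> block = AMPBlock2(channels=128, activation="snake")
        >>> y = block(torch.randn(1, 128, 200))  # output has the same shape as the input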
| """ | |

    def __init__(
        self,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: str = None,
        use_cuda_kernel: bool = False,
        snake_logscale: bool = True,
    ):
        super().__init__()

        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=d,
                        padding=get_padding(kernel_size, d),
                    )
                )
                for d in dilation
            ]
        )
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if use_cuda_kernel:
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # Activation functions
        if activation == "snake":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.Snake(
                            channels, alpha_logscale=snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        elif activation == "snakebeta":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.SnakeBeta(
                            channels, alpha_logscale=snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class BigVGAN(torch.nn.Module):
    """
    BigVGAN is a neural vocoder model that applies anti-aliased periodic activations in its residual blocks (resblocks).
    New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.
    Args:
        use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.
        num_mels (int): Number of mel bands in the input spectrogram. Default is 80.
        upsample_initial_channel (int): Channel count after the pre-conv layer; halved at every upsampling stage. Default is 512.
        upsample_rates / upsample_kernel_sizes (list[int]): Stride and kernel size of each transposed-conv upsampling stage.
        resblock (str): "1" selects AMPBlock1, "2" selects AMPBlock2. Default is "1".
        resblock_kernel_sizes / resblock_dilation_sizes: Kernel sizes and dilation rates of the AMP residual blocks.
        activation (str): Either 'snake' or 'snakebeta'; `snake_logscale` controls whether their parameters are defined in log scale. Default is 'snakebeta'.
        use_bias_at_final (bool): Whether the final conv_post layer uses a bias term. Default is False.
        use_tanh_at_final (bool): If True, apply tanh at the output; otherwise the output is clamped to [-1, 1]. Default is False.
    Note:
        - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
        - Ensure that the activation function is correctly specified via the `activation` argument ('snake' or 'snakebeta').
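    Example:
        A minimal construction sketch (output length follows the default configuration above; sizes are illustrative):

        >>> model = BigVGAN(use_cuda_kernel=False)
        >>> mel = torch.randn(2, 80, 100)  # (batch, num_mels, frames)
        >>> wav = model.inference(mel)     # (batch, 1, samples)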
| """ | |

    def __init__(
        self,
        use_cuda_kernel: bool = False,
        num_mels: int = 80,
        upsample_initial_channel: int = 512,
        upsample_rates: list[int] = [5, 4, 3, 2, 2, 2],
        upsample_kernel_sizes: list[int] = [11, 8, 7, 4, 4, 4],
        resblock: str = "1",
        resblock_kernel_sizes: list[int] = [3, 7, 11],
        resblock_dilation_sizes: list[tuple] = [(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        activation: str = "snakebeta",
        snake_logscale: bool = True,
        use_bias_at_final: bool = False,
        use_tanh_at_final: bool = False,
    ):
        super().__init__()
        self.use_cuda_kernel = use_cuda_kernel

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.use_cuda_kernel:
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        # Pre-conv
        # For context smoothing, the padding=3 of the first conv_pre layer is removed (padding=0),
        # so conv_pre trims (kernel_size - 1) = 6 frames from the input.
        self.conv_pre = weight_norm(
            Conv1d(num_mels, upsample_initial_channel, 7, 1, padding=0)
        )

        # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
        if resblock == "1":
            resblock_class = AMPBlock1
        elif resblock == "2":
            resblock_class = AMPBlock2
        else:
            raise ValueError(
                f"Incorrect resblock class specified in hyperparameters. Got {resblock}"
            )

        # Transposed conv-based upsamplers. Does not apply anti-aliasing
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                nn.ModuleList(
                    [
                        weight_norm(
                            ConvTranspose1d(
                                upsample_initial_channel // (2**i),
                                upsample_initial_channel // (2 ** (i + 1)),
                                k,
                                u,
                                padding=(k - u) // 2,
                            )
                        )
                    ]
                )
            )

        # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(
                    resblock_class(
                        ch,
                        k,
                        d,
                        activation=activation,
                        use_cuda_kernel=self.use_cuda_kernel,
                        snake_logscale=snake_logscale,
                    )
                )

        # Post-conv
        activation_post = (
            activations.Snake(ch, alpha_logscale=snake_logscale)
            if activation == "snake"
            else (
                activations.SnakeBeta(ch, alpha_logscale=snake_logscale)
                if activation == "snakebeta"
                else None
            )
        )
        if activation_post is None:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

        self.activation_post = Activation1d(activation=activation_post)

        # Whether to use bias for the final conv_post (defaults to False in this configuration)
        self.use_bias_at_final = use_bias_at_final
        self.conv_post = weight_norm(
            Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)
        )

        # Weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

        # Whether to apply a final tanh activation (defaults to False in this configuration;
        # when False, the output is clamped to [-1, 1] in forward())
        self.use_tanh_at_final = use_tanh_at_final

    def forward(self, x):
        # Pre-conv
        x = self.conv_pre(x)

        for i in range(self.num_upsamples):
            # Upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)
            # AMP blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        # Post-conv
        x = self.activation_post(x)
        x = self.conv_post(x)

        # Final tanh activation
        if self.use_tanh_at_final:
            x = torch.tanh(x)
        else:
            x = torch.clamp(x, min=-1.0, max=1.0)  # Bound the output to [-1, 1]

        return x

    def remove_weight_norm(self):
        try:
            print("Removing weight norm...")
            for l in self.ups:
                for l_i in l:
                    remove_weight_norm(l_i)
            for l in self.resblocks:
                l.remove_weight_norm()
            remove_weight_norm(self.conv_pre)
            remove_weight_norm(self.conv_post)
        except ValueError:
            print("[INFO] Model already removed weight norm. Skipping!")

    def _init_cuda_graph(self):
        pass

    def inference(self, x):
        x = self.forward(x)
        return x
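

if __name__ == "__main__":
    # Illustrative smoke test (a sketch, not a required entry point): build the vocoder with the
    # default configuration above and push a random mel spectrogram through it. The tensor sizes
    # are arbitrary; only the 80 mel bins must match `num_mels`.
    model = BigVGAN(use_cuda_kernel=False)
    model.eval()
    model.remove_weight_norm()  # usual inference-time step for weight-normalized vocoders

    mel = torch.randn(1, 80, 100)  # (batch, num_mels, frames)
    with torch.no_grad():
        wav = model.inference(mel)

    # conv_pre (kernel 7, padding 0) trims 6 frames, and each transposed conv multiplies the
    # length by its rate (5 * 4 * 3 * 2 * 2 * 2 = 480), so the expected output shape is
    # (1, 1, (100 - 6) * 480) = (1, 1, 45120).
    print(wav.shape)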