# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm

from modules.vocoder_blocks import *

LRELU_SLOPE = 0.1
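
# Note: LRELU_SLOPE above is the LeakyReLU negative slope used throughout the
# generator; 0.1 matches the original HiFi-GAN implementation.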


class ResBlock1(torch.nn.Module):
    def __init__(self, cfg, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.cfg = cfg
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)
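
# Shape sketch (illustrative, not part of the Amphion source): every conv in
# ResBlock1 is stride-1 and get_padding keeps "same"-style padding for the odd
# kernel sizes used here, so the block preserves both channels and length:
#   block = ResBlock1(cfg, channels=512)      # cfg is only stored, never read
#   block(torch.randn(1, 512, 100)).shape     # -> torch.Size([1, 512, 100])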


class ResBlock2(torch.nn.Module):
    def __init__(self, cfg, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.cfg = cfg
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)
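
# ResBlock2 is the lighter variant: one dilated conv per residual branch
# instead of ResBlock1's dilated+plain pair. Which variant is used is selected
# below via cfg.model.hifigan.resblock ("1" or "2").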


class HiFiGAN(torch.nn.Module):
    def __init__(self, cfg):
        super(HiFiGAN, self).__init__()
        self.cfg = cfg
        self.num_kernels = len(self.cfg.model.hifigan.resblock_kernel_sizes)
        self.num_upsamples = len(self.cfg.model.hifigan.upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(
                cfg.preprocess.n_mel,
                self.cfg.model.hifigan.upsample_initial_channel,
                7,
                1,
                padding=3,
            )
        )

        resblock = ResBlock1 if self.cfg.model.hifigan.resblock == "1" else ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(
            zip(
                self.cfg.model.hifigan.upsample_rates,
                self.cfg.model.hifigan.upsample_kernel_sizes,
            )
        ):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        self.cfg.model.hifigan.upsample_initial_channel // (2**i),
                        self.cfg.model.hifigan.upsample_initial_channel
                        // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = self.cfg.model.hifigan.upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(
                    self.cfg.model.hifigan.resblock_kernel_sizes,
                    self.cfg.model.hifigan.resblock_dilation_sizes,
                )
            ):
                self.resblocks.append(resblock(self.cfg, ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
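
    # Multi-receptive-field fusion: after each upsampling stage, the signal is
    # passed through num_kernels parallel ResBlocks (different kernel sizes and
    # dilations) and their outputs are averaged.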
    def forward(self, x):
        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
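
# Usage sketch (illustrative; the hyperparameter values below are the standard
# HiFi-GAN V1 settings and are assumptions, not read from an Amphion config):
#
#   from types import SimpleNamespace
#
#   cfg = SimpleNamespace(
#       preprocess=SimpleNamespace(n_mel=80),
#       model=SimpleNamespace(
#           hifigan=SimpleNamespace(
#               resblock="1",
#               upsample_rates=[8, 8, 2, 2],
#               upsample_kernel_sizes=[16, 16, 4, 4],
#               upsample_initial_channel=512,
#               resblock_kernel_sizes=[3, 7, 11],
#               resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
#           )
#       ),
#   )
#   vocoder = HiFiGAN(cfg)
#   mel = torch.randn(1, 80, 100)   # (batch, n_mel, frames)
#   wav = vocoder(mel)              # (1, 1, 100 * 8 * 8 * 2 * 2) == (1, 1, 25600)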


# todo: merge with ResBlock1 (lmxue, yicheng)
class ResBlock1_vits(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1_vits, self).__init__()
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)
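
# The *_vits variants differ from ResBlock1/ResBlock2 only in that forward
# accepts an optional x_mask, which zeroes padded frames before each conv and
# on the final output (as in the VITS generator), and that they take no cfg.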


# todo: merge with ResBlock2 (lmxue, yicheng)
class ResBlock2_vits(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2_vits, self).__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


# todo: merge with HiFiGAN (lmxue, yicheng)
class HiFiGAN_vits(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(HiFiGAN_vits, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = ResBlock1_vits if resblock == "1" else ResBlock2_vits

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
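
    # Global conditioning: when gin_channels != 0, an embedding g (in VITS, a
    # speaker embedding of shape (batch, gin_channels, 1)) is projected by
    # self.cond and added to the pre-conv output.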
    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
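

if __name__ == "__main__":
    # Smoke test (illustrative; not part of the Amphion source). The
    # hyperparameters are the standard HiFi-GAN V1 / VITS defaults and are
    # assumptions, not values read from a config.
    gen = HiFiGAN_vits(
        initial_channel=192,
        resblock="1",
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    )
    z = torch.randn(1, 192, 50)  # latent frames, e.g. from the VITS flow
    wav = gen(z)                 # (1, 1, 50 * 8 * 8 * 2 * 2) == (1, 1, 12800)
    print(wav.shape)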