├── .gitignore ├── LICENSE ├── README.md ├── attentions.py ├── commons.py ├── configs ├── config-single-speaker.json └── config.json ├── data_utils.py ├── filelists ├── miyu_train.txt ├── miyu_train.txt.cleaned ├── miyu_val.txt └── miyu_val.txt.cleaned ├── inference.ipynb ├── losses.py ├── mel_processing.py ├── models.py ├── modules.py ├── monotonic_align ├── __init__.py └── core.py ├── preprocess.py ├── requirements.txt ├── text ├── LICENSE ├── __init__.py ├── cleaners.py ├── japanese.py └── symbols.py ├── train.py ├── train_ms.py ├── transforms.py ├── utils.py ├── wav └── ba │ └── miyu │ ├── 124180.wav │ ├── 130233.wav │ ├── 139528.wav │ ├── 145077.wav │ ├── 147691.wav │ ├── 155037.wav │ ├── 164427.wav │ ├── 173043.wav │ ├── 211336.wav │ ├── 255333.wav │ ├── 29571.wav │ ├── 306949.wav │ ├── 318631.wav │ ├── 356351.wav │ ├── 379056.wav │ ├── 387627.wav │ ├── 389580.wav │ ├── 39192.wav │ ├── 412781.wav │ ├── 41287.wav │ ├── 422295.wav │ ├── 436322.wav │ ├── 44255.wav │ ├── 450375.wav │ ├── 456869.wav │ ├── 461433.wav │ ├── 472324.wav │ ├── 527590.wav │ ├── 533522.wav │ ├── 536512.wav │ ├── 537029.wav │ ├── 554075.wav │ ├── 55566.wav │ ├── 589002.wav │ ├── 616395.wav │ ├── 618533.wav │ ├── 619085.wav │ ├── 643475.wav │ ├── 648148.wav │ ├── 649415.wav │ ├── 706012.wav │ ├── 709504.wav │ ├── 712096.wav │ ├── 714649.wav │ ├── 749189.wav │ ├── 750669.wav │ ├── 755180.wav │ ├── 805836.wav │ ├── 814441.wav │ ├── 827005.wav │ ├── 829111.wav │ ├── 861536.wav │ ├── 862032.wav │ ├── 872843.wav │ ├── 886881.wav │ ├── 899245.wav │ ├── 902456.wav │ ├── 90758.wav │ ├── 916145.wav │ ├── 930369.wav │ ├── 940433.wav │ ├── 946836.wav │ ├── 955071.wav │ ├── 974397.wav │ ├── 978431.wav │ └── 986967.wav └── webui.py /.gitignore: -------------------------------------------------------------------------------- 1 | DUMMY1 2 | DUMMY2 3 | DUMMY3 4 | logs 5 | __pycache__ 6 | .ipynb_checkpoints 7 | .*.swp 8 | 9 | build 10 | *.c 11 | monotonic_align/monotonic_align 12 | /.vs/vits/FileContentIndex 13 | configs/dracu_japanese_base2.json 14 | configs/tolove_japanese_base2.json 15 | 16 | .idea -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Jaehyeon Kim 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Text cleaner from https://github.com/CjangCjengh/vits 2 | 3 | Original repo: https://github.com/jaywalnut310/vits 4 | 5 | ## Online training and inference 6 | ### Colab 7 | See [vits-finetuning](https://colab.research.google.com/drive/13FF2pBWxj9rMR1SjI_JpVD6mTRN-kq--?usp=share_link) 8 | 9 | # How to use 10 | Suggested: Python 3.7 11 | 12 | Only Japanese datasets can be used for fine-tuning in this repo. 13 | ## Clone this repository 14 | ```sh 15 | git clone https://github.com/SayaSS/vits-finetuning.git 16 | ``` 17 | ## Install requirements 18 | ```sh 19 | pip install -r requirements.txt 20 | ``` 21 | ## Download pre-trained models 22 | - [G_0.pth](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai/resolve/main/model/G_0.pth) 23 | - [D_0.pth](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai/resolve/main/model/D_0.pth) 24 | - Edit "model_dir" (line 152) in utils.py 25 | - Put the pre-trained models in "model_dir"/checkpoints 26 | 27 | ### If you need to customize "n_speakers", replace the pre-trained models with these two. 28 | - [G_0-p.pth](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai/resolve/main/model/G_0-p.pth) 29 | - [D_0-p.pth](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai/resolve/main/model/D_0-p.pth) 30 | 31 | ## Create datasets 32 | - Speaker IDs should be between 0 and 803. 33 | - About 50 audio-text pairs are enough, and 100-600 epochs usually give good results; more data may improve quality further. 34 | - Resample all audio to 22050 Hz, 16-bit, mono WAV files. 35 | - Audio files should be >= 1 s and <= 10 s long. 36 | ``` 37 | path/to/XXX.wav|speaker id|transcript 38 | ``` 39 | - Example: 40 | 41 | ``` 42 | dataset/001.wav|10|こんにちは。 43 | ``` 44 | For complete examples, please see filelists/miyu_train.txt and filelists/miyu_val.txt.
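If your source clips are not already in that format, here is a minimal conversion sketch. It assumes `librosa` and `soundfile` are installed (they are not necessarily in requirements.txt), and the input/output paths are illustrative only:
```python
# Minimal resampling sketch -- assumes librosa and soundfile are installed.
import librosa
import soundfile as sf

# Load as mono and resample to the target 22050 Hz in one step.
audio, sr = librosa.load("raw/001.wav", sr=22050, mono=True)

# Write a 16-bit PCM mono WAV, the format this repo expects.
sf.write("dataset/001.wav", audio, sr, subtype="PCM_16")
```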
45 | 46 | ## Preprocess 47 | ```sh 48 | python preprocess.py --filelists path/to/filelist_train.txt path/to/filelist_val.txt 49 | ``` 50 | Edit "training_files" and "validation_files" in configs/config.json 51 | 52 | ## Train 53 | ```sh 54 | # Multiple speakers 55 | python train_ms.py -c configs/config.json -m checkpoints 56 | ``` 57 | -------------------------------------------------------------------------------- /attentions.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | import numpy as np 4 | import torch 5 | from torch import nn 6 | from torch.nn import functional as F 7 | 8 | import commons 9 | import modules 10 | from modules import LayerNorm 11 | 12 | 13 | class Encoder(nn.Module): 14 | def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): 15 | super().__init__() 16 | self.hidden_channels = hidden_channels 17 | self.filter_channels = filter_channels 18 | self.n_heads = n_heads 19 | self.n_layers = n_layers 20 | self.kernel_size = kernel_size 21 | self.p_dropout = p_dropout 22 | self.window_size = window_size 23 | 24 | self.drop = nn.Dropout(p_dropout) 25 | self.attn_layers = nn.ModuleList() 26 | self.norm_layers_1 = nn.ModuleList() 27 | self.ffn_layers = nn.ModuleList() 28 | self.norm_layers_2 = nn.ModuleList() 29 | for i in range(self.n_layers): 30 | self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) 31 | self.norm_layers_1.append(LayerNorm(hidden_channels)) 32 | self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) 33 | self.norm_layers_2.append(LayerNorm(hidden_channels)) 34 | 35 | def forward(self, x, x_mask): 36 | attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) 37 | x = x * x_mask 38 | for i in range(self.n_layers): 39 | y = self.attn_layers[i](x, x, attn_mask) 40 | y = self.drop(y) 41 | x = self.norm_layers_1[i](x + y) 42 | 43 | y = self.ffn_layers[i](x, x_mask) 44 | y = self.drop(y) 45 | x = self.norm_layers_2[i](x + y) 46 | x = x * x_mask 47 | return x 48 | 49 | 50 | class Decoder(nn.Module): 51 | def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): 52 | super().__init__() 53 | self.hidden_channels = hidden_channels 54 | self.filter_channels = filter_channels 55 | self.n_heads = n_heads 56 | self.n_layers = n_layers 57 | self.kernel_size = kernel_size 58 | self.p_dropout = p_dropout 59 | self.proximal_bias = proximal_bias 60 | self.proximal_init = proximal_init 61 | 62 | self.drop = nn.Dropout(p_dropout) 63 | self.self_attn_layers = nn.ModuleList() 64 | self.norm_layers_0 = nn.ModuleList() 65 | self.encdec_attn_layers = nn.ModuleList() 66 | self.norm_layers_1 = nn.ModuleList() 67 | self.ffn_layers = nn.ModuleList() 68 | self.norm_layers_2 = nn.ModuleList() 69 | for i in range(self.n_layers): 70 | self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) 71 | self.norm_layers_0.append(LayerNorm(hidden_channels)) 72 | self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) 73 | self.norm_layers_1.append(LayerNorm(hidden_channels)) 74 | self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels,
kernel_size, p_dropout=p_dropout, causal=True)) 75 | self.norm_layers_2.append(LayerNorm(hidden_channels)) 76 | 77 | def forward(self, x, x_mask, h, h_mask): 78 | """ 79 | x: decoder input 80 | h: encoder output 81 | """ 82 | self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) 83 | encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) 84 | x = x * x_mask 85 | for i in range(self.n_layers): 86 | y = self.self_attn_layers[i](x, x, self_attn_mask) 87 | y = self.drop(y) 88 | x = self.norm_layers_0[i](x + y) 89 | 90 | y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) 91 | y = self.drop(y) 92 | x = self.norm_layers_1[i](x + y) 93 | 94 | y = self.ffn_layers[i](x, x_mask) 95 | y = self.drop(y) 96 | x = self.norm_layers_2[i](x + y) 97 | x = x * x_mask 98 | return x 99 | 100 | 101 | class MultiHeadAttention(nn.Module): 102 | def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): 103 | super().__init__() 104 | assert channels % n_heads == 0 105 | 106 | self.channels = channels 107 | self.out_channels = out_channels 108 | self.n_heads = n_heads 109 | self.p_dropout = p_dropout 110 | self.window_size = window_size 111 | self.heads_share = heads_share 112 | self.block_length = block_length 113 | self.proximal_bias = proximal_bias 114 | self.proximal_init = proximal_init 115 | self.attn = None 116 | 117 | self.k_channels = channels // n_heads 118 | self.conv_q = nn.Conv1d(channels, channels, 1) 119 | self.conv_k = nn.Conv1d(channels, channels, 1) 120 | self.conv_v = nn.Conv1d(channels, channels, 1) 121 | self.conv_o = nn.Conv1d(channels, out_channels, 1) 122 | self.drop = nn.Dropout(p_dropout) 123 | 124 | if window_size is not None: 125 | n_heads_rel = 1 if heads_share else n_heads 126 | rel_stddev = self.k_channels**-0.5 127 | self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) 128 | self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) 129 | 130 | nn.init.xavier_uniform_(self.conv_q.weight) 131 | nn.init.xavier_uniform_(self.conv_k.weight) 132 | nn.init.xavier_uniform_(self.conv_v.weight) 133 | if proximal_init: 134 | with torch.no_grad(): 135 | self.conv_k.weight.copy_(self.conv_q.weight) 136 | self.conv_k.bias.copy_(self.conv_q.bias) 137 | 138 | def forward(self, x, c, attn_mask=None): 139 | q = self.conv_q(x) 140 | k = self.conv_k(c) 141 | v = self.conv_v(c) 142 | 143 | x, self.attn = self.attention(q, k, v, mask=attn_mask) 144 | 145 | x = self.conv_o(x) 146 | return x 147 | 148 | def attention(self, query, key, value, mask=None): 149 | # reshape [b, d, t] -> [b, n_h, t, d_k] 150 | b, d, t_s, t_t = (*key.size(), query.size(2)) 151 | query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) 152 | key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) 153 | value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) 154 | 155 | scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) 156 | if self.window_size is not None: 157 | assert t_s == t_t, "Relative attention is only available for self-attention." 
158 | key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) 159 | rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings) 160 | scores_local = self._relative_position_to_absolute_position(rel_logits) 161 | scores = scores + scores_local 162 | if self.proximal_bias: 163 | assert t_s == t_t, "Proximal bias is only available for self-attention." 164 | scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) 165 | if mask is not None: 166 | scores = scores.masked_fill(mask == 0, -1e4) 167 | if self.block_length is not None: 168 | assert t_s == t_t, "Local attention is only available for self-attention." 169 | block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) 170 | scores = scores.masked_fill(block_mask == 0, -1e4) 171 | p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] 172 | p_attn = self.drop(p_attn) 173 | output = torch.matmul(p_attn, value) 174 | if self.window_size is not None: 175 | relative_weights = self._absolute_position_to_relative_position(p_attn) 176 | value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) 177 | output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) 178 | output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] 179 | return output, p_attn 180 | 181 | def _matmul_with_relative_values(self, x, y): 182 | """ 183 | x: [b, h, l, m] 184 | y: [h or 1, m, d] 185 | ret: [b, h, l, d] 186 | """ 187 | ret = torch.matmul(x, y.unsqueeze(0)) 188 | return ret 189 | 190 | def _matmul_with_relative_keys(self, x, y): 191 | """ 192 | x: [b, h, l, d] 193 | y: [h or 1, m, d] 194 | ret: [b, h, l, m] 195 | """ 196 | ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) 197 | return ret 198 | 199 | def _get_relative_embeddings(self, relative_embeddings, length): 200 | max_relative_position = 2 * self.window_size + 1 201 | # Pad first before slice to avoid using cond ops. 202 | pad_length = max(length - (self.window_size + 1), 0) 203 | slice_start_position = max((self.window_size + 1) - length, 0) 204 | slice_end_position = slice_start_position + 2 * length - 1 205 | if pad_length > 0: 206 | padded_relative_embeddings = F.pad( 207 | relative_embeddings, 208 | commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) 209 | else: 210 | padded_relative_embeddings = relative_embeddings 211 | used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] 212 | return used_relative_embeddings 213 | 214 | def _relative_position_to_absolute_position(self, x): 215 | """ 216 | x: [b, h, l, 2*l-1] 217 | ret: [b, h, l, l] 218 | """ 219 | batch, heads, length, _ = x.size() 220 | # Concat columns of pad to shift from relative to absolute indexing. 221 | x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) 222 | 223 | # Concat extra elements so as to add up to shape (len+1, 2*len-1). 224 | x_flat = x.view([batch, heads, length * 2 * length]) 225 | x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) 226 | 227 | # Reshape and slice out the padded elements.
228 | x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] 229 | return x_final 230 | 231 | def _absolute_position_to_relative_position(self, x): 232 | """ 233 | x: [b, h, l, l] 234 | ret: [b, h, l, 2*l-1] 235 | """ 236 | batch, heads, length, _ = x.size() 237 | # pad along column 238 | x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) 239 | x_flat = x.view([batch, heads, length**2 + length*(length -1)]) 240 | # add 0's at the beginning that will skew the elements after reshape 241 | x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) 242 | x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] 243 | return x_final 244 | 245 | def _attention_bias_proximal(self, length): 246 | """Bias for self-attention to encourage attention to close positions. 247 | Args: 248 | length: an integer scalar. 249 | Returns: 250 | a Tensor with shape [1, 1, length, length] 251 | """ 252 | r = torch.arange(length, dtype=torch.float32) 253 | diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) 254 | return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) 255 | 256 | 257 | class FFN(nn.Module): 258 | def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): 259 | super().__init__() 260 | self.in_channels = in_channels 261 | self.out_channels = out_channels 262 | self.filter_channels = filter_channels 263 | self.kernel_size = kernel_size 264 | self.p_dropout = p_dropout 265 | self.activation = activation 266 | self.causal = causal 267 | 268 | if causal: 269 | self.padding = self._causal_padding 270 | else: 271 | self.padding = self._same_padding 272 | 273 | self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) 274 | self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) 275 | self.drop = nn.Dropout(p_dropout) 276 | 277 | def forward(self, x, x_mask): 278 | x = self.conv_1(self.padding(x * x_mask)) 279 | if self.activation == "gelu": 280 | x = x * torch.sigmoid(1.702 * x) 281 | else: 282 | x = torch.relu(x) 283 | x = self.drop(x) 284 | x = self.conv_2(self.padding(x * x_mask)) 285 | return x * x_mask 286 | 287 | def _causal_padding(self, x): 288 | if self.kernel_size == 1: 289 | return x 290 | pad_l = self.kernel_size - 1 291 | pad_r = 0 292 | padding = [[0, 0], [0, 0], [pad_l, pad_r]] 293 | x = F.pad(x, commons.convert_pad_shape(padding)) 294 | return x 295 | 296 | def _same_padding(self, x): 297 | if self.kernel_size == 1: 298 | return x 299 | pad_l = (self.kernel_size - 1) // 2 300 | pad_r = self.kernel_size // 2 301 | padding = [[0, 0], [0, 0], [pad_l, pad_r]] 302 | x = F.pad(x, commons.convert_pad_shape(padding)) 303 | return x 304 | -------------------------------------------------------------------------------- /commons.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | 7 | 8 | def init_weights(m, mean=0.0, std=0.01): 9 | classname = m.__class__.__name__ 10 | if classname.find("Conv") != -1: 11 | m.weight.data.normal_(mean, std) 12 | 13 | 14 | def get_padding(kernel_size, dilation=1): 15 | return int((kernel_size*dilation - dilation)/2) 16 | 17 | 18 | def convert_pad_shape(pad_shape): 19 | l = pad_shape[::-1] 20 | pad_shape = [item for sublist in l for item in sublist] 21 | return pad_shape 22 | 23 | 24 | def intersperse(lst, item): 25 |
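# intersperse places `item` before, between, and after the elements of `lst`,
# e.g. intersperse([1, 2, 3], 0) -> [0, 1, 0, 2, 0, 3, 0]; data_utils uses it
# to insert blank tokens between text symbols when add_blank is enabled.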
result = [item] * (len(lst) * 2 + 1) 26 | result[1::2] = lst 27 | return result 28 | 29 | 30 | def kl_divergence(m_p, logs_p, m_q, logs_q): 31 | """KL(P||Q)""" 32 | kl = (logs_q - logs_p) - 0.5 33 | kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) 34 | return kl 35 | 36 | 37 | def rand_gumbel(shape): 38 | """Sample from the Gumbel distribution, protect from overflows.""" 39 | uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 40 | return -torch.log(-torch.log(uniform_samples)) 41 | 42 | 43 | def rand_gumbel_like(x): 44 | g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) 45 | return g 46 | 47 | 48 | def slice_segments(x, ids_str, segment_size=4): 49 | ret = torch.zeros_like(x[:, :, :segment_size]) 50 | for i in range(x.size(0)): 51 | idx_str = ids_str[i] 52 | idx_end = idx_str + segment_size 53 | ret[i] = x[i, :, idx_str:idx_end] 54 | return ret 55 | 56 | 57 | def rand_slice_segments(x, x_lengths=None, segment_size=4): 58 | b, d, t = x.size() 59 | if x_lengths is None: 60 | x_lengths = t 61 | ids_str_max = x_lengths - segment_size + 1 62 | ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) 63 | ret = slice_segments(x, ids_str, segment_size) 64 | return ret, ids_str 65 | 66 | 67 | def get_timing_signal_1d( 68 | length, channels, min_timescale=1.0, max_timescale=1.0e4): 69 | position = torch.arange(length, dtype=torch.float) 70 | num_timescales = channels // 2 71 | log_timescale_increment = ( 72 | math.log(float(max_timescale) / float(min_timescale)) / 73 | (num_timescales - 1)) 74 | inv_timescales = min_timescale * torch.exp( 75 | torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) 76 | scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) 77 | signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) 78 | signal = F.pad(signal, [0, 0, 0, channels % 2]) 79 | signal = signal.view(1, channels, length) 80 | return signal 81 | 82 | 83 | def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): 84 | b, channels, length = x.size() 85 | signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) 86 | return x + signal.to(dtype=x.dtype, device=x.device) 87 | 88 | 89 | def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): 90 | b, channels, length = x.size() 91 | signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) 92 | return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) 93 | 94 | 95 | def subsequent_mask(length): 96 | mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) 97 | return mask 98 | 99 | 100 | @torch.jit.script 101 | def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): 102 | n_channels_int = n_channels[0] 103 | in_act = input_a + input_b 104 | t_act = torch.tanh(in_act[:, :n_channels_int, :]) 105 | s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) 106 | acts = t_act * s_act 107 | return acts 108 | 109 | 110 | def convert_pad_shape(pad_shape): 111 | l = pad_shape[::-1] 112 | pad_shape = [item for sublist in l for item in sublist] 113 | return pad_shape 114 | 115 | 116 | def shift_1d(x): 117 | x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] 118 | return x 119 | 120 | 121 | def sequence_mask(length, max_length=None): 122 | if max_length is None: 123 | max_length = length.max() 124 | x = torch.arange(max_length, dtype=length.dtype, device=length.device) 125 | return x.unsqueeze(0) < length.unsqueeze(1) 126 | 127 | 
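# Example: sequence_mask(torch.tensor([2, 3]), 4) ->
# [[True, True, False, False],
#  [True, True, True, False]]
# i.e. one row of valid-position flags per sequence; generate_path below
# applies it to cumulative durations to build the monotonic alignment path.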
128 | def generate_path(duration, mask): 129 | """ 130 | duration: [b, 1, t_x] 131 | mask: [b, 1, t_y, t_x] 132 | """ 133 | device = duration.device 134 | 135 | b, _, t_y, t_x = mask.shape 136 | cum_duration = torch.cumsum(duration, -1) 137 | 138 | cum_duration_flat = cum_duration.view(b * t_x) 139 | path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) 140 | path = path.view(b, t_x, t_y) 141 | path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] 142 | path = path.unsqueeze(1).transpose(2,3) * mask 143 | return path 144 | 145 | 146 | def clip_grad_value_(parameters, clip_value, norm_type=2): 147 | if isinstance(parameters, torch.Tensor): 148 | parameters = [parameters] 149 | parameters = list(filter(lambda p: p.grad is not None, parameters)) 150 | norm_type = float(norm_type) 151 | if clip_value is not None: 152 | clip_value = float(clip_value) 153 | 154 | total_norm = 0 155 | for p in parameters: 156 | param_norm = p.grad.data.norm(norm_type) 157 | total_norm += param_norm.item() ** norm_type 158 | if clip_value is not None: 159 | p.grad.data.clamp_(min=-clip_value, max=clip_value) 160 | total_norm = total_norm ** (1. / norm_type) 161 | return total_norm 162 | -------------------------------------------------------------------------------- /configs/config-single-speaker.json: -------------------------------------------------------------------------------- 1 | { 2 | "train": { 3 | "log_interval": 200, 4 | "eval_interval": 1000, 5 | "seed": 1234, 6 | "epochs": 10000, 7 | "learning_rate": 2e-5, 8 | "betas": [0.8, 0.99], 9 | "eps": 1e-9, 10 | "batch_size": 16, 11 | "fp16_run": true, 12 | "lr_decay": 0.999875, 13 | "segment_size": 8192, 14 | "init_lr_ratio": 1, 15 | "warmup_epochs": 0, 16 | "c_mel": 45, 17 | "c_kl": 1.0 18 | }, 19 | "data": { 20 | "training_files":"filelists/takina_train.txt.cleaned", 21 | "validation_files":"filelists/takina_val.txt.cleaned", 22 | "text_cleaners":["japanese_cleaners"], 23 | "max_wav_value": 32768.0, 24 | "sampling_rate": 22050, 25 | "filter_length": 1024, 26 | "hop_length": 256, 27 | "win_length": 1024, 28 | "n_mel_channels": 80, 29 | "mel_fmin": 0.0, 30 | "mel_fmax": null, 31 | "add_blank": true, 32 | "n_speakers": 0, 33 | "cleaned_text": true 34 | }, 35 | "model": { 36 | "inter_channels": 192, 37 | "hidden_channels": 192, 38 | "filter_channels": 768, 39 | "n_heads": 2, 40 | "n_layers": 6, 41 | "kernel_size": 3, 42 | "p_dropout": 0.1, 43 | "resblock": "1", 44 | "resblock_kernel_sizes": [3,7,11], 45 | "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], 46 | "upsample_rates": [8,8,2,2], 47 | "upsample_initial_channel": 512, 48 | "upsample_kernel_sizes": [16,16,4,4], 49 | "n_layers_q": 3, 50 | "use_spectral_norm": false 51 | }, 52 | "speakers": ["takina"], 53 | "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "] 54 | } -------------------------------------------------------------------------------- /configs/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "train": { 3 | "log_interval": 200, 4 | "eval_interval": 1000, 5 | "seed": 1234, 6 | "epochs": 10000, 7 | "learning_rate": 2e-4, 8 | "betas": [0.8, 0.99], 9 | "eps": 1e-9, 10 | "batch_size": 16, 11 | "fp16_run": true, 12 | "lr_decay": 
0.999875, 13 | "segment_size": 8192, 14 | "init_lr_ratio": 1, 15 | "warmup_epochs": 0, 16 | "c_mel": 45, 17 | "c_kl": 1.0 18 | }, 19 | "data": { 20 | "training_files":"filelists/miyu_train.txt.cleaned", 21 | "validation_files":"filelists/miyu_val.txt.cleaned", 22 | "text_cleaners":["japanese_cleaners"], 23 | "max_wav_value": 32768.0, 24 | "sampling_rate": 22050, 25 | "filter_length": 1024, 26 | "hop_length": 256, 27 | "win_length": 1024, 28 | "n_mel_channels": 80, 29 | "mel_fmin": 0.0, 30 | "mel_fmax": null, 31 | "add_blank": true, 32 | "n_speakers": 804, 33 | "cleaned_text": true 34 | }, 35 | "model": { 36 | "inter_channels": 192, 37 | "hidden_channels": 192, 38 | "filter_channels": 768, 39 | "n_heads": 2, 40 | "n_layers": 6, 41 | "kernel_size": 3, 42 | "p_dropout": 0.1, 43 | "resblock": "1", 44 | "resblock_kernel_sizes": [3,7,11], 45 | "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], 46 | "upsample_rates": [8,8,2,2], 47 | "upsample_initial_channel": 512, 48 | "upsample_kernel_sizes": [16,16,4,4], 49 | "n_layers_q": 3, 50 | "use_spectral_norm": false, 51 | "gin_channels": 256 52 | }, 53 | "speakers": ["\u7279\u522b\u5468", "\u65e0\u58f0\u94c3\u9e7f", "\u4e1c\u6d77\u5e1d\u7687\uff08\u5e1d\u5b9d\uff0c\u5e1d\u738b\uff09", "\u4e38\u5584\u65af\u57fa", "\u5bcc\u58eb\u5947\u8ff9", "\u5c0f\u6817\u5e3d", "\u9ec4\u91d1\u8239", "\u4f0f\u7279\u52a0", "\u5927\u548c\u8d64\u9aa5", "\u5927\u6811\u5feb\u8f66", "\u8349\u4e0a\u98de", "\u83f1\u4e9a\u9a6c\u900a", "\u76ee\u767d\u9ea6\u6606", "\u795e\u9e70", "\u597d\u6b4c\u5267", "\u6210\u7530\u767d\u4ec1", "\u9c81\u9053\u592b\u8c61\u5f81\uff08\u7687\u5e1d\uff09", "\u6c14\u69fd", "\u7231\u4e3d\u6570\u7801", "\u661f\u4e91\u5929\u7a7a", "\u7389\u85fb\u5341\u5b57", "\u7f8e\u5999\u59ff\u52bf", "\u7435\u7436\u6668\u5149", "\u6469\u8036\u91cd\u70ae", "\u66fc\u57ce\u8336\u5ea7", "\u7f8e\u6d66\u6ce2\u65c1", "\u76ee\u767d\u8d56\u6069", "\u83f1\u66d9", "\u96ea\u4e2d\u7f8e\u4eba", "\u7c73\u6d74", "\u827e\u5c3c\u65af\u98ce\u795e", "\u7231\u4e3d\u901f\u5b50\uff08\u7231\u4e3d\u5feb\u5b50\uff09", "\u7231\u6155\u7ec7\u59ec", "\u7a3b\u8377\u4e00", "\u80dc\u5229\u5956\u5238", "\u7a7a\u4e2d\u795e\u5bab", "\u8363\u8fdb\u95ea\u8000", "\u771f\u673a\u4f36", "\u5ddd\u4e0a\u516c\u4e3b", "\u9ec4\u91d1\u57ce\uff08\u9ec4\u91d1\u57ce\u5e02\uff09", "\u6a31\u82b1\u8fdb\u738b", "\u91c7\u73e0", "\u65b0\u5149\u98ce", "\u4e1c\u5546\u53d8\u9769", "\u8d85\u7ea7\u5c0f\u6d77\u6e7e", "\u9192\u76ee\u98de\u9e70\uff08\u5bc4\u5bc4\u5b50\uff09", "\u8352\u6f20\u82f1\u96c4", "\u4e1c\u701b\u4f50\u6566", "\u4e2d\u5c71\u5e86\u5178", "\u6210\u7530\u5927\u8fdb", "\u897f\u91ce\u82b1", "\u6625\u4e3d\uff08\u4e4c\u62c9\u62c9\uff09", "\u9752\u7af9\u56de\u5fc6", "\u5fae\u5149\u98de\u9a79", "\u7f8e\u4e3d\u5468\u65e5", "\u5f85\u517c\u798f\u6765", "mr cb\uff08cb\u5148\u751f\uff09", "\u540d\u5c06\u6012\u6d9b\uff08\u540d\u5c06\u6237\u4ec1\uff09", "\u76ee\u767d\u591a\u4f2f", "\u4f18\u79c0\u7d20\u8d28", "\u5e1d\u738b\u5149\u8f89", "\u5f85\u517c\u8bd7\u6b4c\u5267", "\u751f\u91ce\u72c4\u675c\u65af", "\u76ee\u767d\u5584\u4fe1", "\u5927\u62d3\u592a\u9633\u795e", "\u53cc\u6da1\u8f6e\uff08\u4e24\u7acb\u76f4\uff0c\u4e24\u55b7\u5c04\uff0c\u4e8c\u9505\u5934\uff0c\u9006\u55b7\u5c04\uff09", "\u91cc\u89c1\u5149\u94bb\uff08\u8428\u6258\u8bfa\u91d1\u521a\u77f3\uff09", "\u5317\u90e8\u7384\u9a79", "\u6a31\u82b1\u5343\u4ee3\u738b", "\u5929\u72fc\u661f\u8c61\u5f81", "\u76ee\u767d\u963f\u5c14\u4e39", "\u516b\u91cd\u65e0\u654c", "\u9e64\u4e38\u521a\u5fd7", "\u76ee\u767d\u5149\u660e", 
"\u6210\u7530\u62dc\u4ec1\uff08\u6210\u7530\u8def\uff09", "\u4e5f\u6587\u6444\u8f89", "\u5c0f\u6797\u5386\u5947", "\u5317\u6e2f\u706b\u5c71", "\u5947\u9510\u9a8f", "\u82e6\u6da9\u7cd6\u971c", "\u5c0f\u5c0f\u8695\u8327", "\u9a8f\u5ddd\u624b\u7eb2\uff08\u7eff\u5e3d\u6076\u9b54\uff09", "\u79cb\u5ddd\u5f25\u751f\uff08\u5c0f\u5c0f\u7406\u4e8b\u957f\uff09", "\u4e59\u540d\u53f2\u60a6\u5b50\uff08\u4e59\u540d\u8bb0\u8005\uff09", "\u6850\u751f\u9662\u8475", "\u5b89\u5fc3\u6cfd\u523a\u523a\u7f8e", "\u6a2b\u672c\u7406\u5b50", "\u795e\u91cc\u7eeb\u534e\uff08\u9f9f\u9f9f\uff09", "\u7434", "\u7a7a\uff08\u7a7a\u54e5\uff09", "\u4e3d\u838e", "\u8367\uff08\u8367\u59b9\uff09", "\u82ad\u82ad\u62c9", "\u51ef\u4e9a", "\u8fea\u5362\u514b", "\u96f7\u6cfd", "\u5b89\u67cf", "\u6e29\u8fea", "\u9999\u83f1", "\u5317\u6597", "\u884c\u79cb", "\u9b48", "\u51dd\u5149", "\u53ef\u8389", "\u949f\u79bb", "\u83f2\u8c22\u5c14\uff08\u7687\u5973\uff09", "\u73ed\u5c3c\u7279", "\u8fbe\u8fbe\u5229\u4e9a\uff08\u516c\u5b50\uff09", "\u8bfa\u827e\u5c14\uff08\u5973\u4ec6\uff09", "\u4e03\u4e03", "\u91cd\u4e91", "\u7518\u96e8\uff08\u6930\u7f8a\uff09", "\u963f\u8d1d\u591a", "\u8fea\u5965\u5a1c\uff08\u732b\u732b\uff09", "\u83ab\u5a1c", "\u523b\u6674", "\u7802\u7cd6", "\u8f9b\u7131", "\u7f57\u838e\u8389\u4e9a", "\u80e1\u6843", "\u67ab\u539f\u4e07\u53f6\uff08\u4e07\u53f6\uff09", "\u70df\u7eef", "\u5bb5\u5bab", "\u6258\u9a6c", "\u4f18\u83c8", "\u96f7\u7535\u5c06\u519b\uff08\u96f7\u795e\uff09", "\u65e9\u67da", "\u73ca\u745a\u5bab\u5fc3\u6d77\uff08\u5fc3\u6d77\uff0c\u6263\u6263\u7c73\uff09", "\u4e94\u90ce", "\u4e5d\u6761\u88df\u7f57", "\u8352\u6cf7\u4e00\u6597\uff08\u4e00\u6597\uff09", "\u57c3\u6d1b\u4f0a", "\u7533\u9e64", "\u516b\u91cd\u795e\u5b50\uff08\u795e\u5b50\uff09", "\u795e\u91cc\u7eeb\u4eba\uff08\u7eeb\u4eba\uff09", "\u591c\u5170", "\u4e45\u5c90\u5fcd", "\u9e7f\u91ce\u82d1\u5e73\u85cf", "\u63d0\u7eb3\u91cc", "\u67ef\u83b1", "\u591a\u8389", "\u4e91\u5807", "\u7eb3\u897f\u59b2\uff08\u8349\u795e\uff09", "\u6df1\u6e0a\u4f7f\u5f92", "\u59ae\u9732", "\u8d5b\u8bfa", "\u503a\u52a1\u5904\u7406\u4eba", "\u574e\u8482\u4e1d", "\u771f\u5f13\u5feb\u8f66", "\u79cb\u4eba", "\u671b\u65cf", "\u827e\u5c14\u83f2", "\u827e\u8389\u4e1d", "\u827e\u4f26", "\u963f\u6d1b\u74e6", "\u5929\u91ce", "\u5929\u76ee\u5341\u4e94", "\u611a\u4eba\u4f17-\u5b89\u5fb7\u70c8", "\u5b89\u987a", "\u5b89\u897f", "\u8475", "\u9752\u6728", "\u8352\u5ddd\u5e78\u6b21", "\u8352\u8c37", "\u6709\u6cfd", "\u6d45\u5ddd", "\u9ebb\u7f8e", "\u51dd\u5149\u52a9\u624b", "\u963f\u6258", "\u7afa\u5b50", "\u767e\u8bc6", "\u767e\u95fb", "\u767e\u6653", "\u767d\u672f", "\u8d1d\u96c5\u7279\u4e3d\u5947", "\u4e3d\u5854", "\u5931\u843d\u8ff7\u8fed", "\u7f2d\u4e71\u661f\u68d8", "\u4f0a\u7538", "\u4f0f\u7279\u52a0\u5973\u5b69", "\u72c2\u70ed\u84dd\u8c03", "\u8389\u8389\u5a05", "\u841d\u838e\u8389\u5a05", "\u516b\u91cd\u6a31", "\u516b\u91cd\u971e", "\u5361\u83b2", "\u7b2c\u516d\u591c\u60f3\u66f2", "\u5361\u841d\u5c14", "\u59ec\u5b50", "\u6781\u5730\u6218\u5203", "\u5e03\u6d1b\u59ae\u5a05", "\u6b21\u751f\u94f6\u7ffc", "\u7406\u4e4b\u5f8b\u8005%26\u5e0c\u513f", "\u7406\u4e4b\u5f8b\u8005", "\u8ff7\u57ce\u9a87\u5154", "\u5e0c\u513f", "\u9b47\u591c\u661f\u6e0a", "\u9ed1\u5e0c\u513f", "\u5e15\u6735\u83f2\u8389\u4e1d", "\u4e0d\u706d\u661f\u951a", "\u5929\u5143\u9a91\u82f1", "\u5e7d\u5170\u9edb\u5c14", "\u6d3e\u8499bh3", "\u7231\u9171", "\u7eef\u7389\u4e38", "\u5fb7\u4e3d\u838e", "\u6708\u4e0b\u521d\u62e5", "\u6714\u591c\u89c2\u661f", "\u66ae\u5149\u9a91\u58eb", "\u683c\u857e\u4fee", 
"\u7559\u4e91\u501f\u98ce\u771f\u541b", "\u6885\u6bd4\u4e4c\u65af", "\u4eff\u72b9\u5927", "\u514b\u83b1\u56e0", "\u5723\u5251\u5e7d\u5170\u9edb\u5c14", "\u5996\u7cbe\u7231\u8389", "\u7279\u65af\u62c9zero", "\u82cd\u7384", "\u82e5\u6c34", "\u897f\u7433", "\u6234\u56e0\u65af\u96f7\u5e03", "\u8d1d\u62c9", "\u8d64\u9e22", "\u9547\u9b42\u6b4c", "\u6e21\u9e26", "\u4eba\u4e4b\u5f8b\u8005", "\u7231\u8389\u5e0c\u96c5", "\u5929\u7a79\u6e38\u4fa0", "\u742a\u4e9a\u5a1c", "\u7a7a\u4e4b\u5f8b\u8005", "\u85aa\u708e\u4e4b\u5f8b\u8005", "\u4e91\u58a8\u4e39\u5fc3", "\u7b26\u534e", "\u8bc6\u4e4b\u5f8b\u8005", "\u7279\u74e6\u6797", "\u7ef4\u5c14\u8587", "\u82bd\u8863", "\u96f7\u4e4b\u5f8b\u8005", "\u65ad\u7f6a\u5f71\u821e", "\u963f\u6ce2\u5c3c\u4e9a", "\u698e\u672c", "\u5384\u5c3c\u65af\u7279", "\u6076\u9f99", "\u8303\u4e8c\u7237", "\u6cd5\u62c9", "\u611a\u4eba\u4f17\u58eb\u5175", "\u611a\u4eba\u4f17\u58eb\u5175a", "\u611a\u4eba\u4f17\u58eb\u5175b", "\u611a\u4eba\u4f17\u58eb\u5175c", "\u611a\u4eba\u4f17a", "\u611a\u4eba\u4f17b", "\u98de\u98de", "\u83f2\u5229\u514b\u65af", "\u5973\u6027\u8ddf\u968f\u8005", "\u9022\u5ca9", "\u6446\u6e21\u4eba", "\u72c2\u8e81\u7684\u7537\u4eba", "\u5965\u5179", "\u8299\u841d\u62c9", "\u8ddf\u968f\u8005", "\u871c\u6c41\u751f\u7269", "\u9ec4\u9ebb\u5b50", "\u6e0a\u4e0a", "\u85e4\u6728", "\u6df1\u89c1", "\u798f\u672c", "\u8299\u84c9", "\u53e4\u6cfd", "\u53e4\u7530", "\u53e4\u5c71", "\u53e4\u8c37\u6607", "\u5085\u4e09\u513f", "\u9ad8\u8001\u516d", "\u77ff\u5de5\u5192", "\u5143\u592a", "\u5fb7\u5b89\u516c", "\u8302\u624d\u516c", "\u6770\u62c9\u5fb7", "\u845b\u7f57\u4e3d", "\u91d1\u5ffd\u5f8b", "\u516c\u4fca", "\u9505\u5df4", "\u6b4c\u5fb7", "\u963f\u8c6a", "\u72d7\u4e09\u513f", "\u845b\u745e\u4e1d", "\u82e5\u5fc3", "\u963f\u5c71\u5a46", "\u602a\u9e1f", "\u5e7f\u7af9", "\u89c2\u6d77", "\u5173\u5b8f", "\u871c\u6c41\u536b\u5175", "\u5b88\u536b1", "\u50b2\u6162\u7684\u5b88\u536b", "\u5bb3\u6015\u7684\u5b88\u536b", "\u8d35\u5b89", "\u76d6\u4f0a", "\u963f\u521b", "\u54c8\u592b\u4e39", "\u65e5\u8bed\u963f\u8d1d\u591a\uff08\u91ce\u5c9b\u5065\u513f\uff09", "\u65e5\u8bed\u57c3\u6d1b\u4f0a\uff08\u9ad8\u57a3\u5f69\u9633\uff09", "\u65e5\u8bed\u5b89\u67cf\uff08\u77f3\u89c1\u821e\u83dc\u9999\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u534e\uff08\u65e9\u89c1\u6c99\u7ec7\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u4eba\uff08\u77f3\u7530\u5f70\uff09", "\u65e5\u8bed\u767d\u672f\uff08\u6e38\u4f50\u6d69\u4e8c\uff09", "\u65e5\u8bed\u82ad\u82ad\u62c9\uff08\u9b3c\u5934\u660e\u91cc\uff09", "\u65e5\u8bed\u5317\u6597\uff08\u5c0f\u6e05\u6c34\u4e9a\u7f8e\uff09", "\u65e5\u8bed\u73ed\u5c3c\u7279\uff08\u9022\u5742\u826f\u592a\uff09", "\u65e5\u8bed\u574e\u8482\u4e1d\uff08\u67da\u6728\u51c9\u9999\uff09", "\u65e5\u8bed\u91cd\u4e91\uff08\u9f50\u85e4\u58ee\u9a6c\uff09", "\u65e5\u8bed\u67ef\u83b1\uff08\u524d\u5ddd\u51c9\u5b50\uff09", "\u65e5\u8bed\u8d5b\u8bfa\uff08\u5165\u91ce\u81ea\u7531\uff09", "\u65e5\u8bed\u6234\u56e0\u65af\u96f7\u5e03\uff08\u6d25\u7530\u5065\u6b21\u90ce\uff09", "\u65e5\u8bed\u8fea\u5362\u514b\uff08\u5c0f\u91ce\u8d24\u7ae0\uff09", "\u65e5\u8bed\u8fea\u5965\u5a1c\uff08\u4e95\u6cfd\u8bd7\u7ec7\uff09", "\u65e5\u8bed\u591a\u8389\uff08\u91d1\u7530\u670b\u5b50\uff09", "\u65e5\u8bed\u4f18\u83c8\uff08\u4f50\u85e4\u5229\u5948\uff09", "\u65e5\u8bed\u83f2\u8c22\u5c14\uff08\u5185\u7530\u771f\u793c\uff09", "\u65e5\u8bed\u7518\u96e8\uff08\u4e0a\u7530\u4e3d\u5948\uff09", "\u65e5\u8bed\uff08\u7560\u4e2d\u7950\uff09", "\u65e5\u8bed\u9e7f\u91ce\u9662\u5e73\u85cf\uff08\u4e95\u53e3\u7950\u4e00\uff09", 
"\u65e5\u8bed\u7a7a\uff08\u5800\u6c5f\u77ac\uff09", "\u65e5\u8bed\u8367\uff08\u60a0\u6728\u78a7\uff09", "\u65e5\u8bed\u80e1\u6843\uff08\u9ad8\u6865\u674e\u4f9d\uff09", "\u65e5\u8bed\u4e00\u6597\uff08\u897f\u5ddd\u8d35\u6559\uff09", "\u65e5\u8bed\u51ef\u4e9a\uff08\u9e1f\u6d77\u6d69\u8f85\uff09", "\u65e5\u8bed\u4e07\u53f6\uff08\u5c9b\u5d0e\u4fe1\u957f\uff09", "\u65e5\u8bed\u523b\u6674\uff08\u559c\u591a\u6751\u82f1\u68a8\uff09", "\u65e5\u8bed\u53ef\u8389\uff08\u4e45\u91ce\u7f8e\u54b2\uff09", "\u65e5\u8bed\u5fc3\u6d77\uff08\u4e09\u68ee\u94c3\u5b50\uff09", "\u65e5\u8bed\u4e5d\u6761\u88df\u7f57\uff08\u6fd1\u6237\u9ebb\u6c99\u7f8e\uff09", "\u65e5\u8bed\u4e3d\u838e\uff08\u7530\u4e2d\u7406\u60e0\uff09", "\u65e5\u8bed\u83ab\u5a1c\uff08\u5c0f\u539f\u597d\u7f8e\uff09", "\u65e5\u8bed\u7eb3\u897f\u59b2\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u59ae\u9732\uff08\u91d1\u5143\u5bff\u5b50\uff09", "\u65e5\u8bed\u51dd\u5149\uff08\u5927\u539f\u6c99\u8036\u9999\uff09", "\u65e5\u8bed\u8bfa\u827e\u5c14\uff08\u9ad8\u5c3e\u594f\u97f3\uff09", "\u65e5\u8bed\u5965\u5179\uff08\u589e\u8c37\u5eb7\u7eaa\uff09", "\u65e5\u8bed\u6d3e\u8499\uff08\u53e4\u8d3a\u8475\uff09", "\u65e5\u8bed\u7434\uff08\u658b\u85e4\u5343\u548c\uff09", "\u65e5\u8bed\u4e03\u4e03\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u96f7\u7535\u5c06\u519b\uff08\u6cfd\u57ce\u7f8e\u96ea\uff09", "\u65e5\u8bed\u96f7\u6cfd\uff08\u5185\u5c71\u6602\u8f89\uff09", "\u65e5\u8bed\u7f57\u838e\u8389\u4e9a\uff08\u52a0\u9688\u4e9a\u8863\uff09", "\u65e5\u8bed\u65e9\u67da\uff08\u6d32\u5d0e\u7eeb\uff09", "\u65e5\u8bed\u6563\u5175\uff08\u67ff\u539f\u5f7b\u4e5f\uff09", "\u65e5\u8bed\u7533\u9e64\uff08\u5ddd\u6f84\u7eeb\u5b50\uff09", "\u65e5\u8bed\u4e45\u5c90\u5fcd\uff08\u6c34\u6865\u9999\u7ec7\uff09", "\u65e5\u8bed\u5973\u58eb\uff08\u5e84\u5b50\u88d5\u8863\uff09", "\u65e5\u8bed\u7802\u7cd6\uff08\u85e4\u7530\u831c\uff09", "\u65e5\u8bed\u8fbe\u8fbe\u5229\u4e9a\uff08\u6728\u6751\u826f\u5e73\uff09", "\u65e5\u8bed\u6258\u9a6c\uff08\u68ee\u7530\u6210\u4e00\uff09", "\u65e5\u8bed\u63d0\u7eb3\u91cc\uff08\u5c0f\u6797\u6c99\u82d7\uff09", "\u65e5\u8bed\u6e29\u8fea\uff08\u6751\u6fd1\u6b65\uff09", "\u65e5\u8bed\u9999\u83f1\uff08\u5c0f\u6cfd\u4e9a\u674e\uff09", "\u65e5\u8bed\u9b48\uff08\u677e\u5188\u796f\u4e1e\uff09", "\u65e5\u8bed\u884c\u79cb\uff08\u7686\u5ddd\u7eaf\u5b50\uff09", "\u65e5\u8bed\u8f9b\u7131\uff08\u9ad8\u6865\u667a\u79cb\uff09", "\u65e5\u8bed\u516b\u91cd\u795e\u5b50\uff08\u4f50\u4ed3\u7eeb\u97f3\uff09", "\u65e5\u8bed\u70df\u7eef\uff08\u82b1\u5b88\u7531\u7f8e\u91cc\uff09", "\u65e5\u8bed\u591c\u5170\uff08\u8fdc\u85e4\u7eeb\uff09", "\u65e5\u8bed\u5bb5\u5bab\uff08\u690d\u7530\u4f73\u5948\uff09", "\u65e5\u8bed\u4e91\u5807\uff08\u5c0f\u5ca9\u4e95\u5c0f\u9e1f\uff09", "\u65e5\u8bed\u949f\u79bb\uff08\u524d\u91ce\u667a\u662d\uff09", "\u6770\u514b", "\u963f\u5409", "\u6c5f\u821f", "\u9274\u79cb", "\u5609\u4e49", "\u7eaa\u82b3", "\u666f\u6f84", "\u7ecf\u7eb6", "\u666f\u660e", "\u664b\u4f18", "\u963f\u9e20", "\u9152\u5ba2", "\u4e54\u5c14", "\u4e54\u745f\u592b", "\u7ea6\u987f", "\u4e54\u4f0a\u65af", "\u5c45\u5b89", "\u541b\u541b", "\u987a\u5409", "\u7eaf\u4e5f", "\u91cd\u4f50", "\u5927\u5c9b\u7eaf\u5e73", "\u84b2\u6cfd", "\u52d8\u89e3\u7531\u5c0f\u8def\u5065\u4e09\u90ce", "\u67ab", "\u67ab\u539f\u4e49\u5e86", "\u836b\u5c71", "\u7532\u6590\u7530\u9f8d\u99ac", "\u6d77\u6597", "\u60df\u795e\u6674\u4e4b\u4ecb", "\u9e7f\u91ce\u5948\u5948", "\u5361\u7435\u8389\u4e9a", "\u51ef\u745f\u7433", "\u52a0\u85e4\u4fe1\u609f", "\u52a0\u85e4\u6d0b\u5e73", "\u80dc\u5bb6", 
"\u8305\u847a\u4e00\u5e86", "\u548c\u662d", "\u4e00\u6b63", "\u4e00\u9053", "\u6842\u4e00", "\u5e86\u6b21\u90ce", "\u963f\u8d24", "\u5065\u53f8", "\u5065\u6b21\u90ce", "\u5065\u4e09\u90ce", "\u5929\u7406", "\u6740\u624ba", "\u6740\u624bb", "\u6728\u5357\u674f\u5948", "\u6728\u6751", "\u56fd\u738b", "\u6728\u4e0b", "\u5317\u6751", "\u6e05\u60e0", "\u6e05\u4eba", "\u514b\u5217\u95e8\u7279", "\u9a91\u58eb", "\u5c0f\u6797", "\u5c0f\u6625", "\u5eb7\u62c9\u5fb7", "\u5927\u8089\u4e38", "\u7434\u7f8e", "\u5b8f\u4e00", "\u5eb7\u4ecb", "\u5e78\u5fb7", "\u9ad8\u5584", "\u68a2", "\u514b\u7f57\u7d22", "\u4e45\u4fdd", "\u4e5d\u6761\u9570\u6cbb", "\u4e45\u6728\u7530", "\u6606\u94a7", "\u83ca\u5730\u541b", "\u4e45\u5229\u987b", "\u9ed1\u7530", "\u9ed1\u6cfd\u4eac\u4e4b\u4ecb", "\u54cd\u592a", "\u5c9a\u59d0", "\u5170\u6eaa", "\u6f9c\u9633", "\u52b3\u4f26\u65af", "\u4e50\u660e", "\u83b1\u8bfa", "\u83b2", "\u826f\u5b50", "\u674e\u5f53", "\u674e\u4e01", "\u5c0f\u4e50", "\u7075", "\u5c0f\u73b2", "\u7433\u7405a", "\u7433\u7405b", "\u5c0f\u5f6c", "\u5c0f\u5fb7", "\u5c0f\u697d", "\u5c0f\u9f99", "\u5c0f\u5434", "\u5c0f\u5434\u7684\u8bb0\u5fc6", "\u7406\u6b63", "\u963f\u9f99", "\u5362\u5361", "\u6d1b\u6210", "\u7f57\u5de7", "\u5317\u98ce\u72fc", "\u5362\u6b63", "\u840d\u59e5\u59e5", "\u524d\u7530", "\u771f\u663c", "\u9ebb\u7eaa", "\u771f", "\u611a\u4eba\u4f17-\u9a6c\u514b\u897f\u59c6", "\u5973\u6027a", "\u5973\u6027b", "\u5973\u6027a\u7684\u8ddf\u968f\u8005", "\u963f\u5b88", "\u739b\u683c\u4e3d\u7279", "\u771f\u7406", "\u739b\u4e54\u4e3d", "\u739b\u6587", "\u6b63\u80dc", "\u660c\u4fe1", "\u5c06\u53f8", "\u6b63\u4eba", "\u8def\u7237", "\u8001\u7ae0", "\u677e\u7530", "\u677e\u672c", "\u677e\u6d66", "\u677e\u5742", "\u8001\u5b5f", "\u5b5f\u4e39", "\u5546\u4eba\u968f\u4ece", "\u4f20\u4ee4\u5175", "\u7c73\u6b47\u5c14", "\u5fa1\u8206\u6e90\u4e00\u90ce", "\u5fa1\u8206\u6e90\u6b21\u90ce", "\u5343\u5ca9\u519b\u6559\u5934", "\u5343\u5ca9\u519b\u58eb\u5175", "\u660e\u535a", "\u660e\u4fca", "\u7f8e\u94c3", "\u7f8e\u548c", "\u963f\u5e78", "\u524a\u6708\u7b51\u9633\u771f\u541b", "\u94b1\u773c\u513f", "\u68ee\u5f66", "\u5143\u52a9", "\u7406\u6c34\u53e0\u5c71\u771f\u541b", "\u7406\u6c34\u758a\u5c71\u771f\u541b", "\u6731\u8001\u677f", "\u6728\u6728", "\u6751\u4e0a", "\u6751\u7530", "\u6c38\u91ce", "\u957f\u91ce\u539f\u9f99\u4e4b\u4ecb", "\u957f\u6fd1", "\u4e2d\u91ce\u5fd7\u4e43", "\u83dc\u83dc\u5b50", "\u6960\u6960", "\u6210\u6fd1", "\u963f\u5185", "\u5b81\u7984", "\u725b\u5fd7", "\u4fe1\u535a", "\u4f38\u592b", "\u91ce\u65b9", "\u8bfa\u62c9", "\u7eaa\u9999", "\u8bfa\u66fc", "\u4fee\u5973", "\u7eaf\u6c34\u7cbe\u7075", "\u5c0f\u5ddd", "\u5c0f\u4ed3\u6faa", "\u5188\u6797", "\u5188\u5d0e\u7ed8\u91cc\u9999", "\u5188\u5d0e\u9646\u6597", "\u5965\u62c9\u592b", "\u8001\u79d1", "\u9b3c\u5a46\u5a46", "\u5c0f\u91ce\u5bfa", "\u5927\u6cb3\u539f\u4e94\u53f3\u536b\u95e8", "\u5927\u4e45\u4fdd\u5927\u4ecb", "\u5927\u68ee", "\u5927\u52a9", "\u5965\u7279", "\u6d3e\u8499", "\u6d3e\u84992", "\u75c5\u4ebaa", "\u75c5\u4ebab", "\u5df4\u987f", "\u6d3e\u6069", "\u670b\u4e49", "\u56f4\u89c2\u7fa4\u4f17", "\u56f4\u89c2\u7fa4\u4f17a", "\u56f4\u89c2\u7fa4\u4f17b", "\u56f4\u89c2\u7fa4\u4f17c", "\u56f4\u89c2\u7fa4\u4f17d", "\u56f4\u89c2\u7fa4\u4f17e", "\u94dc\u96c0", "\u963f\u80a5", "\u5174\u53d4", "\u8001\u5468\u53d4", "\u516c\u4e3b", "\u5f7c\u5f97", "\u4e7e\u5b50", "\u828a\u828a", "\u4e7e\u73ae", "\u7eee\u547d", "\u675e\u5e73", "\u79cb\u6708", "\u6606\u6069", "\u96f7\u7535\u5f71", "\u5170\u9053\u5c14", "\u96f7\u8499\u5fb7", 
"\u5192\u5931\u7684\u5e15\u62c9\u5fb7", "\u4f36\u4e00", "\u73b2\u82b1", "\u963f\u4ec1", "\u5bb6\u81e3\u4eec", "\u68a8\u7ed8", "\u8363\u6c5f", "\u620e\u4e16", "\u6d6a\u4eba", "\u7f57\u4f0a\u65af", "\u5982\u610f", "\u51c9\u5b50", "\u5f69\u9999", "\u9152\u4e95", "\u5742\u672c", "\u6714\u6b21\u90ce", "\u6b66\u58eba", "\u6b66\u58ebb", "\u6b66\u58ebc", "\u6b66\u58ebd", "\u73ca\u745a", "\u4e09\u7530", "\u838e\u62c9", "\u7b39\u91ce", "\u806a\u7f8e", "\u806a", "\u5c0f\u767e\u5408", "\u6563\u5175", "\u5bb3\u6015\u7684\u5c0f\u5218", "\u8212\u4f2f\u7279", "\u8212\u8328", "\u6d77\u9f99", "\u4e16\u5b50", "\u8c22\u5c14\u76d6", "\u5bb6\u4e01", "\u5546\u534e", "\u6c99\u5bc5", "\u963f\u5347", "\u67f4\u7530", "\u963f\u8302", "\u5f0f\u5927\u5c06", "\u6e05\u6c34", "\u5fd7\u6751\u52d8\u5175\u536b", "\u65b0\u4e4b\u4e1e", "\u5fd7\u7ec7", "\u77f3\u5934", "\u8bd7\u7fbd", "\u8bd7\u7b60", "\u77f3\u58ee", "\u7fd4\u592a", "\u6b63\u4e8c", "\u5468\u5e73", "\u8212\u6768", "\u9f50\u683c\u8299\u4e3d\u96c5", "\u5973\u58eb", "\u601d\u52e4", "\u516d\u6307\u4e54\u745f", "\u611a\u4eba\u4f17\u5c0f\u5175d", "\u611a\u4eba\u4f17\u5c0f\u5175a", "\u611a\u4eba\u4f17\u5c0f\u5175b", "\u611a\u4eba\u4f17\u5c0f\u5175c", "\u5434\u8001\u4e94", "\u5434\u8001\u4e8c", "\u6ed1\u5934\u9b3c", "\u8a00\u7b11", "\u5434\u8001\u4e03", "\u58eb\u5175h", "\u58eb\u5175i", "\u58eb\u5175a", "\u58eb\u5175b", "\u58eb\u5175c", "\u58eb\u5175d", "\u58eb\u5175e", "\u58eb\u5175f", "\u58eb\u5175g", "\u594f\u592a", "\u65af\u5766\u5229", "\u6387\u661f\u652b\u8fb0\u5929\u541b", "\u5c0f\u5934", "\u5927\u6b66", "\u9676\u4e49\u9686", "\u6749\u672c", "\u82cf\u897f", "\u5acc\u7591\u4ebaa", "\u5acc\u7591\u4ebab", "\u5acc\u7591\u4ebac", "\u5acc\u7591\u4ebad", "\u65af\u4e07", "\u5251\u5ba2a", "\u5251\u5ba2b", "\u963f\u4e8c", "\u5fe0\u80dc", "\u5fe0\u592b", "\u963f\u656c", "\u5b5d\u5229", "\u9e70\u53f8\u8fdb", "\u9ad8\u5c71", "\u4e5d\u6761\u5b5d\u884c", "\u6bc5", "\u7af9\u5185", "\u62d3\u771f", "\u5353\u4e5f", "\u592a\u90ce\u4e38", "\u6cf0\u52d2", "\u624b\u5c9b", "\u54f2\u5e73", "\u54f2\u592b", "\u6258\u514b", "\u5927boss", "\u963f\u5f3a", "\u6258\u5c14\u5fb7\u62c9", "\u65c1\u89c2\u8005", "\u5929\u6210", "\u963f\u5927", "\u8482\u739b\u4e4c\u65af", "\u63d0\u7c73", "\u6237\u7530", "\u963f\u4e09", "\u4e00\u8d77\u7684\u4eba", "\u5fb7\u7530", "\u5fb7\u957f", "\u667a\u6811", "\u5229\u5f66", "\u80d6\u4e4e\u4e4e\u7684\u65c5\u884c\u8005", "\u85cf\u5b9d\u4ebaa", "\u85cf\u5b9d\u4ebab", "\u85cf\u5b9d\u4ebac", "\u85cf\u5b9d\u4ebad", "\u963f\u7947", "\u6052\u96c4", "\u9732\u5b50", "\u8bdd\u5267\u56e2\u56e2\u957f", "\u5185\u6751", "\u4e0a\u91ce", "\u4e0a\u6749", "\u8001\u6234", "\u8001\u9ad8", "\u8001\u8d3e", "\u8001\u58a8", "\u8001\u5b59", "\u5929\u67a2\u661f", "\u8001\u4e91", "\u6709\u4e50\u658b", "\u4e11\u96c4", "\u4e4c\u7ef4", "\u74e6\u4eac", "\u83f2\u5c14\u6208\u9edb\u7279", "\u7ef4\u591a\u5229\u4e9a", "\u8587\u5c14", "\u74e6\u683c\u7eb3", "\u963f\u5916", "\u4f8d\u5973", "\u74e6\u62c9", "\u671b\u96c5", "\u5b9b\u70df", "\u742c\u7389", "\u6218\u58eba", "\u6218\u58ebb", "\u6e21\u8fba", "\u6e21\u90e8", "\u963f\u4f1f", "\u6587\u749f", "\u6587\u6e0a", "\u97e6\u5c14\u7eb3", "\u738b\u6273\u624b", "\u6b66\u6c9b", "\u6653\u98de", "\u8f9b\u7a0b", "\u661f\u706b", "\u661f\u7a00", "\u8f9b\u79c0", "\u79c0\u534e", "\u963f\u65ed", "\u5f90\u5218\u5e08", "\u77e2\u90e8", "\u516b\u6728", "\u5c71\u4e0a", "\u963f\u9633", "\u989c\u7b11", "\u5eb7\u660e", "\u6cf0\u4e45", "\u5b89\u6b66", "\u77e2\u7530\u5e78\u559c", "\u77e2\u7530\u8f9b\u559c", "\u4e49\u575a", "\u83ba\u513f", "\u76c8\u4e30", "\u5b9c\u5e74", 
"\u94f6\u674f", "\u9038\u8f69", "\u6a2a\u5c71", "\u6c38\u8d35", "\u6c38\u4e1a", "\u5609\u4e45", "\u5409\u5ddd", "\u4e49\u9ad8", "\u7528\u9ad8", "\u9633\u592a", "\u5143\u84c9", "\u73a5\u8f89", "\u6bd3\u534e", "\u6709\u9999", "\u5e78\u4e5f", "\u7531\u771f", "\u7ed3\u83dc", "\u97f5\u5b81", "\u767e\u5408", "\u767e\u5408\u534e", "\u5c24\u82cf\u6ce2\u592b", "\u88d5\u5b50", "\u60a0\u7b56", "\u60a0\u4e5f", "\u4e8e\u5ae3", "\u67da\u5b50", "\u8001\u90d1", "\u6b63\u8302", "\u5fd7\u6210", "\u82b7\u5de7", "\u77e5\u6613", "\u652f\u652f", "\u5468\u826f", "\u73e0\u51fd", "\u795d\u660e", "\u795d\u6d9b"], 54 | "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "] 55 | } -------------------------------------------------------------------------------- /data_utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | import random 4 | import numpy as np 5 | import torch 6 | import torch.utils.data 7 | 8 | import commons 9 | from mel_processing import spectrogram_torch 10 | from utils import load_wav_to_torch, load_filepaths_and_text 11 | from text import text_to_sequence, cleaned_text_to_sequence 12 | 13 | 14 | class TextAudioLoader(torch.utils.data.Dataset): 15 | """ 16 | 1) loads audio, text pairs 17 | 2) normalizes text and converts them to sequences of integers 18 | 3) computes spectrograms from audio files. 19 | """ 20 | def __init__(self, audiopaths_and_text, hparams): 21 | self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) 22 | self.text_cleaners = hparams.text_cleaners 23 | self.max_wav_value = hparams.max_wav_value 24 | self.sampling_rate = hparams.sampling_rate 25 | self.filter_length = hparams.filter_length 26 | self.hop_length = hparams.hop_length 27 | self.win_length = hparams.win_length 28 | self.sampling_rate = hparams.sampling_rate 29 | 30 | self.cleaned_text = getattr(hparams, "cleaned_text", False) 31 | 32 | self.add_blank = hparams.add_blank 33 | self.min_text_len = getattr(hparams, "min_text_len", 1) 34 | self.max_text_len = getattr(hparams, "max_text_len", 190) 35 | 36 | random.seed(1234) 37 | random.shuffle(self.audiopaths_and_text) 38 | self._filter() 39 | 40 | 41 | def _filter(self): 42 | """ 43 | Filter text & store spec lengths 44 | """ 45 | # Store spectrogram lengths for Bucketing 46 | # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) 47 | # spec_length = wav_length // hop_length 48 | 49 | audiopaths_and_text_new = [] 50 | lengths = [] 51 | for audiopath, text in self.audiopaths_and_text: 52 | if self.min_text_len <= len(text) and len(text) <= self.max_text_len: 53 | audiopaths_and_text_new.append([audiopath, text]) 54 | lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) 55 | self.audiopaths_and_text = audiopaths_and_text_new 56 | self.lengths = lengths 57 | 58 | def get_audio_text_pair(self, audiopath_and_text): 59 | # separate filename and text 60 | audiopath, text = audiopath_and_text[0], audiopath_and_text[1] 61 | text = self.get_text(text) 62 | spec, wav = self.get_audio(audiopath) 63 | return (text, spec, wav) 64 | 65 | def get_audio(self, filename): 66 | audio, sampling_rate = load_wav_to_torch(filename) 67 | if sampling_rate != self.sampling_rate: 68 | raise 
ValueError("{} {} SR doesn't match target {} SR".format( 69 | sampling_rate, self.sampling_rate)) 70 | audio_norm = audio / self.max_wav_value 71 | audio_norm = audio_norm.unsqueeze(0) 72 | spec_filename = filename.replace(".wav", ".spec.pt") 73 | if os.path.exists(spec_filename): 74 | spec = torch.load(spec_filename) 75 | else: 76 | spec = spectrogram_torch(audio_norm, self.filter_length, 77 | self.sampling_rate, self.hop_length, self.win_length, 78 | center=False) 79 | spec = torch.squeeze(spec, 0) 80 | torch.save(spec, spec_filename) 81 | return spec, audio_norm 82 | 83 | def get_text(self, text): 84 | if self.cleaned_text: 85 | text_norm = cleaned_text_to_sequence(text) 86 | else: 87 | text_norm = text_to_sequence(text, self.text_cleaners) 88 | if self.add_blank: 89 | text_norm = commons.intersperse(text_norm, 0) 90 | text_norm = torch.LongTensor(text_norm) 91 | return text_norm 92 | 93 | def __getitem__(self, index): 94 | return self.get_audio_text_pair(self.audiopaths_and_text[index]) 95 | 96 | def __len__(self): 97 | return len(self.audiopaths_and_text) 98 | 99 | 100 | class TextAudioCollate(): 101 | """ Zero-pads model inputs and targets 102 | """ 103 | def __init__(self, return_ids=False): 104 | self.return_ids = return_ids 105 | 106 | def __call__(self, batch): 107 | """Collate's training batch from normalized text and aduio 108 | PARAMS 109 | ------ 110 | batch: [text_normalized, spec_normalized, wav_normalized] 111 | """ 112 | # Right zero-pad all one-hot text sequences to max input length 113 | _, ids_sorted_decreasing = torch.sort( 114 | torch.LongTensor([x[1].size(1) for x in batch]), 115 | dim=0, descending=True) 116 | 117 | max_text_len = max([len(x[0]) for x in batch]) 118 | max_spec_len = max([x[1].size(1) for x in batch]) 119 | max_wav_len = max([x[2].size(1) for x in batch]) 120 | 121 | text_lengths = torch.LongTensor(len(batch)) 122 | spec_lengths = torch.LongTensor(len(batch)) 123 | wav_lengths = torch.LongTensor(len(batch)) 124 | 125 | text_padded = torch.LongTensor(len(batch), max_text_len) 126 | spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) 127 | wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) 128 | text_padded.zero_() 129 | spec_padded.zero_() 130 | wav_padded.zero_() 131 | for i in range(len(ids_sorted_decreasing)): 132 | row = batch[ids_sorted_decreasing[i]] 133 | 134 | text = row[0] 135 | text_padded[i, :text.size(0)] = text 136 | text_lengths[i] = text.size(0) 137 | 138 | spec = row[1] 139 | spec_padded[i, :, :spec.size(1)] = spec 140 | spec_lengths[i] = spec.size(1) 141 | 142 | wav = row[2] 143 | wav_padded[i, :, :wav.size(1)] = wav 144 | wav_lengths[i] = wav.size(1) 145 | 146 | if self.return_ids: 147 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing 148 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths 149 | 150 | 151 | """Multi speaker version""" 152 | class TextAudioSpeakerLoader(torch.utils.data.Dataset): 153 | """ 154 | 1) loads audio, speaker_id, text pairs 155 | 2) normalizes text and converts them to sequences of integers 156 | 3) computes spectrograms from audio files. 
157 | """ 158 | def __init__(self, audiopaths_sid_text, hparams): 159 | self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) 160 | self.text_cleaners = hparams.text_cleaners 161 | self.max_wav_value = hparams.max_wav_value 162 | self.sampling_rate = hparams.sampling_rate 163 | self.filter_length = hparams.filter_length 164 | self.hop_length = hparams.hop_length 165 | self.win_length = hparams.win_length 166 | self.sampling_rate = hparams.sampling_rate 167 | 168 | self.cleaned_text = getattr(hparams, "cleaned_text", False) 169 | 170 | self.add_blank = hparams.add_blank 171 | self.min_text_len = getattr(hparams, "min_text_len", 1) 172 | self.max_text_len = getattr(hparams, "max_text_len", 190) 173 | 174 | random.seed(1234) 175 | random.shuffle(self.audiopaths_sid_text) 176 | self._filter() 177 | 178 | def _filter(self): 179 | """ 180 | Filter text & store spec lengths 181 | """ 182 | # Store spectrogram lengths for Bucketing 183 | # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) 184 | # spec_length = wav_length // hop_length 185 | 186 | audiopaths_sid_text_new = [] 187 | lengths = [] 188 | for audiopath, sid, text in self.audiopaths_sid_text: 189 | if self.min_text_len <= len(text) and len(text) <= self.max_text_len: 190 | audiopaths_sid_text_new.append([audiopath, sid, text]) 191 | lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) 192 | self.audiopaths_sid_text = audiopaths_sid_text_new 193 | self.lengths = lengths 194 | 195 | def get_audio_text_speaker_pair(self, audiopath_sid_text): 196 | # separate filename, speaker_id and text 197 | audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] 198 | text = self.get_text(text) 199 | spec, wav = self.get_audio(audiopath) 200 | sid = self.get_sid(sid) 201 | return (text, spec, wav, sid) 202 | 203 | def get_audio(self, filename): 204 | audio, sampling_rate = load_wav_to_torch(filename) 205 | if sampling_rate != self.sampling_rate: 206 | raise ValueError("{} {} SR doesn't match target {} SR".format( 207 | sampling_rate, self.sampling_rate)) 208 | audio_norm = audio / self.max_wav_value 209 | audio_norm = audio_norm.unsqueeze(0) 210 | spec_filename = filename.replace(".wav", ".spec.pt") 211 | if os.path.exists(spec_filename): 212 | spec = torch.load(spec_filename) 213 | else: 214 | spec = spectrogram_torch(audio_norm, self.filter_length, 215 | self.sampling_rate, self.hop_length, self.win_length, 216 | center=False) 217 | spec = torch.squeeze(spec, 0) 218 | torch.save(spec, spec_filename) 219 | return spec, audio_norm 220 | 221 | def get_text(self, text): 222 | if self.cleaned_text: 223 | text_norm = cleaned_text_to_sequence(text) 224 | else: 225 | text_norm = text_to_sequence(text, self.text_cleaners) 226 | if self.add_blank: 227 | text_norm = commons.intersperse(text_norm, 0) 228 | text_norm = torch.LongTensor(text_norm) 229 | return text_norm 230 | 231 | def get_sid(self, sid): 232 | sid = torch.LongTensor([int(sid)]) 233 | return sid 234 | 235 | def __getitem__(self, index): 236 | return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) 237 | 238 | def __len__(self): 239 | return len(self.audiopaths_sid_text) 240 | 241 | 242 | class TextAudioSpeakerCollate(): 243 | """ Zero-pads model inputs and targets 244 | """ 245 | def __init__(self, return_ids=False): 246 | self.return_ids = return_ids 247 | 248 | def __call__(self, batch): 249 | """Collate's training batch from normalized text, audio and speaker identities 250 | PARAMS 
251 |         ------
252 |         batch: [text_normalized, spec_normalized, wav_normalized, sid]
253 |         """
254 |         # Right zero-pad all one-hot text sequences to max input length
255 |         _, ids_sorted_decreasing = torch.sort(
256 |             torch.LongTensor([x[1].size(1) for x in batch]),
257 |             dim=0, descending=True)
258 | 
259 |         max_text_len = max([len(x[0]) for x in batch])
260 |         max_spec_len = max([x[1].size(1) for x in batch])
261 |         max_wav_len = max([x[2].size(1) for x in batch])
262 | 
263 |         text_lengths = torch.LongTensor(len(batch))
264 |         spec_lengths = torch.LongTensor(len(batch))
265 |         wav_lengths = torch.LongTensor(len(batch))
266 |         sid = torch.LongTensor(len(batch))
267 | 
268 |         text_padded = torch.LongTensor(len(batch), max_text_len)
269 |         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
270 |         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
271 |         text_padded.zero_()
272 |         spec_padded.zero_()
273 |         wav_padded.zero_()
274 |         for i in range(len(ids_sorted_decreasing)):
275 |             row = batch[ids_sorted_decreasing[i]]
276 | 
277 |             text = row[0]
278 |             text_padded[i, :text.size(0)] = text
279 |             text_lengths[i] = text.size(0)
280 | 
281 |             spec = row[1]
282 |             spec_padded[i, :, :spec.size(1)] = spec
283 |             spec_lengths[i] = spec.size(1)
284 | 
285 |             wav = row[2]
286 |             wav_padded[i, :, :wav.size(1)] = wav
287 |             wav_lengths[i] = wav.size(1)
288 | 
289 |             sid[i] = row[3]
290 | 
291 |         if self.return_ids:
292 |             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
293 |         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
294 | 
295 | 
296 | class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
297 |     """
298 |     Maintain similar input lengths in a batch.
299 |     Length groups are specified by boundaries.
300 |     Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
301 | 
302 |     It removes samples which are not included in the boundaries.
303 |     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
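    Ex) boundaries = [32, 300, 400, 500], batch_size = 4 -> every batch holds 4 samples whose (approximate) spec lengths fall in a single interval such as (300, 400].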
304 | """ 305 | def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): 306 | super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) 307 | self.lengths = dataset.lengths 308 | self.batch_size = batch_size 309 | self.boundaries = boundaries 310 | 311 | self.buckets, self.num_samples_per_bucket = self._create_buckets() 312 | self.total_size = sum(self.num_samples_per_bucket) 313 | self.num_samples = self.total_size // self.num_replicas 314 | 315 | def _create_buckets(self): 316 | buckets = [[] for _ in range(len(self.boundaries) - 1)] 317 | for i in range(len(self.lengths)): 318 | length = self.lengths[i] 319 | idx_bucket = self._bisect(length) 320 | if idx_bucket != -1: 321 | buckets[idx_bucket].append(i) 322 | 323 | for i in range(len(buckets) - 1, 0, -1): 324 | if len(buckets[i]) == 0: 325 | buckets.pop(i) 326 | self.boundaries.pop(i+1) 327 | 328 | num_samples_per_bucket = [] 329 | for i in range(len(buckets)): 330 | len_bucket = len(buckets[i]) 331 | total_batch_size = self.num_replicas * self.batch_size 332 | rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size 333 | num_samples_per_bucket.append(len_bucket + rem) 334 | return buckets, num_samples_per_bucket 335 | 336 | def __iter__(self): 337 | # deterministically shuffle based on epoch 338 | g = torch.Generator() 339 | g.manual_seed(self.epoch) 340 | 341 | indices = [] 342 | if self.shuffle: 343 | for bucket in self.buckets: 344 | indices.append(torch.randperm(len(bucket), generator=g).tolist()) 345 | else: 346 | for bucket in self.buckets: 347 | indices.append(list(range(len(bucket)))) 348 | 349 | batches = [] 350 | for i in range(len(self.buckets)): 351 | bucket = self.buckets[i] 352 | len_bucket = len(bucket) 353 | if len_bucket == 0: 354 | continue 355 | ids_bucket = indices[i] 356 | num_samples_bucket = self.num_samples_per_bucket[i] 357 | 358 | # add extra samples to make it evenly divisible 359 | rem = num_samples_bucket - len_bucket 360 | ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] 361 | 362 | # subsample 363 | ids_bucket = ids_bucket[self.rank::self.num_replicas] 364 | 365 | # batching 366 | for j in range(len(ids_bucket) // self.batch_size): 367 | batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] 368 | batches.append(batch) 369 | 370 | if self.shuffle: 371 | batch_ids = torch.randperm(len(batches), generator=g).tolist() 372 | batches = [batches[i] for i in batch_ids] 373 | self.batches = batches 374 | 375 | assert len(self.batches) * self.batch_size == self.num_samples 376 | return iter(self.batches) 377 | 378 | def _bisect(self, x, lo=0, hi=None): 379 | if hi is None: 380 | hi = len(self.boundaries) - 1 381 | 382 | if hi > lo: 383 | mid = (hi + lo) // 2 384 | if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: 385 | return mid 386 | elif x <= self.boundaries[mid]: 387 | return self._bisect(x, lo, mid) 388 | else: 389 | return self._bisect(x, mid + 1, hi) 390 | else: 391 | return -1 392 | 393 | def __len__(self): 394 | return self.num_samples // self.batch_size 395 | -------------------------------------------------------------------------------- /filelists/miyu_train.txt: -------------------------------------------------------------------------------- 1 | wav/ba/miyu/533522.wav|10|ブ、ブルー……アーカイブ…… 2 | wav/ba/miyu/978431.wav|10|SRT特殊学園、ラビット小隊の霞沢ミユ、です…… あの、もう帰……っちゃ、だめですよね…… 3 | wav/ba/miyu/589002.wav|10|お、お疲れ様です、先生…… 4 | 
wav/ba/miyu/527590.wav|10|先生のお好きな時に、呼んでください……その、待つのは得意なので…… 5 | wav/ba/miyu/145077.wav|10|あの、先生……私の名前、覚えてますか……? 6 | wav/ba/miyu/39192.wav|10|新しい友達を作るなんて、そんな難しい事…… 7 | wav/ba/miyu/649415.wav|10|ラビット小隊、もっと仲良くなれたら良いのに…… 8 | wav/ba/miyu/306949.wav|10|もっと他の隊員みたいに、存在感があれば…… 9 | wav/ba/miyu/255333.wav|10|私なんて、どこにでもいる小石みたいなものなので…… 10 | wav/ba/miyu/616395.wav|10|そもそもどうして私なんかが、SRTに…… 11 | wav/ba/miyu/164427.wav|10|呼吸を整えて……目標に、集中……。距離千,150メートル、風は東北東から15ノット……温湿度による誤差を補正…… 12 | wav/ba/miyu/955071.wav|10|あ、あの……先生、できればその、そういった悪戯はご遠慮いただけますと……えっと、照準が乱れてしまうので…… 13 | wav/ba/miyu/537029.wav|10|……なるほど。これは邪魔ではなく、実践に近い状況下での狙撃訓練、ということですよね……SRTでも、似たような訓練をしたような気が…… 14 | wav/ba/miyu/750669.wav|10|ひゃぅっ……!?せ、先生……!いくら何でも、耳に息を吹きかけるのは……!? 15 | wav/ba/miyu/412781.wav|10|あ、えっと、その…… 16 | wav/ba/miyu/619085.wav|10|先生の指示、いいですね……怖く、無いので…… 17 | wav/ba/miyu/827005.wav|10|先生は…私のことを見つけてくれますよね。いつも、どんな時でも… 18 | wav/ba/miyu/706012.wav|10|こんな私でも…先生のお役に立てるといいのですが… 19 | wav/ba/miyu/814441.wav|10|先生、私のことを見失わないで下さいね。 20 | wav/ba/miyu/946836.wav|10|せ、戦闘ですか 21 | wav/ba/miyu/554075.wav|10|他の子の、邪魔にならないように…… 22 | wav/ba/miyu/714649.wav|10|わ、わたしがリーダーですか 23 | wav/ba/miyu/44255.wav|10|うまく、できるかも… 24 | wav/ba/miyu/422295.wav|10|すみません…できればでいいのですが、わたしについできてください。 25 | wav/ba/miyu/155037.wav|10|あ、ありがとうございます。 26 | wav/ba/miyu/124180.wav|10|すこし本給します。 27 | wav/ba/miyu/173043.wav|10|隠れました! 28 | wav/ba/miyu/379056.wav|10|もしかして、敵にも無視されます? 29 | wav/ba/miyu/472324.wav|10|ポジション、変更します。 30 | wav/ba/miyu/986967.wav|10|え、援護射撃お願いします。 31 | wav/ba/miyu/356351.wav|10|あ、これ?私の分…ですか…? 32 | wav/ba/miyu/930369.wav|10|みんな、ごめん。 33 | wav/ba/miyu/41287.wav|10|ヒット! 34 | wav/ba/miyu/456869.wav|10|タンゴーダウン 35 | wav/ba/miyu/55566.wav|10|い、移動します 36 | wav/ba/miyu/389580.wav|10|わたし、ここにいってもいいのでしょうか… 37 | wav/ba/miyu/130233.wav|10|わ、私のことはお気になされずに 38 | wav/ba/miyu/29571.wav|10|えっと、私たちの勝次ですか…?本当に? 39 | wav/ba/miyu/147691.wav|10|そんな…おかしいです。私なんかリーダーになって、勝てたなんて 40 | wav/ba/miyu/974397.wav|10|こうなると思っていました。私がかかわると、結局いつもこんな風に 41 | wav/ba/miyu/862032.wav|10|逃げたい、辛い、帰りたい… 42 | wav/ba/miyu/618533.wav|10|こんないいものを、わたしが頂いても…? 43 | wav/ba/miyu/90758.wav|10|戦術への理解が、より深まりました。 44 | wav/ba/miyu/861536.wav|10|せめて…SRTの名に恥じないよう、頑張ります。 45 | wav/ba/miyu/829111.wav|10|どうにか成長して、他のみんなの足を引っ張らないようにしないと… 46 | wav/ba/miyu/709504.wav|10|ラビット39式小銃、確かに受け取りました。……やはり、少し重いですね。 47 | wav/ba/miyu/749189.wav|10|ひぁっ…私の誕生日、ですか……?ど、どうして覚えて…そんな…。あ、ありがとうございます……本当に、嬉しいです……! 48 | wav/ba/miyu/536512.wav|10|せ、先生がその……本日、誕生日だと聞きまして……。野宿の立場なので、その、しっかりしたプレゼントはお渡しできないのですが……。 えっと、お祝いの言葉だけは、どうにか伝えたくて……。 49 | wav/ba/miyu/872843.wav|10|そういえば昔、ハロウィーンということでお化けの仮装をしたのですが。誰も、私の存在に気付いてくれませんでした……。 50 | wav/ba/miyu/436322.wav|10|今年は、SRTに帰れると良いですね 51 | wav/ba/miyu/387627.wav|10|サンタクロースさんは、悪い子にはプレゼントを渡さないとか……いえ、そもそもサンタクロースさんも、私の存在を忘れているかもしれませんね。 52 | -------------------------------------------------------------------------------- /filelists/miyu_train.txt.cleaned: -------------------------------------------------------------------------------- 1 | wav/ba/miyu/533522.wav|10|b ɯ, bɯ↑rɯ↓ɯ…… a↑aka↓ibɯ…… 2 | wav/ba/miyu/978431.wav|10|e↑sɯaarɯtiito↓kUʃɯ ga↑kɯeN, ra↑biQtoʃo↓otaino ka↑sɯmi↓sawa mi↓yɯ, de↓sU…… a↑no, mo↓o k i…… Q↓ʧa, da↑me↓desUyone…… 3 | wav/ba/miyu/589002.wav|10|o, o↑ʦUkaresamadesU, se↑Nse↓e…… 4 | wav/ba/miyu/527590.wav|10|se↑Nse↓eno o↑sUkina to↑ki↓ni, yo↑Nde kɯ↑dasa↓i…… so↑no, ma↓ʦɯ no↑wa to↑kɯ↓inanode…… 5 | wav/ba/miyu/145077.wav|10|a↑no, se↑Nse↓e…… wa↑taʃino na↑mae, o↑boe↓temasUka……? 
6 | wav/ba/miyu/39192.wav|10|a↑taraʃi↓i to↑modaʧio ʦU↑kɯ↓rɯnaNte, so↑Nna mɯ↑zɯkaʃi↓i ko↑to…… 7 | wav/ba/miyu/649415.wav|10|ra↑biQtoʃo↓otai, mo↓Qto na↓kayokɯ na↑re↓tara yo↓inoni…… 8 | wav/ba/miyu/306949.wav|10|mo↓Qto ho↑kano ta↑iiNmi↓taini, so↑Nzai↓kaNga a↑re↓ba…… 9 | wav/ba/miyu/255333.wav|10|wa↑taʃina↓Nte, do↓konidemo i↑rɯ ko↑iʃimi↓taina mo↑no↓nanode…… 10 | wav/ba/miyu/616395.wav|10|so↓mosomo do↓oʃIte wa↑taʃina↓Nkaga, e↑sɯaarɯtiini…… 11 | wav/ba/miyu/164427.wav|10|ko↑kyɯɯo to↑tonoe↓te…… mo↑kUhyooni, ʃɯ↑ɯʧɯɯ……. kyo↑ri↓seN, hya↑kɯ go↑jɯɯme↓etorɯ, ka↑zewa to↑ohokUtookara jɯ↑ɯ go↑no↓Qto…… yɯ↑takaʃi↓ʦɯdoni yo↑rɯ go↓sao ho↑see…… 12 | wav/ba/miyu/955071.wav|10|a, a↑no…… se↑Nse↓e, de↑ki↓reba so↑no, so↑oiQta i↑tazɯrawa go↑e↓Nryo i↑tadakema↓sUto…… e↑Qto, ʃo↑ojɯNga mi↑dare↓te ʃi↑maɯ↓node…… 13 | wav/ba/miyu/537029.wav|10|…… na↑rɯhodo. ko↑rewa ja↑made↓wanakɯ, ji↑QseNni ʧI↑ka↓i jo↑okyoo↓kadeno so↑gekIkɯ↓NreN, t o i↑ɯ ko↑to↓desUyone…… e↑sɯaarɯtiidemo, ni↑ta yo↓ona kɯ↓NreNo ʃI↑ta yo↓ona ki↑ga…… 14 | wav/ba/miyu/750669.wav|10|hya↓ɯQ……!? s e, se↑Nse↓e……! i↓kɯra na↓nidemo, mi↑mi↓ni i↓kio fU↑kikake↓rɯ no↑wa……!? 15 | wav/ba/miyu/412781.wav|10|a, e↑Qto, so↑no…… 16 | wav/ba/miyu/619085.wav|10|se↑Nse↓eno ʃi↓ji, i↓idesUne…… ko↑wa↓kɯ, na↓inode…… 17 | wav/ba/miyu/827005.wav|10|se↑Nse↓ewa… wa↑taʃino ko↑to↓o mi↑ʦUkete kɯ↑rema↓sUyone. i↓ʦɯmo, do↓Nna to↑ki↓demo… 18 | wav/ba/miyu/706012.wav|10|ko↑Nna wa↑taʃide↓mo… se↑Nse↓eno o↑yakɯni ta↑te↓rɯto i↓i no↑de↓sUga… 19 | wav/ba/miyu/814441.wav|10|se↑Nse↓e, wa↑taʃino ko↑to↓o mi↑ɯʃina↓wanaide kɯ↑dasa↓ine. 20 | wav/ba/miyu/946836.wav|10|s e, se↑Ntoode↓sUka. 21 | wav/ba/miyu/554075.wav|10|ta↓no ko↑no, ja↑mani na↑ra↓nai yo↓oni…… 22 | wav/ba/miyu/714649.wav|10|w a, wa↑taʃiga ri↓idaadesUka. 23 | wav/ba/miyu/44255.wav|10|ɯ↑ma↓kɯ, de↑ki↓rɯkamo… 24 | wav/ba/miyu/422295.wav|10|sɯ↑mimase↓N… de↑ki↓rebade i↓i no↑de↓sUga, wa↑taʃini ʦɯ↓i de↑ki↓te kɯ↑dasa↓i. 25 | wav/ba/miyu/155037.wav|10|a, a↑ri↓gatoo go↑zaima↓sU. 26 | wav/ba/miyu/124180.wav|10|sU↑ko↓ʃI ho↓Nkyɯɯ ʃi↑ma↓sU. 27 | wav/ba/miyu/173043.wav|10|ka↑kɯrema↓ʃIta! 28 | wav/ba/miyu/379056.wav|10|mo↓ʃIkaʃIte, te↑kinimo mɯ↓ʃI sa↑rema↓sU? 29 | wav/ba/miyu/472324.wav|10|po↑ji↓ʃoN, he↑Nkoo ʃi↑ma↓sU. 30 | wav/ba/miyu/986967.wav|10|e, e↑Ngoʃagekio↓negai ʃi↑ma↓sU. 31 | wav/ba/miyu/356351.wav|10|a, ko↑re? wa↑taʃino bɯ↓N… de↓sUka…? 32 | wav/ba/miyu/930369.wav|10|mi↑Nna, go↑meN. 33 | wav/ba/miyu/41287.wav|10|hi↓Qto! 34 | wav/ba/miyu/456869.wav|10|ta↓NgoodaɯN. 35 | wav/ba/miyu/55566.wav|10|i, i↑doo ʃi↑ma↓sU. 36 | wav/ba/miyu/389580.wav|10|wa↑taʃi, ko↑koni i↑Qtemo i↓i no↑deʃoo↓ka… 37 | wav/ba/miyu/130233.wav|10|w a, wa↑taʃino ko↑to↓wa o↑kini na↑sarezɯni. 38 | wav/ba/miyu/29571.wav|10|e↑Qto, wa↑taʃi↓taʧino ka↓ʦɯjidesUka…? ho↑Ntooni? 39 | wav/ba/miyu/147691.wav|10|so↑Nna… o↑kaʃi↓idesU. wa↑taʃina↓Nka ri↓idaani na↓Qte, ka↑te↓tanaNte. 40 | wav/ba/miyu/974397.wav|10|ko↑o na↓rɯto o↑mo↓Qte i↑ma↓ʃIta. wa↑taʃiga ka↑kawa↓rɯto, ke↑Qkyokɯ i↓ʦɯmo ko↑Nna ka↑zeni. 41 | wav/ba/miyu/862032.wav|10|ni↑geta↓i, ʦɯ↑ra↓i, ka↑erita↓i… 42 | wav/ba/miyu/618533.wav|10|ko↑Nna i↓i mo↑no↓o, wa↑taʃiga i↑tadaitemo…? 43 | wav/ba/miyu/90758.wav|10|se↑Njɯʦɯeno ri↓kaiga, yo↓ri fU↑kamarima↓ʃIta. 44 | wav/ba/miyu/861536.wav|10|se↓mete… e↑sɯaarɯtiino na↑ni ha↑ji↓nai yo↓o, ga↑Nbarima↓sU. 45 | wav/ba/miyu/829111.wav|10|do↓onika se↑eʧoo ʃI↑te, ta↓no mi↑Nna↓no a↑ʃi↓o hi↑Qpara↓nai yo↓oni ʃi↑naito… 46 | wav/ba/miyu/709504.wav|10|ra↓biQto sa↓Njɯɯ kyɯ↑ɯ↓ʃIki ʃo↑ojɯɯ, ta↓ʃIkani ɯ↑ketorima↓ʃIta……. ya↑ha↓ri, sU↑ko↓ʃi o↑moi↓desUne. 47 | wav/ba/miyu/749189.wav|10|hi↓aQ… wa↑taʃino ta↑Njoo↓bi, de↓sUka……? 
d o, do↓oʃIte o↑boe↓te… so↑Nna…. a, a↑ri↓gatoo go↑zaima↓sU…… ho↑Ntooni, ɯ↑reʃi↓idesU……! 48 | wav/ba/miyu/536512.wav|10|s e, se↑Nse↓ega so↑no…… ho↓Njiʦɯ, ta↑Njoo↓bidato kI↑kima↓ʃIte……. no↓jɯkɯno ta↓ʧibananode, so↑no, ʃi↑Qka↓ri ʃI↑ta pɯ↑re↓zeNtowa o↑wa↓taʃi de↑ki↓nai no↑de↓sUga……. e↑Qto, o↑iwaino ko↑tobadakewa, do↓onika ʦU↑taeta↓kUte……. 49 | wav/ba/miyu/872843.wav|10|so↑o i↓eba mɯ↑kaʃi, ha↑rowi↓iNto i↑ɯ ko↑to↓de o↑ba↓keno ka↑sooo ʃI↑ta no↑de↓sUga. da↓remo, wa↑taʃino so↑Nzaini ki↑zɯ↓ite kɯ↑remase↓NdeʃIta……. 50 | wav/ba/miyu/436322.wav|10|ko↑toʃiwa, e↑sɯaarɯtiini ka↑ere↓rɯto yo↓idesUne. 51 | wav/ba/miyu/387627.wav|10|sa↑Ntakɯro↓osɯsaNwa, wa↑rɯ↓i ko↑niwa pɯ↑re↓zeNtoo wa↑tasanaitoka…… i↓e, so↓mosomo sa↑Ntakɯro↓osɯsaNmo, wa↑taʃino so↑Nzaio wa↑sɯrete i↑rɯkamo ʃi↑remase↓Nne. 52 | -------------------------------------------------------------------------------- /filelists/miyu_val.txt: -------------------------------------------------------------------------------- 1 | wav/ba/miyu/916145.wav|10|ど、どしてこんなことに… 2 | wav/ba/miyu/139528.wav|10|え、援護します 3 | wav/ba/miyu/211336.wav|10|やっぱり私は何もできないゴミ物なんだ 4 | wav/ba/miyu/886881.wav|10|私はその辺の小石 5 | wav/ba/miyu/899245.wav|10|SRTに入らなければ良かった 6 | wav/ba/miyu/805836.wav|10|な、何か、遮蔽物は……? -------------------------------------------------------------------------------- /filelists/miyu_val.txt.cleaned: -------------------------------------------------------------------------------- 1 | wav/ba/miyu/916145.wav|10|d o, do↑ʃi↓te ko↑Nna ko↑to↓ni… 2 | wav/ba/miyu/139528.wav|10|e, e↓Ngo ʃi↑ma↓sU. 3 | wav/ba/miyu/211336.wav|10|ya↑Qpa↓ri wa↑taʃiwa na↓nimo de↑ki↓nai go↑mibɯʦɯna N↓da. 4 | wav/ba/miyu/886881.wav|10|wa↑taʃiwa so↑no a↑tarino ko↑iʃi. 5 | wav/ba/miyu/899245.wav|10|e↑sɯaarɯtiini ha↑ira↓nakereba yo↓kaQta. 6 | wav/ba/miyu/805836.wav|10|n a, na↓nika, ʃa↑heebɯʦɯwa……? 
7 | -------------------------------------------------------------------------------- /inference.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "pycharm": { 8 | "name": "#%%\n" 9 | } 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "%matplotlib inline\n", 14 | "import matplotlib.pyplot as plt\n", 15 | "import IPython.display as ipd\n", 16 | "\n", 17 | "import os\n", 18 | "import json\n", 19 | "import math\n", 20 | "import torch\n", 21 | "from torch import nn\n", 22 | "from torch.nn import functional as F\n", 23 | "from torch.utils.data import DataLoader\n", 24 | "\n", 25 | "import commons\n", 26 | "import utils\n", 27 | "from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate\n", 28 | "from models import SynthesizerTrn\n", 29 | "from text.symbols import symbols\n", 30 | "from text import text_to_sequence\n", 31 | "\n", 32 | "from scipy.io.wavfile import write\n", 33 | "\n", 34 | "\n", 35 | "def get_text(text, hps):\n", 36 | " text_norm = text_to_sequence(text, hps.data.text_cleaners)\n", 37 | " if hps.data.add_blank:\n", 38 | " text_norm = commons.intersperse(text_norm, 0)\n", 39 | " text_norm = torch.LongTensor(text_norm)\n", 40 | " return text_norm" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": { 46 | "pycharm": { 47 | "name": "#%% md\n" 48 | } 49 | }, 50 | "source": [ 51 | "## Single Speaker" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": { 58 | "pycharm": { 59 | "name": "#%%\n" 60 | } 61 | }, 62 | "outputs": [], 63 | "source": [ 64 | "hps = utils.get_hparams_from_file(\"configs/XXX.json\")" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "metadata": { 71 | "pycharm": { 72 | "name": "#%%\n" 73 | } 74 | }, 75 | "outputs": [], 76 | "source": [ 77 | "net_g = SynthesizerTrn(\n", 78 | " len(symbols),\n", 79 | " hps.data.filter_length // 2 + 1,\n", 80 | " hps.train.segment_size // hps.data.hop_length,\n", 81 | " **hps.model).cuda()\n", 82 | "_ = net_g.eval()\n", 83 | "\n", 84 | "_ = utils.load_checkpoint(\"/path/to/model.pth\", net_g, None)" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "metadata": { 91 | "pycharm": { 92 | "name": "#%%\n" 93 | } 94 | }, 95 | "outputs": [], 96 | "source": [ 97 | "stn_tst = get_text(\"こんにちは\", hps)\n", 98 | "with torch.no_grad():\n", 99 | " x_tst = stn_tst.cuda().unsqueeze(0)\n", 100 | " x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n", 101 | " audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n", 102 | "ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": { 108 | "pycharm": { 109 | "name": "#%% md\n" 110 | } 111 | }, 112 | "source": [ 113 | "## Multiple Speakers" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": { 120 | "pycharm": { 121 | "name": "#%%\n" 122 | } 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "hps = utils.get_hparams_from_file(\"./configs/XXX.json\")" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "metadata": { 133 | "pycharm": { 134 | "name": "#%%\n" 135 | } 136 | }, 137 | "outputs": [], 138 | "source": [ 139 | "net_g = 
SynthesizerTrn(\n", 140 | " len(symbols),\n", 141 | " hps.data.filter_length // 2 + 1,\n", 142 | " hps.train.segment_size // hps.data.hop_length,\n", 143 | " n_speakers=hps.data.n_speakers,\n", 144 | " **hps.model).cuda()\n", 145 | "_ = net_g.eval()\n", 146 | "\n", 147 | "_ = utils.load_checkpoint(\"/path/to/model.pth\", net_g, None)" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "metadata": { 154 | "pycharm": { 155 | "name": "#%%\n" 156 | } 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "stn_tst = get_text(\"こんにちは\", hps)\n", 161 | "with torch.no_grad():\n", 162 | " x_tst = stn_tst.cuda().unsqueeze(0)\n", 163 | " x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n", 164 | " sid = torch.LongTensor([4]).cuda()\n", 165 | " audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n", 166 | "ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))" 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": { 172 | "pycharm": { 173 | "name": "#%% md\n" 174 | } 175 | }, 176 | "source": [ 177 | "### Voice Conversion" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": { 184 | "pycharm": { 185 | "name": "#%%\n" 186 | } 187 | }, 188 | "outputs": [], 189 | "source": [ 190 | "dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)\n", 191 | "collate_fn = TextAudioSpeakerCollate()\n", 192 | "loader = DataLoader(dataset, num_workers=8, shuffle=False,\n", 193 | " batch_size=1, pin_memory=True,\n", 194 | " drop_last=True, collate_fn=collate_fn)\n", 195 | "data_list = list(loader)" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": { 202 | "pycharm": { 203 | "name": "#%%\n" 204 | } 205 | }, 206 | "outputs": [], 207 | "source": [ 208 | "with torch.no_grad():\n", 209 | " x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cuda() for x in data_list[0]]\n", 210 | " sid_tgt1 = torch.LongTensor([1]).cuda()\n", 211 | " sid_tgt2 = torch.LongTensor([2]).cuda()\n", 212 | " sid_tgt3 = torch.LongTensor([4]).cuda()\n", 213 | " audio1 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[0][0,0].data.cpu().float().numpy()\n", 214 | " audio2 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt2)[0][0,0].data.cpu().float().numpy()\n", 215 | " audio3 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt3)[0][0,0].data.cpu().float().numpy()\n", 216 | "print(\"Original SID: %d\" % sid_src.item())\n", 217 | "ipd.display(ipd.Audio(y[0].cpu().numpy(), rate=hps.data.sampling_rate, normalize=False))\n", 218 | "print(\"Converted SID: %d\" % sid_tgt1.item())\n", 219 | "ipd.display(ipd.Audio(audio1, rate=hps.data.sampling_rate, normalize=False))\n", 220 | "print(\"Converted SID: %d\" % sid_tgt2.item())\n", 221 | "ipd.display(ipd.Audio(audio2, rate=hps.data.sampling_rate, normalize=False))\n", 222 | "print(\"Converted SID: %d\" % sid_tgt3.item())\n", 223 | "ipd.display(ipd.Audio(audio3, rate=hps.data.sampling_rate, normalize=False))" 224 | ] 225 | } 226 | ], 227 | "metadata": { 228 | "kernelspec": { 229 | "display_name": "Python 3.7.9 64-bit", 230 | "language": "python", 231 | "name": "python3" 232 | }, 233 | "language_info": { 234 | "codemirror_mode": { 235 | "name": "ipython", 236 | "version": 3 237 | }, 238 | "file_extension": ".py", 239 | "mimetype": 
"text/x-python", 240 | "name": "python", 241 | "nbconvert_exporter": "python", 242 | "pygments_lexer": "ipython3", 243 | "version": "3.7.9" 244 | }, 245 | "vscode": { 246 | "interpreter": { 247 | "hash": "c15292341d300295ca9f634d04c483f667a0c1d5ee0c309c2ac4e312cce8b8df" 248 | } 249 | } 250 | }, 251 | "nbformat": 4, 252 | "nbformat_minor": 4 253 | } -------------------------------------------------------------------------------- /losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | 4 | import commons 5 | 6 | 7 | def feature_loss(fmap_r, fmap_g): 8 | loss = 0 9 | for dr, dg in zip(fmap_r, fmap_g): 10 | for rl, gl in zip(dr, dg): 11 | rl = rl.float().detach() 12 | gl = gl.float() 13 | loss += torch.mean(torch.abs(rl - gl)) 14 | 15 | return loss * 2 16 | 17 | 18 | def discriminator_loss(disc_real_outputs, disc_generated_outputs): 19 | loss = 0 20 | r_losses = [] 21 | g_losses = [] 22 | for dr, dg in zip(disc_real_outputs, disc_generated_outputs): 23 | dr = dr.float() 24 | dg = dg.float() 25 | r_loss = torch.mean((1-dr)**2) 26 | g_loss = torch.mean(dg**2) 27 | loss += (r_loss + g_loss) 28 | r_losses.append(r_loss.item()) 29 | g_losses.append(g_loss.item()) 30 | 31 | return loss, r_losses, g_losses 32 | 33 | 34 | def generator_loss(disc_outputs): 35 | loss = 0 36 | gen_losses = [] 37 | for dg in disc_outputs: 38 | dg = dg.float() 39 | l = torch.mean((1-dg)**2) 40 | gen_losses.append(l) 41 | loss += l 42 | 43 | return loss, gen_losses 44 | 45 | 46 | def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): 47 | """ 48 | z_p, logs_q: [b, h, t_t] 49 | m_p, logs_p: [b, h, t_t] 50 | """ 51 | z_p = z_p.float() 52 | logs_q = logs_q.float() 53 | m_p = m_p.float() 54 | logs_p = logs_p.float() 55 | z_mask = z_mask.float() 56 | 57 | kl = logs_p - logs_q - 0.5 58 | kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) 59 | kl = torch.sum(kl * z_mask) 60 | l = kl / torch.sum(z_mask) 61 | return l 62 | -------------------------------------------------------------------------------- /mel_processing.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | import random 4 | import torch 5 | from torch import nn 6 | import torch.nn.functional as F 7 | import torch.utils.data 8 | import numpy as np 9 | import librosa 10 | import librosa.util as librosa_util 11 | from librosa.util import normalize, pad_center, tiny 12 | from scipy.signal import get_window 13 | from scipy.io.wavfile import read 14 | from librosa.filters import mel as librosa_mel_fn 15 | 16 | MAX_WAV_VALUE = 32768.0 17 | 18 | 19 | def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): 20 | """ 21 | PARAMS 22 | ------ 23 | C: compression factor 24 | """ 25 | return torch.log(torch.clamp(x, min=clip_val) * C) 26 | 27 | 28 | def dynamic_range_decompression_torch(x, C=1): 29 | """ 30 | PARAMS 31 | ------ 32 | C: compression factor used to compress 33 | """ 34 | return torch.exp(x) / C 35 | 36 | 37 | def spectral_normalize_torch(magnitudes): 38 | output = dynamic_range_compression_torch(magnitudes) 39 | return output 40 | 41 | 42 | def spectral_de_normalize_torch(magnitudes): 43 | output = dynamic_range_decompression_torch(magnitudes) 44 | return output 45 | 46 | 47 | mel_basis = {} 48 | hann_window = {} 49 | 50 | 51 | def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): 52 | if torch.min(y) < -1.: 53 | print('min value is ', torch.min(y)) 54 | if torch.max(y) > 1.: 55 | print('max value is ', torch.max(y)) 56 | 57 | global hann_window 58 | dtype_device = str(y.dtype) + '_' + str(y.device) 59 | wnsize_dtype_device = str(win_size) + '_' + dtype_device 60 | if wnsize_dtype_device not in hann_window: 61 | hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) 62 | 63 | y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') 64 | y = y.squeeze(1) 65 | 66 | spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], 67 | center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) 68 | 69 | spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) 70 | return spec 71 | 72 | 73 | def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): 74 | global mel_basis 75 | dtype_device = str(spec.dtype) + '_' + str(spec.device) 76 | fmax_dtype_device = str(fmax) + '_' + dtype_device 77 | if fmax_dtype_device not in mel_basis: 78 | mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) 79 | mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) 80 | spec = torch.matmul(mel_basis[fmax_dtype_device], spec) 81 | spec = spectral_normalize_torch(spec) 82 | return spec 83 | 84 | 85 | def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): 86 | if torch.min(y) < -1.: 87 | print('min value is ', torch.min(y)) 88 | if torch.max(y) > 1.: 89 | print('max value is ', torch.max(y)) 90 | 91 | global mel_basis, hann_window 92 | dtype_device = str(y.dtype) + '_' + str(y.device) 93 | fmax_dtype_device = str(fmax) + '_' + dtype_device 94 | wnsize_dtype_device = str(win_size) + '_' + dtype_device 95 | if fmax_dtype_device not in mel_basis: 96 | mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, 
fmax) 97 | mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) 98 | if wnsize_dtype_device not in hann_window: 99 | hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) 100 | 101 | y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') 102 | y = y.squeeze(1) 103 | 104 | spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], 105 | center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) 106 | 107 | spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) 108 | 109 | spec = torch.matmul(mel_basis[fmax_dtype_device], spec) 110 | spec = spectral_normalize_torch(spec) 111 | 112 | return spec 113 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | 7 | import commons 8 | import modules 9 | import attentions 10 | import monotonic_align 11 | 12 | from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d 13 | from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm 14 | from commons import init_weights, get_padding 15 | 16 | 17 | class StochasticDurationPredictor(nn.Module): 18 | def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): 19 | super().__init__() 20 | filter_channels = in_channels # it needs to be removed from future version. 21 | self.in_channels = in_channels 22 | self.filter_channels = filter_channels 23 | self.kernel_size = kernel_size 24 | self.p_dropout = p_dropout 25 | self.n_flows = n_flows 26 | self.gin_channels = gin_channels 27 | 28 | self.log_flow = modules.Log() 29 | self.flows = nn.ModuleList() 30 | self.flows.append(modules.ElementwiseAffine(2)) 31 | for i in range(n_flows): 32 | self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) 33 | self.flows.append(modules.Flip()) 34 | 35 | self.post_pre = nn.Conv1d(1, filter_channels, 1) 36 | self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) 37 | self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) 38 | self.post_flows = nn.ModuleList() 39 | self.post_flows.append(modules.ElementwiseAffine(2)) 40 | for i in range(4): 41 | self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) 42 | self.post_flows.append(modules.Flip()) 43 | 44 | self.pre = nn.Conv1d(in_channels, filter_channels, 1) 45 | self.proj = nn.Conv1d(filter_channels, filter_channels, 1) 46 | self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) 47 | if gin_channels != 0: 48 | self.cond = nn.Conv1d(gin_channels, filter_channels, 1) 49 | 50 | def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): 51 | x = torch.detach(x) 52 | x = self.pre(x) 53 | if g is not None: 54 | g = torch.detach(g) 55 | x = x + self.cond(g) 56 | x = self.convs(x, x_mask) 57 | x = self.proj(x) * x_mask 58 | 59 | if not reverse: 60 | flows = self.flows 61 | assert w is not None 62 | 63 | logdet_tot_q = 0 64 | h_w = self.post_pre(w) 65 | h_w = self.post_convs(h_w, x_mask) 66 | h_w = self.post_proj(h_w) * x_mask 67 | e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask 68 | z_q = 
e_q 69 | for flow in self.post_flows: 70 | z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) 71 | logdet_tot_q += logdet_q 72 | z_u, z1 = torch.split(z_q, [1, 1], 1) 73 | u = torch.sigmoid(z_u) * x_mask 74 | z0 = (w - u) * x_mask 75 | logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) 76 | logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q 77 | 78 | logdet_tot = 0 79 | z0, logdet = self.log_flow(z0, x_mask) 80 | logdet_tot += logdet 81 | z = torch.cat([z0, z1], 1) 82 | for flow in flows: 83 | z, logdet = flow(z, x_mask, g=x, reverse=reverse) 84 | logdet_tot = logdet_tot + logdet 85 | nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot 86 | return nll + logq # [b] 87 | else: 88 | flows = list(reversed(self.flows)) 89 | flows = flows[:-2] + [flows[-1]] # remove a useless vflow 90 | z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale 91 | for flow in flows: 92 | z = flow(z, x_mask, g=x, reverse=reverse) 93 | z0, z1 = torch.split(z, [1, 1], 1) 94 | logw = z0 95 | return logw 96 | 97 | 98 | class DurationPredictor(nn.Module): 99 | def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): 100 | super().__init__() 101 | 102 | self.in_channels = in_channels 103 | self.filter_channels = filter_channels 104 | self.kernel_size = kernel_size 105 | self.p_dropout = p_dropout 106 | self.gin_channels = gin_channels 107 | 108 | self.drop = nn.Dropout(p_dropout) 109 | self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) 110 | self.norm_1 = modules.LayerNorm(filter_channels) 111 | self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) 112 | self.norm_2 = modules.LayerNorm(filter_channels) 113 | self.proj = nn.Conv1d(filter_channels, 1, 1) 114 | 115 | if gin_channels != 0: 116 | self.cond = nn.Conv1d(gin_channels, in_channels, 1) 117 | 118 | def forward(self, x, x_mask, g=None): 119 | x = torch.detach(x) 120 | if g is not None: 121 | g = torch.detach(g) 122 | x = x + self.cond(g) 123 | x = self.conv_1(x * x_mask) 124 | x = torch.relu(x) 125 | x = self.norm_1(x) 126 | x = self.drop(x) 127 | x = self.conv_2(x * x_mask) 128 | x = torch.relu(x) 129 | x = self.norm_2(x) 130 | x = self.drop(x) 131 | x = self.proj(x * x_mask) 132 | return x * x_mask 133 | 134 | 135 | class TextEncoder(nn.Module): 136 | def __init__(self, 137 | n_vocab, 138 | out_channels, 139 | hidden_channels, 140 | filter_channels, 141 | n_heads, 142 | n_layers, 143 | kernel_size, 144 | p_dropout): 145 | super().__init__() 146 | self.n_vocab = n_vocab 147 | self.out_channels = out_channels 148 | self.hidden_channels = hidden_channels 149 | self.filter_channels = filter_channels 150 | self.n_heads = n_heads 151 | self.n_layers = n_layers 152 | self.kernel_size = kernel_size 153 | self.p_dropout = p_dropout 154 | 155 | self.emb = nn.Embedding(n_vocab, hidden_channels) 156 | nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) 157 | 158 | self.encoder = attentions.Encoder( 159 | hidden_channels, 160 | filter_channels, 161 | n_heads, 162 | n_layers, 163 | kernel_size, 164 | p_dropout) 165 | self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) 166 | 167 | def forward(self, x, x_lengths): 168 | x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] 169 | x = torch.transpose(x, 1, -1) # [b, h, t] 170 | x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) 171 
| 172 | x = self.encoder(x * x_mask, x_mask) 173 | stats = self.proj(x) * x_mask 174 | 175 | m, logs = torch.split(stats, self.out_channels, dim=1) 176 | return x, m, logs, x_mask 177 | 178 | 179 | class ResidualCouplingBlock(nn.Module): 180 | def __init__(self, 181 | channels, 182 | hidden_channels, 183 | kernel_size, 184 | dilation_rate, 185 | n_layers, 186 | n_flows=4, 187 | gin_channels=0): 188 | super().__init__() 189 | self.channels = channels 190 | self.hidden_channels = hidden_channels 191 | self.kernel_size = kernel_size 192 | self.dilation_rate = dilation_rate 193 | self.n_layers = n_layers 194 | self.n_flows = n_flows 195 | self.gin_channels = gin_channels 196 | 197 | self.flows = nn.ModuleList() 198 | for i in range(n_flows): 199 | self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) 200 | self.flows.append(modules.Flip()) 201 | 202 | def forward(self, x, x_mask, g=None, reverse=False): 203 | if not reverse: 204 | for flow in self.flows: 205 | x, _ = flow(x, x_mask, g=g, reverse=reverse) 206 | else: 207 | for flow in reversed(self.flows): 208 | x = flow(x, x_mask, g=g, reverse=reverse) 209 | return x 210 | 211 | 212 | class PosteriorEncoder(nn.Module): 213 | def __init__(self, 214 | in_channels, 215 | out_channels, 216 | hidden_channels, 217 | kernel_size, 218 | dilation_rate, 219 | n_layers, 220 | gin_channels=0): 221 | super().__init__() 222 | self.in_channels = in_channels 223 | self.out_channels = out_channels 224 | self.hidden_channels = hidden_channels 225 | self.kernel_size = kernel_size 226 | self.dilation_rate = dilation_rate 227 | self.n_layers = n_layers 228 | self.gin_channels = gin_channels 229 | 230 | self.pre = nn.Conv1d(in_channels, hidden_channels, 1) 231 | self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) 232 | self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) 233 | 234 | def forward(self, x, x_lengths, g=None): 235 | x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) 236 | x = self.pre(x) * x_mask 237 | x = self.enc(x, x_mask, g=g) 238 | stats = self.proj(x) * x_mask 239 | m, logs = torch.split(stats, self.out_channels, dim=1) 240 | z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask 241 | return z, m, logs, x_mask 242 | 243 | 244 | class Generator(torch.nn.Module): 245 | def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): 246 | super(Generator, self).__init__() 247 | self.num_kernels = len(resblock_kernel_sizes) 248 | self.num_upsamples = len(upsample_rates) 249 | self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) 250 | resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 251 | 252 | self.ups = nn.ModuleList() 253 | for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): 254 | self.ups.append(weight_norm( 255 | ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), 256 | k, u, padding=(k-u)//2))) 257 | 258 | self.resblocks = nn.ModuleList() 259 | for i in range(len(self.ups)): 260 | ch = upsample_initial_channel//(2**(i+1)) 261 | for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): 262 | self.resblocks.append(resblock(ch, k, d)) 263 | 264 | self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) 265 | 
self.ups.apply(init_weights) 266 | 267 | if gin_channels != 0: 268 | self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) 269 | 270 | def forward(self, x, g=None): 271 | x = self.conv_pre(x) 272 | if g is not None: 273 | x = x + self.cond(g) 274 | 275 | for i in range(self.num_upsamples): 276 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 277 | x = self.ups[i](x) 278 | xs = None 279 | for j in range(self.num_kernels): 280 | if xs is None: 281 | xs = self.resblocks[i*self.num_kernels+j](x) 282 | else: 283 | xs += self.resblocks[i*self.num_kernels+j](x) 284 | x = xs / self.num_kernels 285 | x = F.leaky_relu(x) 286 | x = self.conv_post(x) 287 | x = torch.tanh(x) 288 | 289 | return x 290 | 291 | def remove_weight_norm(self): 292 | print('Removing weight norm...') 293 | for l in self.ups: 294 | remove_weight_norm(l) 295 | for l in self.resblocks: 296 | l.remove_weight_norm() 297 | 298 | 299 | class DiscriminatorP(torch.nn.Module): 300 | def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): 301 | super(DiscriminatorP, self).__init__() 302 | self.period = period 303 | self.use_spectral_norm = use_spectral_norm 304 | norm_f = weight_norm if use_spectral_norm == False else spectral_norm 305 | self.convs = nn.ModuleList([ 306 | norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 307 | norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 308 | norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 309 | norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 310 | norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), 311 | ]) 312 | self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) 313 | 314 | def forward(self, x): 315 | fmap = [] 316 | 317 | # 1d to 2d 318 | b, c, t = x.shape 319 | if t % self.period != 0: # pad first 320 | n_pad = self.period - (t % self.period) 321 | x = F.pad(x, (0, n_pad), "reflect") 322 | t = t + n_pad 323 | x = x.view(b, c, t // self.period, self.period) 324 | 325 | for l in self.convs: 326 | x = l(x) 327 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 328 | fmap.append(x) 329 | x = self.conv_post(x) 330 | fmap.append(x) 331 | x = torch.flatten(x, 1, -1) 332 | 333 | return x, fmap 334 | 335 | 336 | class DiscriminatorS(torch.nn.Module): 337 | def __init__(self, use_spectral_norm=False): 338 | super(DiscriminatorS, self).__init__() 339 | norm_f = weight_norm if use_spectral_norm == False else spectral_norm 340 | self.convs = nn.ModuleList([ 341 | norm_f(Conv1d(1, 16, 15, 1, padding=7)), 342 | norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), 343 | norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), 344 | norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), 345 | norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), 346 | norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), 347 | ]) 348 | self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) 349 | 350 | def forward(self, x): 351 | fmap = [] 352 | 353 | for l in self.convs: 354 | x = l(x) 355 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 356 | fmap.append(x) 357 | x = self.conv_post(x) 358 | fmap.append(x) 359 | x = torch.flatten(x, 1, -1) 360 | 361 | return x, fmap 362 | 363 | 364 | class MultiPeriodDiscriminator(torch.nn.Module): 365 | def __init__(self, use_spectral_norm=False): 366 | super(MultiPeriodDiscriminator, self).__init__() 367 | periods = [2,3,5,7,11] 368 
| 369 | discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] 370 | discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] 371 | self.discriminators = nn.ModuleList(discs) 372 | 373 | def forward(self, y, y_hat): 374 | y_d_rs = [] 375 | y_d_gs = [] 376 | fmap_rs = [] 377 | fmap_gs = [] 378 | for i, d in enumerate(self.discriminators): 379 | y_d_r, fmap_r = d(y) 380 | y_d_g, fmap_g = d(y_hat) 381 | y_d_rs.append(y_d_r) 382 | y_d_gs.append(y_d_g) 383 | fmap_rs.append(fmap_r) 384 | fmap_gs.append(fmap_g) 385 | 386 | return y_d_rs, y_d_gs, fmap_rs, fmap_gs 387 | 388 | 389 | 390 | class SynthesizerTrn(nn.Module): 391 | """ 392 | Synthesizer for Training 393 | """ 394 | 395 | def __init__(self, 396 | n_vocab, 397 | spec_channels, 398 | segment_size, 399 | inter_channels, 400 | hidden_channels, 401 | filter_channels, 402 | n_heads, 403 | n_layers, 404 | kernel_size, 405 | p_dropout, 406 | resblock, 407 | resblock_kernel_sizes, 408 | resblock_dilation_sizes, 409 | upsample_rates, 410 | upsample_initial_channel, 411 | upsample_kernel_sizes, 412 | n_speakers=0, 413 | gin_channels=0, 414 | use_sdp=True, 415 | **kwargs): 416 | 417 | super().__init__() 418 | self.n_vocab = n_vocab 419 | self.spec_channels = spec_channels 420 | self.inter_channels = inter_channels 421 | self.hidden_channels = hidden_channels 422 | self.filter_channels = filter_channels 423 | self.n_heads = n_heads 424 | self.n_layers = n_layers 425 | self.kernel_size = kernel_size 426 | self.p_dropout = p_dropout 427 | self.resblock = resblock 428 | self.resblock_kernel_sizes = resblock_kernel_sizes 429 | self.resblock_dilation_sizes = resblock_dilation_sizes 430 | self.upsample_rates = upsample_rates 431 | self.upsample_initial_channel = upsample_initial_channel 432 | self.upsample_kernel_sizes = upsample_kernel_sizes 433 | self.segment_size = segment_size 434 | self.n_speakers = n_speakers 435 | self.gin_channels = gin_channels 436 | 437 | self.use_sdp = use_sdp 438 | 439 | self.enc_p = TextEncoder(n_vocab, 440 | inter_channels, 441 | hidden_channels, 442 | filter_channels, 443 | n_heads, 444 | n_layers, 445 | kernel_size, 446 | p_dropout) 447 | self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) 448 | self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) 449 | self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) 450 | 451 | if use_sdp: 452 | self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) 453 | else: 454 | self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) 455 | 456 | if n_speakers > 1: 457 | self.emb_g = nn.Embedding(n_speakers, gin_channels) 458 | 459 | def forward(self, x, x_lengths, y, y_lengths, sid=None): 460 | 461 | x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) 462 | if self.n_speakers > 0: 463 | g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] 464 | else: 465 | g = None 466 | 467 | z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) 468 | z_p = self.flow(z, y_mask, g=g) 469 | 470 | with torch.no_grad(): 471 | # negative cross-entropy 472 | s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] 473 | neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] 474 | neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), 
s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] 475 | neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] 476 | neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] 477 | neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 478 | 479 | attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) 480 | attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() 481 | 482 | w = attn.sum(2) 483 | if self.use_sdp: 484 | l_length = self.dp(x, x_mask, w, g=g) 485 | l_length = l_length / torch.sum(x_mask) 486 | else: 487 | logw_ = torch.log(w + 1e-6) * x_mask 488 | logw = self.dp(x, x_mask, g=g) 489 | l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging 490 | 491 | # expand prior 492 | m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) 493 | logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) 494 | 495 | z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) 496 | o = self.dec(z_slice, g=g) 497 | return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) 498 | 499 | def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): 500 | x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) 501 | if self.n_speakers > 0: 502 | g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] 503 | else: 504 | g = None 505 | 506 | if self.use_sdp: 507 | logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) 508 | else: 509 | logw = self.dp(x, x_mask, g=g) 510 | w = torch.exp(logw) * x_mask * length_scale 511 | w_ceil = torch.ceil(w) 512 | y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() 513 | y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) 514 | attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) 515 | attn = commons.generate_path(w_ceil, attn_mask) 516 | 517 | m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] 518 | logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] 519 | 520 | z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale 521 | z = self.flow(z_p, y_mask, g=g, reverse=True) 522 | o = self.dec((z * y_mask)[:,:,:max_len], g=g) 523 | return o, attn, y_mask, (z, z_p, m_p, logs_p) 524 | 525 | def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): 526 | assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
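        # Conversion path: encode the utterance with the *source* speaker
        # embedding, map the latents into the prior space with the flow, then
        # invert the flow under the *target* speaker embedding and decode.
        # Minimal usage sketch (mirrors inference.ipynb; the "net_g", "spec"
        # and "spec_lengths" names are assumed from that notebook):
        #   sid_src = torch.LongTensor([10]).cuda()
        #   sid_tgt = torch.LongTensor([4]).cuda()
        #   o_hat, y_mask, _ = net_g.voice_conversion(spec, spec_lengths, sid_src, sid_tgt)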
527 |         g_src = self.emb_g(sid_src).unsqueeze(-1)
528 |         g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
529 |         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
530 |         z_p = self.flow(z, y_mask, g=g_src)
531 |         z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
532 |         o_hat = self.dec(z_hat * y_mask, g=g_tgt)
533 |         return o_hat, y_mask, (z, z_p, z_hat)
534 | 
535 | 
--------------------------------------------------------------------------------
/modules.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import math
3 | import numpy as np
4 | import scipy
5 | import torch
6 | from torch import nn
7 | from torch.nn import functional as F
8 | 
9 | from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10 | from torch.nn.utils import weight_norm, remove_weight_norm
11 | 
12 | import commons
13 | from commons import init_weights, get_padding
14 | from transforms import piecewise_rational_quadratic_transform
15 | 
16 | 
17 | LRELU_SLOPE = 0.1
18 | 
19 | 
20 | class LayerNorm(nn.Module):
21 |     def __init__(self, channels, eps=1e-5):
22 |         super().__init__()
23 |         self.channels = channels
24 |         self.eps = eps
25 | 
26 |         self.gamma = nn.Parameter(torch.ones(channels))
27 |         self.beta = nn.Parameter(torch.zeros(channels))
28 | 
29 |     def forward(self, x):
30 |         x = x.transpose(1, -1)
31 |         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32 |         return x.transpose(1, -1)
33 | 
34 | 
35 | class ConvReluNorm(nn.Module):
36 |     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
37 |         super().__init__()
38 |         self.in_channels = in_channels
39 |         self.hidden_channels = hidden_channels
40 |         self.out_channels = out_channels
41 |         self.kernel_size = kernel_size
42 |         self.n_layers = n_layers
43 |         self.p_dropout = p_dropout
44 |         assert n_layers > 1, "Number of layers should be larger than 1."
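        # one in->hidden conv followed by (n_layers - 1) hidden->hidden convs,
        # each with LayerNorm + ReLU + dropout; the zero-initialized 1x1
        # projection below makes the residual branch start out as an identity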
45 | 
46 |         self.conv_layers = nn.ModuleList()
47 |         self.norm_layers = nn.ModuleList()
48 |         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
49 |         self.norm_layers.append(LayerNorm(hidden_channels))
50 |         self.relu_drop = nn.Sequential(
51 |             nn.ReLU(),
52 |             nn.Dropout(p_dropout))
53 |         for _ in range(n_layers-1):
54 |             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
55 |             self.norm_layers.append(LayerNorm(hidden_channels))
56 |         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
57 |         self.proj.weight.data.zero_()
58 |         self.proj.bias.data.zero_()
59 | 
60 |     def forward(self, x, x_mask):
61 |         x_org = x
62 |         for i in range(self.n_layers):
63 |             x = self.conv_layers[i](x * x_mask)
64 |             x = self.norm_layers[i](x)
65 |             x = self.relu_drop(x)
66 |         x = x_org + self.proj(x)
67 |         return x * x_mask
68 | 
69 | 
70 | class DDSConv(nn.Module):
71 |     """
72 |     Dilated and Depth-Separable Convolution
73 |     """
74 |     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
75 |         super().__init__()
76 |         self.channels = channels
77 |         self.kernel_size = kernel_size
78 |         self.n_layers = n_layers
79 |         self.p_dropout = p_dropout
80 | 
81 |         self.drop = nn.Dropout(p_dropout)
82 |         self.convs_sep = nn.ModuleList()
83 |         self.convs_1x1 = nn.ModuleList()
84 |         self.norms_1 = nn.ModuleList()
85 |         self.norms_2 = nn.ModuleList()
86 |         for i in range(n_layers):
87 |             dilation = kernel_size ** i
88 |             padding = (kernel_size * dilation - dilation) // 2
89 |             self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
90 |                 groups=channels, dilation=dilation, padding=padding
91 |             ))
92 |             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
93 |             self.norms_1.append(LayerNorm(channels))
94 |             self.norms_2.append(LayerNorm(channels))
95 | 
96 |     def forward(self, x, x_mask, g=None):
97 |         if g is not None:
98 |             x = x + g
99 |         for i in range(self.n_layers):
100 |             y = self.convs_sep[i](x * x_mask)
101 |             y = self.norms_1[i](y)
102 |             y = F.gelu(y)
103 |             y = self.convs_1x1[i](y)
104 |             y = self.norms_2[i](y)
105 |             y = F.gelu(y)
106 |             y = self.drop(y)
107 |             x = x + y
108 |         return x * x_mask
109 | 
110 | 
111 | class WN(torch.nn.Module):
112 |     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
113 |         super(WN, self).__init__()
114 |         assert(kernel_size % 2 == 1)
115 |         self.hidden_channels = hidden_channels
116 |         self.kernel_size = kernel_size
117 |         self.dilation_rate = dilation_rate
118 |         self.n_layers = n_layers
119 |         self.gin_channels = gin_channels
120 |         self.p_dropout = p_dropout
121 | 
122 |         self.in_layers = torch.nn.ModuleList()
123 |         self.res_skip_layers = torch.nn.ModuleList()
124 |         self.drop = nn.Dropout(p_dropout)
125 | 
126 |         if gin_channels != 0:
127 |             cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
128 |             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
129 | 
130 |         for i in range(n_layers):
131 |             dilation = dilation_rate ** i
132 |             padding = int((kernel_size * dilation - dilation) / 2)
133 |             in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
134 |                 dilation=dilation, padding=padding)
135 |             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
136 |             self.in_layers.append(in_layer)
137 | 
138 |             # last one is not necessary
139 |             if i < n_layers - 1:
140 |                 res_skip_channels = 2 * hidden_channels
141 |             else:
142 |                 res_skip_channels = hidden_channels
143 | 
144 |             res_skip_layer = 
torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) 145 | res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') 146 | self.res_skip_layers.append(res_skip_layer) 147 | 148 | def forward(self, x, x_mask, g=None, **kwargs): 149 | output = torch.zeros_like(x) 150 | n_channels_tensor = torch.IntTensor([self.hidden_channels]) 151 | 152 | if g is not None: 153 | g = self.cond_layer(g) 154 | 155 | for i in range(self.n_layers): 156 | x_in = self.in_layers[i](x) 157 | if g is not None: 158 | cond_offset = i * 2 * self.hidden_channels 159 | g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] 160 | else: 161 | g_l = torch.zeros_like(x_in) 162 | 163 | acts = commons.fused_add_tanh_sigmoid_multiply( 164 | x_in, 165 | g_l, 166 | n_channels_tensor) 167 | acts = self.drop(acts) 168 | 169 | res_skip_acts = self.res_skip_layers[i](acts) 170 | if i < self.n_layers - 1: 171 | res_acts = res_skip_acts[:,:self.hidden_channels,:] 172 | x = (x + res_acts) * x_mask 173 | output = output + res_skip_acts[:,self.hidden_channels:,:] 174 | else: 175 | output = output + res_skip_acts 176 | return output * x_mask 177 | 178 | def remove_weight_norm(self): 179 | if self.gin_channels != 0: 180 | torch.nn.utils.remove_weight_norm(self.cond_layer) 181 | for l in self.in_layers: 182 | torch.nn.utils.remove_weight_norm(l) 183 | for l in self.res_skip_layers: 184 | torch.nn.utils.remove_weight_norm(l) 185 | 186 | 187 | class ResBlock1(torch.nn.Module): 188 | def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): 189 | super(ResBlock1, self).__init__() 190 | self.convs1 = nn.ModuleList([ 191 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], 192 | padding=get_padding(kernel_size, dilation[0]))), 193 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], 194 | padding=get_padding(kernel_size, dilation[1]))), 195 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], 196 | padding=get_padding(kernel_size, dilation[2]))) 197 | ]) 198 | self.convs1.apply(init_weights) 199 | 200 | self.convs2 = nn.ModuleList([ 201 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 202 | padding=get_padding(kernel_size, 1))), 203 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 204 | padding=get_padding(kernel_size, 1))), 205 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 206 | padding=get_padding(kernel_size, 1))) 207 | ]) 208 | self.convs2.apply(init_weights) 209 | 210 | def forward(self, x, x_mask=None): 211 | for c1, c2 in zip(self.convs1, self.convs2): 212 | xt = F.leaky_relu(x, LRELU_SLOPE) 213 | if x_mask is not None: 214 | xt = xt * x_mask 215 | xt = c1(xt) 216 | xt = F.leaky_relu(xt, LRELU_SLOPE) 217 | if x_mask is not None: 218 | xt = xt * x_mask 219 | xt = c2(xt) 220 | x = xt + x 221 | if x_mask is not None: 222 | x = x * x_mask 223 | return x 224 | 225 | def remove_weight_norm(self): 226 | for l in self.convs1: 227 | remove_weight_norm(l) 228 | for l in self.convs2: 229 | remove_weight_norm(l) 230 | 231 | 232 | class ResBlock2(torch.nn.Module): 233 | def __init__(self, channels, kernel_size=3, dilation=(1, 3)): 234 | super(ResBlock2, self).__init__() 235 | self.convs = nn.ModuleList([ 236 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], 237 | padding=get_padding(kernel_size, dilation[0]))), 238 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], 239 | padding=get_padding(kernel_size, dilation[1]))) 240 | ]) 
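        # ResBlock2 is the lighter HiFi-GAN variant (used by Generator when the
        # "resblock" hparam is not "1"): a single dilated conv per step instead
        # of ResBlock1's dilated + non-dilated pair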
241 | self.convs.apply(init_weights) 242 | 243 | def forward(self, x, x_mask=None): 244 | for c in self.convs: 245 | xt = F.leaky_relu(x, LRELU_SLOPE) 246 | if x_mask is not None: 247 | xt = xt * x_mask 248 | xt = c(xt) 249 | x = xt + x 250 | if x_mask is not None: 251 | x = x * x_mask 252 | return x 253 | 254 | def remove_weight_norm(self): 255 | for l in self.convs: 256 | remove_weight_norm(l) 257 | 258 | 259 | class Log(nn.Module): 260 | def forward(self, x, x_mask, reverse=False, **kwargs): 261 | if not reverse: 262 | y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask 263 | logdet = torch.sum(-y, [1, 2]) 264 | return y, logdet 265 | else: 266 | x = torch.exp(x) * x_mask 267 | return x 268 | 269 | 270 | class Flip(nn.Module): 271 | def forward(self, x, *args, reverse=False, **kwargs): 272 | x = torch.flip(x, [1]) 273 | if not reverse: 274 | logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) 275 | return x, logdet 276 | else: 277 | return x 278 | 279 | 280 | class ElementwiseAffine(nn.Module): 281 | def __init__(self, channels): 282 | super().__init__() 283 | self.channels = channels 284 | self.m = nn.Parameter(torch.zeros(channels,1)) 285 | self.logs = nn.Parameter(torch.zeros(channels,1)) 286 | 287 | def forward(self, x, x_mask, reverse=False, **kwargs): 288 | if not reverse: 289 | y = self.m + torch.exp(self.logs) * x 290 | y = y * x_mask 291 | logdet = torch.sum(self.logs * x_mask, [1,2]) 292 | return y, logdet 293 | else: 294 | x = (x - self.m) * torch.exp(-self.logs) * x_mask 295 | return x 296 | 297 | 298 | class ResidualCouplingLayer(nn.Module): 299 | def __init__(self, 300 | channels, 301 | hidden_channels, 302 | kernel_size, 303 | dilation_rate, 304 | n_layers, 305 | p_dropout=0, 306 | gin_channels=0, 307 | mean_only=False): 308 | assert channels % 2 == 0, "channels should be divisible by 2" 309 | super().__init__() 310 | self.channels = channels 311 | self.hidden_channels = hidden_channels 312 | self.kernel_size = kernel_size 313 | self.dilation_rate = dilation_rate 314 | self.n_layers = n_layers 315 | self.half_channels = channels // 2 316 | self.mean_only = mean_only 317 | 318 | self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) 319 | self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) 320 | self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) 321 | self.post.weight.data.zero_() 322 | self.post.bias.data.zero_() 323 | 324 | def forward(self, x, x_mask, g=None, reverse=False): 325 | x0, x1 = torch.split(x, [self.half_channels]*2, 1) 326 | h = self.pre(x0) * x_mask 327 | h = self.enc(h, x_mask, g=g) 328 | stats = self.post(h) * x_mask 329 | if not self.mean_only: 330 | m, logs = torch.split(stats, [self.half_channels]*2, 1) 331 | else: 332 | m = stats 333 | logs = torch.zeros_like(m) 334 | 335 | if not reverse: 336 | x1 = m + x1 * torch.exp(logs) * x_mask 337 | x = torch.cat([x0, x1], 1) 338 | logdet = torch.sum(logs, [1,2]) 339 | return x, logdet 340 | else: 341 | x1 = (x1 - m) * torch.exp(-logs) * x_mask 342 | x = torch.cat([x0, x1], 1) 343 | return x 344 | 345 | 346 | class ConvFlow(nn.Module): 347 | def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): 348 | super().__init__() 349 | self.in_channels = in_channels 350 | self.filter_channels = filter_channels 351 | self.kernel_size = kernel_size 352 | self.n_layers = n_layers 353 | self.num_bins = num_bins 354 | self.tail_bound = tail_bound 355 | 
self.half_channels = in_channels // 2 356 | 357 | self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) 358 | self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) 359 | self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) 360 | self.proj.weight.data.zero_() 361 | self.proj.bias.data.zero_() 362 | 363 | def forward(self, x, x_mask, g=None, reverse=False): 364 | x0, x1 = torch.split(x, [self.half_channels]*2, 1) 365 | h = self.pre(x0) 366 | h = self.convs(h, x_mask, g=g) 367 | h = self.proj(h) * x_mask 368 | 369 | b, c, t = x0.shape 370 | h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 371 | 372 | unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) 373 | unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) 374 | unnormalized_derivatives = h[..., 2 * self.num_bins:] 375 | 376 | x1, logabsdet = piecewise_rational_quadratic_transform(x1, 377 | unnormalized_widths, 378 | unnormalized_heights, 379 | unnormalized_derivatives, 380 | inverse=reverse, 381 | tails='linear', 382 | tail_bound=self.tail_bound 383 | ) 384 | 385 | x = torch.cat([x0, x1], 1) * x_mask 386 | logdet = torch.sum(logabsdet * x_mask, [1,2]) 387 | if not reverse: 388 | return x, logdet 389 | else: 390 | return x 391 | -------------------------------------------------------------------------------- /monotonic_align/__init__.py: -------------------------------------------------------------------------------- 1 | from numpy import zeros, int32, float32 2 | from torch import from_numpy 3 | 4 | from .core import maximum_path_jit 5 | 6 | 7 | def maximum_path(neg_cent, mask): 8 | """ numba optimized version. 9 | neg_cent: [b, t_t, t_s] 10 | mask: [b, t_t, t_s] 11 | """ 12 | device = neg_cent.device 13 | dtype = neg_cent.dtype 14 | neg_cent = neg_cent.data.cpu().numpy().astype(float32) 15 | path = zeros(neg_cent.shape, dtype=int32) 16 | 17 | t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) 18 | t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) 19 | maximum_path_jit(path, neg_cent, t_t_max, t_s_max) 20 | return from_numpy(path).to(device=device, dtype=dtype) 21 | -------------------------------------------------------------------------------- /monotonic_align/core.py: -------------------------------------------------------------------------------- 1 | import numba 2 | 3 | 4 | @numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), 5 | nopython=True, nogil=True) 6 | def maximum_path_jit(paths, values, t_ys, t_xs): 7 | b = paths.shape[0] 8 | max_neg_val = -1e9 9 | for i in range(int(b)): 10 | path = paths[i] 11 | value = values[i] 12 | t_y = t_ys[i] 13 | t_x = t_xs[i] 14 | 15 | v_prev = v_cur = 0.0 16 | index = t_x - 1 17 | 18 | for y in range(t_y): 19 | for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): 20 | if x == y: 21 | v_cur = max_neg_val 22 | else: 23 | v_cur = value[y - 1, x] 24 | if x == 0: 25 | if y == 0: 26 | v_prev = 0. 
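                        # x == 0 has no diagonal predecessor: only the start
                        # cell (0, 0) gets a zero score; every other first-column
                        # cell rules the diagonal move out with -inf and can only
                        # be reached by staying in its column.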
27 | else: 28 | v_prev = max_neg_val 29 | else: 30 | v_prev = value[y - 1, x - 1] 31 | value[y, x] += max(v_prev, v_cur) 32 | 33 | for y in range(t_y - 1, -1, -1): 34 | path[y, index] = 1 35 | if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): 36 | index = index - 1 -------------------------------------------------------------------------------- /preprocess.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import text 3 | from utils import load_filepaths_and_text 4 | 5 | if __name__ == '__main__': 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--out_extension", default="cleaned") 8 | parser.add_argument("--text_index", default=2, type=int) 9 | parser.add_argument("--filelists", nargs="+", default=["filelists/miyu_train.txt", "filelists/miyu_val.txt"]) 10 | parser.add_argument("--text_cleaners", nargs="+", default=["japanese_cleaners"]) 11 | 12 | args = parser.parse_args() 13 | 14 | 15 | for filelist in args.filelists: 16 | print("START:", filelist) 17 | filepaths_and_text = load_filepaths_and_text(filelist) 18 | for i in range(len(filepaths_and_text)): 19 | original_text = filepaths_and_text[i][args.text_index] 20 | cleaned_text = text._clean_text(original_text, args.text_cleaners) 21 | filepaths_and_text[i][args.text_index] = cleaned_text 22 | 23 | new_filelist = filelist + "." + args.out_extension 24 | with open(new_filelist, "w", encoding="utf-8") as f: 25 | f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) 26 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Cython 2 | librosa==0.9.1 3 | matplotlib 4 | numpy 5 | scipy 6 | tensorboard 7 | torch==1.13.1 8 | torchvision 9 | unidecode 10 | pyopenjtalk 11 | protobuf 12 | tqdm 13 | -------------------------------------------------------------------------------- /text/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Keith Ito 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | -------------------------------------------------------------------------------- /text/__init__.py: -------------------------------------------------------------------------------- 1 | """ from https://github.com/keithito/tacotron """ 2 | from text import cleaners 3 | from text.symbols import symbols 4 | 5 | 6 | # Mappings from symbol to numeric ID and vice versa: 7 | _symbol_to_id = {s: i for i, s in enumerate(symbols)} 8 | _id_to_symbol = {i: s for i, s in enumerate(symbols)} 9 | 10 | 11 | def text_to_sequence(text, cleaner_names): 12 | '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 13 | Args: 14 | text: string to convert to a sequence 15 | cleaner_names: names of the cleaner functions to run the text through 16 | Returns: 17 | List of integers corresponding to the symbols in the text 18 | ''' 19 | sequence = [] 20 | 21 | clean_text = _clean_text(text, cleaner_names) 22 | for symbol in clean_text: 23 | if symbol not in _symbol_to_id.keys(): 24 | continue 25 | symbol_id = _symbol_to_id[symbol] 26 | sequence += [symbol_id] 27 | return sequence 28 | 29 | 30 | def cleaned_text_to_sequence(cleaned_text): 31 | '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 32 | Args: 33 | text: string to convert to a sequence 34 | Returns: 35 | List of integers corresponding to the symbols in the text 36 | ''' 37 | sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] 38 | return sequence 39 | 40 | 41 | def sequence_to_text(sequence): 42 | '''Converts a sequence of IDs back to a string''' 43 | result = '' 44 | for symbol_id in sequence: 45 | s = _id_to_symbol[symbol_id] 46 | result += s 47 | return result 48 | 49 | 50 | def _clean_text(text, cleaner_names): 51 | for name in cleaner_names: 52 | cleaner = getattr(cleaners, name) 53 | if not cleaner: 54 | raise Exception('Unknown cleaner: %s' % name) 55 | text = cleaner(text) 56 | return text 57 | -------------------------------------------------------------------------------- /text/cleaners.py: -------------------------------------------------------------------------------- 1 | import re 2 | from text.japanese import japanese_to_romaji_with_accent 3 | 4 | def japanese_cleaners(text): 5 | text = f'[JA]{text}[JA]' 6 | text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent( 7 | x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text) 8 | text = re.sub(r'\s+$', '', text) 9 | text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) 10 | return text 11 | -------------------------------------------------------------------------------- /text/japanese.py: -------------------------------------------------------------------------------- 1 | import re 2 | from unidecode import unidecode 3 | import pyopenjtalk 4 | 5 | 6 | # Regular expression matching Japanese without punctuation marks: 7 | _japanese_characters = re.compile( 8 | r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') 9 | 10 | # Regular expression matching non-Japanese characters or punctuation marks: 11 | _japanese_marks = re.compile( 12 | r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') 13 | 14 | # List of (symbol, Japanese) pairs for marks: 15 | _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ 16 | ('%', 'パーセント') 17 | ]] 18 | 19 | # List of (romaji, ipa) pairs for marks: 20 | _romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) 
for x in [ 21 | ('ts', 'ʦ'), 22 | ('u', 'ɯ'), 23 | ('j', 'ʥ'), 24 | ('y', 'j'), 25 | ('ni', 'n^i'), 26 | ('nj', 'n^'), 27 | ('hi', 'çi'), 28 | ('hj', 'ç'), 29 | ('f', 'ɸ'), 30 | ('I', 'i*'), 31 | ('U', 'ɯ*'), 32 | ('r', 'ɾ') 33 | ]] 34 | 35 | # List of (romaji, ipa2) pairs for marks: 36 | _romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ 37 | ('u', 'ɯ'), 38 | ('ʧ', 'tʃ'), 39 | ('j', 'dʑ'), 40 | ('y', 'j'), 41 | ('ni', 'n^i'), 42 | ('nj', 'n^'), 43 | ('hi', 'çi'), 44 | ('hj', 'ç'), 45 | ('f', 'ɸ'), 46 | ('I', 'i*'), 47 | ('U', 'ɯ*'), 48 | ('r', 'ɾ') 49 | ]] 50 | 51 | # List of (consonant, sokuon) pairs: 52 | _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ 53 | (r'Q([↑↓]*[kg])', r'k#\1'), 54 | (r'Q([↑↓]*[tdjʧ])', r't#\1'), 55 | (r'Q([↑↓]*[sʃ])', r's\1'), 56 | (r'Q([↑↓]*[pb])', r'p#\1') 57 | ]] 58 | 59 | # List of (consonant, hatsuon) pairs: 60 | _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ 61 | (r'N([↑↓]*[pbm])', r'm\1'), 62 | (r'N([↑↓]*[ʧʥj])', r'n^\1'), 63 | (r'N([↑↓]*[tdn])', r'n\1'), 64 | (r'N([↑↓]*[kg])', r'ŋ\1') 65 | ]] 66 | 67 | 68 | def symbols_to_japanese(text): 69 | for regex, replacement in _symbols_to_japanese: 70 | text = re.sub(regex, replacement, text) 71 | return text 72 | 73 | 74 | def japanese_to_romaji_with_accent(text): 75 | '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' 76 | text = symbols_to_japanese(text) 77 | sentences = re.split(_japanese_marks, text) 78 | marks = re.findall(_japanese_marks, text) 79 | text = '' 80 | for i, sentence in enumerate(sentences): 81 | if re.match(_japanese_characters, sentence): 82 | if text != '': 83 | text += ' ' 84 | labels = pyopenjtalk.extract_fullcontext(sentence) 85 | for n, label in enumerate(labels): 86 | phoneme = re.search(r'\-([^\+]*)\+', label).group(1) 87 | if phoneme not in ['sil', 'pau']: 88 | text += phoneme.replace('ch', 'ʧ').replace('sh', 89 | 'ʃ').replace('cl', 'Q') 90 | else: 91 | continue 92 | # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) 93 | a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) 94 | a2 = int(re.search(r"\+(\d+)\+", label).group(1)) 95 | a3 = int(re.search(r"\+(\d+)/", label).group(1)) 96 | if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: 97 | a2_next = -1 98 | else: 99 | a2_next = int( 100 | re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) 101 | # Accent phrase boundary 102 | if a3 == 1 and a2_next == 1: 103 | text += ' ' 104 | # Falling 105 | elif a1 == 0 and a2_next == a2 + 1: 106 | text += '↓' 107 | # Rising 108 | elif a2 == 1 and a2_next == 2: 109 | text += '↑' 110 | if i < len(marks): 111 | text += unidecode(marks[i]).replace(' ', '') 112 | return text 113 | 114 | 115 | def get_real_sokuon(text): 116 | for regex, replacement in _real_sokuon: 117 | text = re.sub(regex, replacement, text) 118 | return text 119 | 120 | 121 | def get_real_hatsuon(text): 122 | for regex, replacement in _real_hatsuon: 123 | text = re.sub(regex, replacement, text) 124 | return text 125 | 126 | 127 | def japanese_to_ipa(text): 128 | text = japanese_to_romaji_with_accent(text).replace('...', '…') 129 | text = re.sub( 130 | r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) 131 | text = get_real_sokuon(text) 132 | text = get_real_hatsuon(text) 133 | for regex, replacement in _romaji_to_ipa: 134 | text = re.sub(regex, replacement, text) 135 | return text 136 | 137 | 138 | def japanese_to_ipa2(text): 139 | text = japanese_to_romaji_with_accent(text).replace('...', '…') 140 | text = 
get_real_sokuon(text) 141 | text = get_real_hatsuon(text) 142 | for regex, replacement in _romaji_to_ipa2: 143 | text = re.sub(regex, replacement, text) 144 | return text 145 | 146 | 147 | def japanese_to_ipa3(text): 148 | text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( 149 | 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') 150 | text = re.sub( 151 | r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) 152 | text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) 153 | return text 154 | -------------------------------------------------------------------------------- /text/symbols.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Defines the set of symbols used in text input to the model. 3 | ''' 4 | 5 | _pad = '_' 6 | _punctuation = ',.!?-~…' 7 | _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' 8 | 9 | 10 | 11 | # Export all symbols: 12 | symbols = [_pad] + list(_punctuation) + list(_letters) 13 | 14 | # Special symbol ids 15 | SPACE_ID = symbols.index(" ") 16 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import itertools 5 | import math 6 | import torch 7 | from torch import nn, optim 8 | from torch.nn import functional as F 9 | from torch.utils.data import DataLoader 10 | from torch.utils.tensorboard import SummaryWriter 11 | import torch.multiprocessing as mp 12 | import torch.distributed as dist 13 | from torch.nn.parallel import DistributedDataParallel as DDP 14 | from torch.cuda.amp import autocast, GradScaler 15 | from tqdm import tqdm 16 | 17 | import librosa 18 | import logging 19 | 20 | logging.getLogger('numba').setLevel(logging.WARNING) 21 | 22 | import commons 23 | import utils 24 | from data_utils import ( 25 | TextAudioLoader, 26 | TextAudioCollate, 27 | DistributedBucketSampler 28 | ) 29 | from models import ( 30 | SynthesizerTrn, 31 | MultiPeriodDiscriminator, 32 | ) 33 | from losses import ( 34 | generator_loss, 35 | discriminator_loss, 36 | feature_loss, 37 | kl_loss 38 | ) 39 | from mel_processing import mel_spectrogram_torch, spec_to_mel_torch 40 | from text.symbols import symbols 41 | 42 | 43 | torch.backends.cudnn.benchmark = True 44 | global_step = 0 45 | 46 | 47 | def main(): 48 | """Assume Single Node Multi GPUs Training Only""" 49 | assert torch.cuda.is_available(), "CPU training is not allowed." 
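    # Single-node DDP: one worker process is spawned per visible GPU, and the
    # processes rendezvous over localhost:8000 via the env:// init method used
    # by dist.init_process_group in run() below.
    # Example launch, using the argparse flags defined in utils.get_hparams
    # (the model name here is only an illustration):
    #   python train.py -c configs/config.json -m my_model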
50 | 51 | n_gpus = torch.cuda.device_count() 52 | os.environ['MASTER_ADDR'] = 'localhost' 53 | os.environ['MASTER_PORT'] = '8000' 54 | 55 | hps = utils.get_hparams() 56 | mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) 57 | 58 | 59 | def run(rank, n_gpus, hps): 60 | global global_step 61 | if rank == 0: 62 | logger = utils.get_logger(hps.model_dir) 63 | logger.info(hps) 64 | utils.check_git_hash(hps.model_dir) 65 | writer = SummaryWriter(log_dir=hps.model_dir) 66 | writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) 67 | 68 | dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) 69 | torch.manual_seed(hps.train.seed) 70 | torch.cuda.set_device(rank) 71 | 72 | train_dataset = TextAudioLoader(hps.data.training_files, hps.data) 73 | train_sampler = DistributedBucketSampler( 74 | train_dataset, 75 | hps.train.batch_size, 76 | [32,300,400,500,600,700,800,900,1000], 77 | num_replicas=n_gpus, 78 | rank=rank, 79 | shuffle=True) 80 | collate_fn = TextAudioCollate() 81 | train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, 82 | collate_fn=collate_fn, batch_sampler=train_sampler) 83 | if rank == 0: 84 | eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data) 85 | eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, 86 | batch_size=hps.train.batch_size, pin_memory=True, 87 | drop_last=False, collate_fn=collate_fn) 88 | 89 | net_g = SynthesizerTrn( 90 | len(symbols), 91 | hps.data.filter_length // 2 + 1, 92 | hps.train.segment_size // hps.data.hop_length, 93 | **hps.model).cuda(rank) 94 | net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) 95 | optim_g = torch.optim.AdamW( 96 | net_g.parameters(), 97 | hps.train.learning_rate, 98 | betas=hps.train.betas, 99 | eps=hps.train.eps) 100 | optim_d = torch.optim.AdamW( 101 | net_d.parameters(), 102 | hps.train.learning_rate, 103 | betas=hps.train.betas, 104 | eps=hps.train.eps) 105 | net_g = DDP(net_g, device_ids=[rank]) 106 | net_d = DDP(net_d, device_ids=[rank]) 107 | 108 | try: 109 | _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) 110 | _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) 111 | global_step = (epoch_str - 1) * len(train_loader) 112 | except: 113 | epoch_str = 1 114 | global_step = 0 115 | 116 | scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) 117 | scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) 118 | 119 | scaler = GradScaler(enabled=hps.train.fp16_run) 120 | 121 | for epoch in range(epoch_str, hps.train.epochs + 1): 122 | if rank==0: 123 | train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) 124 | else: 125 | train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) 126 | scheduler_g.step() 127 | scheduler_d.step() 128 | 129 | 130 | def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): 131 | net_g, net_d = nets 132 | optim_g, optim_d = optims 133 | scheduler_g, scheduler_d = schedulers 134 | train_loader, eval_loader = loaders 135 | if writers is not None: 136 | writer, 
writer_eval = writers 137 | 138 | train_loader.batch_sampler.set_epoch(epoch) 139 | global global_step 140 | 141 | net_g.train() 142 | net_d.train() 143 | for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(tqdm(train_loader)): 144 | x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) 145 | spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) 146 | y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) 147 | 148 | with autocast(enabled=hps.train.fp16_run): 149 | y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ 150 | (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths) 151 | 152 | mel = spec_to_mel_torch( 153 | spec, 154 | hps.data.filter_length, 155 | hps.data.n_mel_channels, 156 | hps.data.sampling_rate, 157 | hps.data.mel_fmin, 158 | hps.data.mel_fmax) 159 | y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) 160 | y_hat_mel = mel_spectrogram_torch( 161 | y_hat.squeeze(1), 162 | hps.data.filter_length, 163 | hps.data.n_mel_channels, 164 | hps.data.sampling_rate, 165 | hps.data.hop_length, 166 | hps.data.win_length, 167 | hps.data.mel_fmin, 168 | hps.data.mel_fmax 169 | ) 170 | 171 | y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice 172 | 173 | # Discriminator 174 | y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) 175 | with autocast(enabled=False): 176 | loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) 177 | loss_disc_all = loss_disc 178 | optim_d.zero_grad() 179 | scaler.scale(loss_disc_all).backward() 180 | scaler.unscale_(optim_d) 181 | grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) 182 | scaler.step(optim_d) 183 | 184 | with autocast(enabled=hps.train.fp16_run): 185 | # Generator 186 | y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) 187 | with autocast(enabled=False): 188 | loss_dur = torch.sum(l_length.float()) 189 | loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel 190 | loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl 191 | 192 | loss_fm = feature_loss(fmap_r, fmap_g) 193 | loss_gen, losses_gen = generator_loss(y_d_hat_g) 194 | loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl 195 | optim_g.zero_grad() 196 | scaler.scale(loss_gen_all).backward() 197 | scaler.unscale_(optim_g) 198 | grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) 199 | scaler.step(optim_g) 200 | scaler.update() 201 | 202 | if rank==0: 203 | if global_step % hps.train.log_interval == 0: 204 | lr = optim_g.param_groups[0]['lr'] 205 | losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] 206 | logger.info('Train Epoch: {} [{:.0f}%]'.format( 207 | epoch, 208 | 100. 
* batch_idx / len(train_loader))) 209 | logger.info([x.item() for x in losses] + [global_step, lr]) 210 | 211 | scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} 212 | scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) 213 | 214 | scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) 215 | scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) 216 | scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) 217 | image_dict = { 218 | "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), 219 | "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), 220 | "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), 221 | "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) 222 | } 223 | utils.summarize( 224 | writer=writer, 225 | global_step=global_step, 226 | images=image_dict, 227 | scalars=scalar_dict) 228 | 229 | if global_step % hps.train.eval_interval == 0: 230 | evaluate(hps, net_g, eval_loader, writer_eval) 231 | utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) 232 | utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) 233 | old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000)) 234 | old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000)) 235 | if os.path.exists(old_g): 236 | os.remove(old_g) 237 | if os.path.exists(old_d): 238 | os.remove(old_d) 239 | global_step += 1 240 | 241 | if rank == 0: 242 | logger.info('====> Epoch: {}'.format(epoch)) 243 | 244 | 245 | def evaluate(hps, generator, eval_loader, writer_eval): 246 | generator.eval() 247 | with torch.no_grad(): 248 | for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader): 249 | x, x_lengths = x.cuda(0), x_lengths.cuda(0) 250 | spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) 251 | y, y_lengths = y.cuda(0), y_lengths.cuda(0) 252 | 253 | # remove else 254 | x = x[:1] 255 | x_lengths = x_lengths[:1] 256 | spec = spec[:1] 257 | spec_lengths = spec_lengths[:1] 258 | y = y[:1] 259 | y_lengths = y_lengths[:1] 260 | break 261 | y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000) 262 | y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length 263 | 264 | mel = spec_to_mel_torch( 265 | spec, 266 | hps.data.filter_length, 267 | hps.data.n_mel_channels, 268 | hps.data.sampling_rate, 269 | hps.data.mel_fmin, 270 | hps.data.mel_fmax) 271 | y_hat_mel = mel_spectrogram_torch( 272 | y_hat.squeeze(1).float(), 273 | hps.data.filter_length, 274 | hps.data.n_mel_channels, 275 | hps.data.sampling_rate, 276 | hps.data.hop_length, 277 | hps.data.win_length, 278 | hps.data.mel_fmin, 279 | hps.data.mel_fmax 280 | ) 281 | image_dict = { 282 | "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) 283 | } 284 | audio_dict = { 285 | "gen/audio": y_hat[0,:,:y_hat_lengths[0]] 286 | } 287 | if global_step == 0: 288 | image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) 289 | audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) 290 | 291 | utils.summarize( 292 | writer=writer_eval, 293 | global_step=global_step, 294 | 
images=image_dict, 295 | audios=audio_dict, 296 | audio_sampling_rate=hps.data.sampling_rate 297 | ) 298 | generator.train() 299 | 300 | 301 | if __name__ == "__main__": 302 | main() 303 | -------------------------------------------------------------------------------- /train_ms.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import itertools 5 | import math 6 | import torch 7 | from torch import nn, optim 8 | from torch.nn import functional as F 9 | from torch.utils.data import DataLoader 10 | from torch.utils.tensorboard import SummaryWriter 11 | import torch.multiprocessing as mp 12 | import torch.distributed as dist 13 | from torch.nn.parallel import DistributedDataParallel as DDP 14 | from torch.cuda.amp import autocast, GradScaler 15 | from tqdm import tqdm 16 | 17 | import librosa 18 | import logging 19 | 20 | logging.getLogger('numba').setLevel(logging.WARNING) 21 | 22 | import commons 23 | import utils 24 | from data_utils import ( 25 | TextAudioSpeakerLoader, 26 | TextAudioSpeakerCollate, 27 | DistributedBucketSampler 28 | ) 29 | from models import ( 30 | SynthesizerTrn, 31 | MultiPeriodDiscriminator, 32 | ) 33 | from losses import ( 34 | generator_loss, 35 | discriminator_loss, 36 | feature_loss, 37 | kl_loss 38 | ) 39 | from mel_processing import mel_spectrogram_torch, spec_to_mel_torch 40 | from text.symbols import symbols 41 | 42 | 43 | torch.backends.cudnn.benchmark = True 44 | global_step = 0 45 | 46 | 47 | def main(): 48 | """Assume Single Node Multi GPUs Training Only""" 49 | assert torch.cuda.is_available(), "CPU training is not allowed." 50 | 51 | n_gpus = torch.cuda.device_count() 52 | os.environ['MASTER_ADDR'] = 'localhost' 53 | os.environ['MASTER_PORT'] = '8000' 54 | 55 | hps = utils.get_hparams() 56 | mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) 57 | 58 | 59 | def run(rank, n_gpus, hps): 60 | global global_step 61 | if rank == 0: 62 | logger = utils.get_logger(hps.model_dir) 63 | logger.info(hps) 64 | utils.check_git_hash(hps.model_dir) 65 | writer = SummaryWriter(log_dir=hps.model_dir) 66 | writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) 67 | 68 | dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) 69 | torch.manual_seed(hps.train.seed) 70 | torch.cuda.set_device(rank) 71 | 72 | train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) 73 | train_sampler = DistributedBucketSampler( 74 | train_dataset, 75 | hps.train.batch_size, 76 | [32,300,400,500,600,700,800,900,1000], 77 | num_replicas=n_gpus, 78 | rank=rank, 79 | shuffle=True) 80 | collate_fn = TextAudioSpeakerCollate() 81 | train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, 82 | collate_fn=collate_fn, batch_sampler=train_sampler) 83 | if rank == 0: 84 | eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) 85 | eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, 86 | batch_size=hps.train.batch_size, pin_memory=True, 87 | drop_last=False, collate_fn=collate_fn) 88 | 89 | net_g = SynthesizerTrn( 90 | len(symbols), 91 | hps.data.filter_length // 2 + 1, 92 | hps.train.segment_size // hps.data.hop_length, 93 | n_speakers=hps.data.n_speakers, 94 | **hps.model).cuda(rank) 95 | net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) 96 | optim_g = torch.optim.AdamW( 97 | net_g.parameters(), 98 | hps.train.learning_rate, 99 
| betas=hps.train.betas, 100 | eps=hps.train.eps) 101 | optim_d = torch.optim.AdamW( 102 | net_d.parameters(), 103 | hps.train.learning_rate, 104 | betas=hps.train.betas, 105 | eps=hps.train.eps) 106 | net_g = DDP(net_g, device_ids=[rank]) 107 | net_d = DDP(net_d, device_ids=[rank]) 108 | 109 | try: 110 | _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) 111 | _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) 112 | global_step = (epoch_str - 1) * len(train_loader) 113 | except: 114 | epoch_str = 1 115 | global_step = 0 116 | 117 | scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) 118 | scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) 119 | 120 | scaler = GradScaler(enabled=hps.train.fp16_run) 121 | 122 | for epoch in range(epoch_str, hps.train.epochs + 1): 123 | if rank==0: 124 | train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) 125 | else: 126 | train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) 127 | scheduler_g.step() 128 | scheduler_d.step() 129 | 130 | 131 | def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): 132 | net_g, net_d = nets 133 | optim_g, optim_d = optims 134 | scheduler_g, scheduler_d = schedulers 135 | train_loader, eval_loader = loaders 136 | if writers is not None: 137 | writer, writer_eval = writers 138 | 139 | train_loader.batch_sampler.set_epoch(epoch) 140 | global global_step 141 | 142 | net_g.train() 143 | net_d.train() 144 | for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(tqdm(train_loader)): 145 | x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) 146 | spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) 147 | y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) 148 | speakers = speakers.cuda(rank, non_blocking=True) 149 | 150 | with autocast(enabled=hps.train.fp16_run): 151 | y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ 152 | (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers) 153 | 154 | mel = spec_to_mel_torch( 155 | spec, 156 | hps.data.filter_length, 157 | hps.data.n_mel_channels, 158 | hps.data.sampling_rate, 159 | hps.data.mel_fmin, 160 | hps.data.mel_fmax) 161 | y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) 162 | y_hat_mel = mel_spectrogram_torch( 163 | y_hat.squeeze(1), 164 | hps.data.filter_length, 165 | hps.data.n_mel_channels, 166 | hps.data.sampling_rate, 167 | hps.data.hop_length, 168 | hps.data.win_length, 169 | hps.data.mel_fmin, 170 | hps.data.mel_fmax 171 | ) 172 | 173 | y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice 174 | 175 | # Discriminator 176 | y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) 177 | with autocast(enabled=False): 178 | loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) 179 | loss_disc_all = loss_disc 180 | optim_d.zero_grad() 181 | scaler.scale(loss_disc_all).backward() 182 | 
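        # AMP bookkeeping: the gradients are unscaled in place before clipping so
        # that clip_grad_value_ sees true gradient magnitudes; scaler.step() then
        # skips the optimizer update if any gradient overflowed.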
scaler.unscale_(optim_d) 183 | grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) 184 | scaler.step(optim_d) 185 | 186 | with autocast(enabled=hps.train.fp16_run): 187 | # Generator 188 | y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) 189 | with autocast(enabled=False): 190 | loss_dur = torch.sum(l_length.float()) 191 | loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel 192 | loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl 193 | 194 | loss_fm = feature_loss(fmap_r, fmap_g) 195 | loss_gen, losses_gen = generator_loss(y_d_hat_g) 196 | loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl 197 | optim_g.zero_grad() 198 | scaler.scale(loss_gen_all).backward() 199 | scaler.unscale_(optim_g) 200 | grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) 201 | scaler.step(optim_g) 202 | scaler.update() 203 | 204 | if rank==0: 205 | if global_step % hps.train.log_interval == 0: 206 | lr = optim_g.param_groups[0]['lr'] 207 | losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] 208 | logger.info('Train Epoch: {} [{:.0f}%]'.format( 209 | epoch, 210 | 100. * batch_idx / len(train_loader))) 211 | logger.info([x.item() for x in losses] + [global_step, lr]) 212 | 213 | scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} 214 | scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) 215 | 216 | scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) 217 | scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) 218 | scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) 219 | image_dict = { 220 | "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), 221 | "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), 222 | "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), 223 | "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) 224 | } 225 | utils.summarize( 226 | writer=writer, 227 | global_step=global_step, 228 | images=image_dict, 229 | scalars=scalar_dict) 230 | 231 | if global_step % hps.train.eval_interval == 0: 232 | evaluate(hps, net_g, eval_loader, writer_eval) 233 | utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) 234 | utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) 235 | old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000)) 236 | old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000)) 237 | if os.path.exists(old_g): 238 | os.remove(old_g) 239 | if os.path.exists(old_d): 240 | os.remove(old_d) 241 | global_step += 1 242 | 243 | if rank == 0: 244 | logger.info('====> Epoch: {}'.format(epoch)) 245 | 246 | 247 | def evaluate(hps, generator, eval_loader, writer_eval): 248 | generator.eval() 249 | with torch.no_grad(): 250 | for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader): 251 | x, x_lengths = x.cuda(0), x_lengths.cuda(0) 252 | spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) 253 | y, y_lengths = y.cuda(0), y_lengths.cuda(0) 254 | speakers = speakers.cuda(0) 255 | 256 | # remove else 257 | x = x[:1] 258 | x_lengths = x_lengths[:1] 259 | spec 
= spec[:1] 260 | spec_lengths = spec_lengths[:1] 261 | y = y[:1] 262 | y_lengths = y_lengths[:1] 263 | speakers = speakers[:1] 264 | break 265 | y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000) 266 | y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length 267 | 268 | mel = spec_to_mel_torch( 269 | spec, 270 | hps.data.filter_length, 271 | hps.data.n_mel_channels, 272 | hps.data.sampling_rate, 273 | hps.data.mel_fmin, 274 | hps.data.mel_fmax) 275 | y_hat_mel = mel_spectrogram_torch( 276 | y_hat.squeeze(1).float(), 277 | hps.data.filter_length, 278 | hps.data.n_mel_channels, 279 | hps.data.sampling_rate, 280 | hps.data.hop_length, 281 | hps.data.win_length, 282 | hps.data.mel_fmin, 283 | hps.data.mel_fmax 284 | ) 285 | image_dict = { 286 | "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) 287 | } 288 | audio_dict = { 289 | "gen/audio": y_hat[0,:,:y_hat_lengths[0]] 290 | } 291 | if global_step == 0: 292 | image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) 293 | audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) 294 | 295 | utils.summarize( 296 | writer=writer_eval, 297 | global_step=global_step, 298 | images=image_dict, 299 | audios=audio_dict, 300 | audio_sampling_rate=hps.data.sampling_rate 301 | ) 302 | generator.train() 303 | 304 | 305 | if __name__ == "__main__": 306 | main() 307 | -------------------------------------------------------------------------------- /transforms.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | 4 | import numpy as np 5 | 6 | 7 | DEFAULT_MIN_BIN_WIDTH = 1e-3 8 | DEFAULT_MIN_BIN_HEIGHT = 1e-3 9 | DEFAULT_MIN_DERIVATIVE = 1e-3 10 | 11 | 12 | def piecewise_rational_quadratic_transform(inputs, 13 | unnormalized_widths, 14 | unnormalized_heights, 15 | unnormalized_derivatives, 16 | inverse=False, 17 | tails=None, 18 | tail_bound=1., 19 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 20 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 21 | min_derivative=DEFAULT_MIN_DERIVATIVE): 22 | 23 | if tails is None: 24 | spline_fn = rational_quadratic_spline 25 | spline_kwargs = {} 26 | else: 27 | spline_fn = unconstrained_rational_quadratic_spline 28 | spline_kwargs = { 29 | 'tails': tails, 30 | 'tail_bound': tail_bound 31 | } 32 | 33 | outputs, logabsdet = spline_fn( 34 | inputs=inputs, 35 | unnormalized_widths=unnormalized_widths, 36 | unnormalized_heights=unnormalized_heights, 37 | unnormalized_derivatives=unnormalized_derivatives, 38 | inverse=inverse, 39 | min_bin_width=min_bin_width, 40 | min_bin_height=min_bin_height, 41 | min_derivative=min_derivative, 42 | **spline_kwargs 43 | ) 44 | return outputs, logabsdet 45 | 46 | 47 | def searchsorted(bin_locations, inputs, eps=1e-6): 48 | bin_locations[..., -1] += eps 49 | return torch.sum( 50 | inputs[..., None] >= bin_locations, 51 | dim=-1 52 | ) - 1 53 | 54 | 55 | def unconstrained_rational_quadratic_spline(inputs, 56 | unnormalized_widths, 57 | unnormalized_heights, 58 | unnormalized_derivatives, 59 | inverse=False, 60 | tails='linear', 61 | tail_bound=1., 62 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 63 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 64 | min_derivative=DEFAULT_MIN_DERIVATIVE): 65 | inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) 66 | outside_interval_mask = ~inside_interval_mask 67 | 68 | outputs = torch.zeros_like(inputs) 69 | logabsdet = torch.zeros_like(inputs) 70 | 71 | if tails == 'linear': 72 | 
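        # Linear tails: pad one derivative slot on each side and fill it with a
        # constant chosen so that min_derivative + softplus(constant) == 1, i.e.
        # the spline meets the identity map with unit slope at the boundaries and
        # is exactly the identity outside [-tail_bound, tail_bound].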
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) 73 | constant = np.log(np.exp(1 - min_derivative) - 1) 74 | unnormalized_derivatives[..., 0] = constant 75 | unnormalized_derivatives[..., -1] = constant 76 | 77 | outputs[outside_interval_mask] = inputs[outside_interval_mask] 78 | logabsdet[outside_interval_mask] = 0 79 | else: 80 | raise RuntimeError('{} tails are not implemented.'.format(tails)) 81 | 82 | outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( 83 | inputs=inputs[inside_interval_mask], 84 | unnormalized_widths=unnormalized_widths[inside_interval_mask, :], 85 | unnormalized_heights=unnormalized_heights[inside_interval_mask, :], 86 | unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], 87 | inverse=inverse, 88 | left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, 89 | min_bin_width=min_bin_width, 90 | min_bin_height=min_bin_height, 91 | min_derivative=min_derivative 92 | ) 93 | 94 | return outputs, logabsdet 95 | 96 | def rational_quadratic_spline(inputs, 97 | unnormalized_widths, 98 | unnormalized_heights, 99 | unnormalized_derivatives, 100 | inverse=False, 101 | left=0., right=1., bottom=0., top=1., 102 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 103 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 104 | min_derivative=DEFAULT_MIN_DERIVATIVE): 105 | if torch.min(inputs) < left or torch.max(inputs) > right: 106 | raise ValueError('Input to a transform is not within its domain') 107 | 108 | num_bins = unnormalized_widths.shape[-1] 109 | 110 | if min_bin_width * num_bins > 1.0: 111 | raise ValueError('Minimal bin width too large for the number of bins') 112 | if min_bin_height * num_bins > 1.0: 113 | raise ValueError('Minimal bin height too large for the number of bins') 114 | 115 | widths = F.softmax(unnormalized_widths, dim=-1) 116 | widths = min_bin_width + (1 - min_bin_width * num_bins) * widths 117 | cumwidths = torch.cumsum(widths, dim=-1) 118 | cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) 119 | cumwidths = (right - left) * cumwidths + left 120 | cumwidths[..., 0] = left 121 | cumwidths[..., -1] = right 122 | widths = cumwidths[..., 1:] - cumwidths[..., :-1] 123 | 124 | derivatives = min_derivative + F.softplus(unnormalized_derivatives) 125 | 126 | heights = F.softmax(unnormalized_heights, dim=-1) 127 | heights = min_bin_height + (1 - min_bin_height * num_bins) * heights 128 | cumheights = torch.cumsum(heights, dim=-1) 129 | cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) 130 | cumheights = (top - bottom) * cumheights + bottom 131 | cumheights[..., 0] = bottom 132 | cumheights[..., -1] = top 133 | heights = cumheights[..., 1:] - cumheights[..., :-1] 134 | 135 | if inverse: 136 | bin_idx = searchsorted(cumheights, inputs)[..., None] 137 | else: 138 | bin_idx = searchsorted(cumwidths, inputs)[..., None] 139 | 140 | input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] 141 | input_bin_widths = widths.gather(-1, bin_idx)[..., 0] 142 | 143 | input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] 144 | delta = heights / widths 145 | input_delta = delta.gather(-1, bin_idx)[..., 0] 146 | 147 | input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] 148 | input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] 149 | 150 | input_heights = heights.gather(-1, bin_idx)[..., 0] 151 | 152 | if inverse: 153 | a = (((inputs - input_cumheights) * (input_derivatives 154 | + input_derivatives_plus_one 155 | - 2 * input_delta) 
156 | + input_heights * (input_delta - input_derivatives))) 157 | b = (input_heights * input_derivatives 158 | - (inputs - input_cumheights) * (input_derivatives 159 | + input_derivatives_plus_one 160 | - 2 * input_delta)) 161 | c = - input_delta * (inputs - input_cumheights) 162 | 163 | discriminant = b.pow(2) - 4 * a * c 164 | assert (discriminant >= 0).all() 165 | 166 | root = (2 * c) / (-b - torch.sqrt(discriminant)) 167 | outputs = root * input_bin_widths + input_cumwidths 168 | 169 | theta_one_minus_theta = root * (1 - root) 170 | denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) 171 | * theta_one_minus_theta) 172 | derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) 173 | + 2 * input_delta * theta_one_minus_theta 174 | + input_derivatives * (1 - root).pow(2)) 175 | logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) 176 | 177 | return outputs, -logabsdet 178 | else: 179 | theta = (inputs - input_cumwidths) / input_bin_widths 180 | theta_one_minus_theta = theta * (1 - theta) 181 | 182 | numerator = input_heights * (input_delta * theta.pow(2) 183 | + input_derivatives * theta_one_minus_theta) 184 | denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) 185 | * theta_one_minus_theta) 186 | outputs = input_cumheights + numerator / denominator 187 | 188 | derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) 189 | + 2 * input_delta * theta_one_minus_theta 190 | + input_derivatives * (1 - theta).pow(2)) 191 | logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) 192 | 193 | return outputs, logabsdet 194 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import sys 4 | import argparse 5 | import logging 6 | import json 7 | import subprocess 8 | import numpy as np 9 | from scipy.io.wavfile import read 10 | import torch 11 | 12 | MATPLOTLIB_FLAG = False 13 | 14 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 15 | logger = logging 16 | 17 | 18 | def load_checkpoint(checkpoint_path, model, optimizer=None): 19 | assert os.path.isfile(checkpoint_path) 20 | checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') 21 | iteration = checkpoint_dict['iteration'] 22 | learning_rate = checkpoint_dict['learning_rate'] 23 | if iteration is None: 24 | iteration = 1 25 | if learning_rate is None: 26 | learning_rate = 0.0002 27 | if optimizer is not None and checkpoint_dict['optimizer'] is not None: 28 | optimizer.load_state_dict(checkpoint_dict['optimizer']) 29 | saved_state_dict = checkpoint_dict['model'] 30 | if hasattr(model, 'module'): 31 | state_dict = model.module.state_dict() 32 | else: 33 | state_dict = model.state_dict() 34 | new_state_dict= {} 35 | for k, v in state_dict.items(): 36 | try: 37 | new_state_dict[k] = saved_state_dict[k] 38 | except: 39 | logger.info("%s is not in the checkpoint" % k) 40 | new_state_dict[k] = v 41 | if hasattr(model, 'module'): 42 | model.module.load_state_dict(new_state_dict) 43 | else: 44 | model.load_state_dict(new_state_dict) 45 | logger.info("Loaded checkpoint '{}' (iteration {})" .format( 46 | checkpoint_path, iteration)) 47 | return model, optimizer, learning_rate, iteration 48 | 49 | 50 | def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): 51 | 
logger.info("Saving model and optimizer state at iteration {} to {}".format( 52 | iteration, checkpoint_path)) 53 | if hasattr(model, 'module'): 54 | state_dict = model.module.state_dict() 55 | else: 56 | state_dict = model.state_dict() 57 | torch.save({'model': state_dict, 58 | 'iteration': iteration, 59 | 'optimizer': optimizer.state_dict(), 60 | 'learning_rate': learning_rate}, checkpoint_path) 61 | 62 | 63 | def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): 64 | for k, v in scalars.items(): 65 | writer.add_scalar(k, v, global_step) 66 | for k, v in histograms.items(): 67 | writer.add_histogram(k, v, global_step) 68 | for k, v in images.items(): 69 | writer.add_image(k, v, global_step, dataformats='HWC') 70 | for k, v in audios.items(): 71 | writer.add_audio(k, v, global_step, audio_sampling_rate) 72 | 73 | 74 | def latest_checkpoint_path(dir_path, regex="G_*.pth"): 75 | f_list = glob.glob(os.path.join(dir_path, regex)) 76 | f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) 77 | x = f_list[-1] 78 | print(x) 79 | return x 80 | 81 | 82 | def plot_spectrogram_to_numpy(spectrogram): 83 | global MATPLOTLIB_FLAG 84 | if not MATPLOTLIB_FLAG: 85 | import matplotlib 86 | matplotlib.use("Agg") 87 | MATPLOTLIB_FLAG = True 88 | mpl_logger = logging.getLogger('matplotlib') 89 | mpl_logger.setLevel(logging.WARNING) 90 | import matplotlib.pylab as plt 91 | import numpy as np 92 | 93 | fig, ax = plt.subplots(figsize=(10,2)) 94 | im = ax.imshow(spectrogram, aspect="auto", origin="lower", 95 | interpolation='none') 96 | plt.colorbar(im, ax=ax) 97 | plt.xlabel("Frames") 98 | plt.ylabel("Channels") 99 | plt.tight_layout() 100 | 101 | fig.canvas.draw() 102 | data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') 103 | data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) 104 | plt.close() 105 | return data 106 | 107 | 108 | def plot_alignment_to_numpy(alignment, info=None): 109 | global MATPLOTLIB_FLAG 110 | if not MATPLOTLIB_FLAG: 111 | import matplotlib 112 | matplotlib.use("Agg") 113 | MATPLOTLIB_FLAG = True 114 | mpl_logger = logging.getLogger('matplotlib') 115 | mpl_logger.setLevel(logging.WARNING) 116 | import matplotlib.pylab as plt 117 | import numpy as np 118 | 119 | fig, ax = plt.subplots(figsize=(6, 4)) 120 | im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', 121 | interpolation='none') 122 | fig.colorbar(im, ax=ax) 123 | xlabel = 'Decoder timestep' 124 | if info is not None: 125 | xlabel += '\n\n' + info 126 | plt.xlabel(xlabel) 127 | plt.ylabel('Encoder timestep') 128 | plt.tight_layout() 129 | 130 | fig.canvas.draw() 131 | data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') 132 | data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) 133 | plt.close() 134 | return data 135 | 136 | 137 | def load_wav_to_torch(full_path): 138 | sampling_rate, data = read(full_path) 139 | return torch.FloatTensor(data.astype(np.float32)), sampling_rate 140 | 141 | 142 | def load_filepaths_and_text(filename, split="|"): 143 | with open(filename, encoding='utf-8') as f: 144 | filepaths_and_text = [line.strip().split(split) for line in f] 145 | return filepaths_and_text 146 | 147 | 148 | def get_hparams(init=True): 149 | parser = argparse.ArgumentParser() 150 | parser.add_argument('-c', '--config', type=str, default="./configs/base.json", 151 | help='JSON file for configuration') 152 | parser.add_argument('-m', '--model', type=str, required=True, 153 | help='Model 
name') 154 | 155 | args = parser.parse_args() 156 | model_dir = "../drive/MyDrive/vits-finetune" 157 | model_dir = os.path.join(model_dir, args.model) 158 | 159 | if not os.path.exists(model_dir): 160 | os.makedirs(model_dir) 161 | 162 | config_path = args.config 163 | config_save_path = os.path.join(model_dir, "config.json") 164 | if init: 165 | with open(config_path, "r") as f: 166 | data = f.read() 167 | with open(config_save_path, "w") as f: 168 | f.write(data) 169 | else: 170 | with open(config_save_path, "r") as f: 171 | data = f.read() 172 | config = json.loads(data) 173 | 174 | hparams = HParams(**config) 175 | hparams.model_dir = model_dir 176 | return hparams 177 | 178 | 179 | def get_hparams_from_dir(model_dir): 180 | config_save_path = os.path.join(model_dir, "config.json") 181 | with open(config_save_path, "r") as f: 182 | data = f.read() 183 | config = json.loads(data) 184 | 185 | hparams =HParams(**config) 186 | hparams.model_dir = model_dir 187 | return hparams 188 | 189 | 190 | def get_hparams_from_file(config_path): 191 | with open(config_path, "r") as f: 192 | data = f.read() 193 | config = json.loads(data) 194 | 195 | hparams =HParams(**config) 196 | return hparams 197 | 198 | 199 | def check_git_hash(model_dir): 200 | source_dir = os.path.dirname(os.path.realpath(__file__)) 201 | if not os.path.exists(os.path.join(source_dir, ".git")): 202 | logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( 203 | source_dir 204 | )) 205 | return 206 | 207 | cur_hash = subprocess.getoutput("git rev-parse HEAD") 208 | 209 | path = os.path.join(model_dir, "githash") 210 | if os.path.exists(path): 211 | saved_hash = open(path).read() 212 | if saved_hash != cur_hash: 213 | logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( 214 | saved_hash[:8], cur_hash[:8])) 215 | else: 216 | open(path, "w").write(cur_hash) 217 | 218 | 219 | def get_logger(model_dir, filename="train.log"): 220 | global logger 221 | logger = logging.getLogger(os.path.basename(model_dir)) 222 | logger.setLevel(logging.DEBUG) 223 | 224 | formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") 225 | if not os.path.exists(model_dir): 226 | os.makedirs(model_dir) 227 | h = logging.FileHandler(os.path.join(model_dir, filename)) 228 | h.setLevel(logging.DEBUG) 229 | h.setFormatter(formatter) 230 | logger.addHandler(h) 231 | return logger 232 | 233 | 234 | class HParams(): 235 | def __init__(self, **kwargs): 236 | for k, v in kwargs.items(): 237 | if type(v) == dict: 238 | v = HParams(**v) 239 | self[k] = v 240 | 241 | def keys(self): 242 | return self.__dict__.keys() 243 | 244 | def items(self): 245 | return self.__dict__.items() 246 | 247 | def values(self): 248 | return self.__dict__.values() 249 | 250 | def __len__(self): 251 | return len(self.__dict__) 252 | 253 | def __getitem__(self, key): 254 | return getattr(self, key) 255 | 256 | def __setitem__(self, key, value): 257 | return setattr(self, key, value) 258 | 259 | def __contains__(self, key): 260 | return key in self.__dict__ 261 | 262 | def __repr__(self): 263 | return self.__dict__.__repr__() 264 | -------------------------------------------------------------------------------- /wav/ba/miyu/124180.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/124180.wav -------------------------------------------------------------------------------- /wav/ba/miyu/130233.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/130233.wav -------------------------------------------------------------------------------- /wav/ba/miyu/139528.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/139528.wav -------------------------------------------------------------------------------- /wav/ba/miyu/145077.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/145077.wav -------------------------------------------------------------------------------- /wav/ba/miyu/147691.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/147691.wav -------------------------------------------------------------------------------- /wav/ba/miyu/155037.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/155037.wav -------------------------------------------------------------------------------- /wav/ba/miyu/164427.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/164427.wav 
-------------------------------------------------------------------------------- /wav/ba/miyu/173043.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/173043.wav -------------------------------------------------------------------------------- /wav/ba/miyu/211336.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/211336.wav -------------------------------------------------------------------------------- /wav/ba/miyu/255333.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/255333.wav -------------------------------------------------------------------------------- /wav/ba/miyu/29571.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/29571.wav -------------------------------------------------------------------------------- /wav/ba/miyu/306949.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/306949.wav -------------------------------------------------------------------------------- /wav/ba/miyu/318631.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/318631.wav -------------------------------------------------------------------------------- /wav/ba/miyu/356351.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/356351.wav -------------------------------------------------------------------------------- /wav/ba/miyu/379056.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/379056.wav -------------------------------------------------------------------------------- /wav/ba/miyu/387627.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/387627.wav -------------------------------------------------------------------------------- /wav/ba/miyu/389580.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/389580.wav -------------------------------------------------------------------------------- /wav/ba/miyu/39192.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/39192.wav -------------------------------------------------------------------------------- /wav/ba/miyu/412781.wav: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/412781.wav -------------------------------------------------------------------------------- /wav/ba/miyu/41287.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/41287.wav -------------------------------------------------------------------------------- /wav/ba/miyu/422295.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/422295.wav -------------------------------------------------------------------------------- /wav/ba/miyu/436322.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/436322.wav -------------------------------------------------------------------------------- /wav/ba/miyu/44255.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/44255.wav -------------------------------------------------------------------------------- /wav/ba/miyu/450375.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/450375.wav -------------------------------------------------------------------------------- /wav/ba/miyu/456869.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/456869.wav -------------------------------------------------------------------------------- /wav/ba/miyu/461433.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/461433.wav -------------------------------------------------------------------------------- /wav/ba/miyu/472324.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/472324.wav -------------------------------------------------------------------------------- /wav/ba/miyu/527590.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/527590.wav -------------------------------------------------------------------------------- /wav/ba/miyu/533522.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/533522.wav -------------------------------------------------------------------------------- /wav/ba/miyu/536512.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/536512.wav -------------------------------------------------------------------------------- /wav/ba/miyu/537029.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/537029.wav -------------------------------------------------------------------------------- /wav/ba/miyu/554075.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/554075.wav -------------------------------------------------------------------------------- /wav/ba/miyu/55566.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/55566.wav -------------------------------------------------------------------------------- /wav/ba/miyu/589002.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/589002.wav -------------------------------------------------------------------------------- /wav/ba/miyu/616395.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/616395.wav -------------------------------------------------------------------------------- /wav/ba/miyu/618533.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/618533.wav -------------------------------------------------------------------------------- /wav/ba/miyu/619085.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/619085.wav -------------------------------------------------------------------------------- /wav/ba/miyu/643475.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/643475.wav -------------------------------------------------------------------------------- /wav/ba/miyu/648148.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/648148.wav -------------------------------------------------------------------------------- /wav/ba/miyu/649415.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/649415.wav -------------------------------------------------------------------------------- /wav/ba/miyu/706012.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/706012.wav 
-------------------------------------------------------------------------------- /wav/ba/miyu/709504.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/709504.wav -------------------------------------------------------------------------------- /wav/ba/miyu/712096.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/712096.wav -------------------------------------------------------------------------------- /wav/ba/miyu/714649.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/714649.wav -------------------------------------------------------------------------------- /wav/ba/miyu/749189.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/749189.wav -------------------------------------------------------------------------------- /wav/ba/miyu/750669.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/750669.wav -------------------------------------------------------------------------------- /wav/ba/miyu/755180.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/755180.wav -------------------------------------------------------------------------------- /wav/ba/miyu/805836.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/805836.wav -------------------------------------------------------------------------------- /wav/ba/miyu/814441.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/814441.wav -------------------------------------------------------------------------------- /wav/ba/miyu/827005.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/827005.wav -------------------------------------------------------------------------------- /wav/ba/miyu/829111.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/829111.wav -------------------------------------------------------------------------------- /wav/ba/miyu/861536.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/861536.wav -------------------------------------------------------------------------------- /wav/ba/miyu/862032.wav: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/862032.wav -------------------------------------------------------------------------------- /wav/ba/miyu/872843.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/872843.wav -------------------------------------------------------------------------------- /wav/ba/miyu/886881.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/886881.wav -------------------------------------------------------------------------------- /wav/ba/miyu/899245.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/899245.wav -------------------------------------------------------------------------------- /wav/ba/miyu/902456.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/902456.wav -------------------------------------------------------------------------------- /wav/ba/miyu/90758.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/90758.wav -------------------------------------------------------------------------------- /wav/ba/miyu/916145.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/916145.wav -------------------------------------------------------------------------------- /wav/ba/miyu/930369.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/930369.wav -------------------------------------------------------------------------------- /wav/ba/miyu/940433.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/940433.wav -------------------------------------------------------------------------------- /wav/ba/miyu/946836.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/946836.wav -------------------------------------------------------------------------------- /wav/ba/miyu/955071.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/955071.wav -------------------------------------------------------------------------------- /wav/ba/miyu/974397.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/974397.wav -------------------------------------------------------------------------------- /wav/ba/miyu/978431.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/978431.wav -------------------------------------------------------------------------------- /wav/ba/miyu/986967.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SayaSS/vits-finetuning/11e767c4cf7487247181a1bb26234718da52b16e/wav/ba/miyu/986967.wav -------------------------------------------------------------------------------- /webui.py: --------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import argparse
3 | import utils
4 | import commons
5 | import torch
6 | import gradio as gr
7 | import webbrowser
8 | from models import SynthesizerTrn
9 | from text import text_to_sequence
10 | from torch import no_grad, LongTensor
11 | import logging
12 | logging.getLogger('numba').setLevel(logging.WARNING)
13 | logging.getLogger("PIL").setLevel(logging.WARNING)
14 | logging.getLogger("urllib3").setLevel(logging.WARNING)
15 | logging.getLogger("httpx").setLevel(logging.WARNING)
16 | logging.getLogger("asyncio").setLevel(logging.WARNING)
17 | 
18 | def get_text(text, hps):
19 |     text_norm = text_to_sequence(text, hps.data.text_cleaners)
20 |     if hps.data.add_blank:
21 |         text_norm = commons.intersperse(text_norm, 0)  # insert blank tokens between symbols, matching training
22 |     text_norm = LongTensor(text_norm)
23 |     return text_norm
24 | 
25 | def create_tts_fn(net_g_ms):
26 |     def tts_fn(text, noise_scale, noise_scale_w, length_scale, speaker_id):
27 |         text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")  # strip line breaks and spaces before cleaning
28 |         stn_tst = get_text(text, hps_ms)
29 |         with no_grad():
30 |             x_tst = stn_tst.unsqueeze(0).to(device)
31 |             x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
32 |             sid = LongTensor([speaker_id]).to(device)
33 |             audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
34 |                                    length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()  # first waveform in the batch
35 |         return "Success", (22050, audio)  # gr.Audio expects a (sample_rate, ndarray) tuple
36 |     return tts_fn
37 | 
38 | 
39 | download_audio_js = """
40 | () => {
41 |     let root = document.querySelector("body > gradio-app");
42 |     if (root.shadowRoot != null)
43 |         root = root.shadowRoot;
44 |     let audio = root.querySelector("#tts-audio").querySelector("audio");
45 |     let text = root.querySelector("#input-text").querySelector("textarea");
46 |     if (audio == undefined)
47 |         return;
48 |     text = text.value;
49 |     if (text == undefined)
50 |         text = Math.floor(Math.random()*100000000);
51 |     audio = audio.src;
52 |     let oA = document.createElement("a");
53 |     oA.download = text.substr(0, 20)+'.wav';
54 |     oA.href = audio;
55 |     document.body.appendChild(oA);
56 |     oA.click();
57 |     oA.remove();
58 | }
59 | """
60 | 
61 | if __name__ == '__main__':
62 |     parser = argparse.ArgumentParser()
63 |     parser.add_argument('--device', type=str, default='cpu')
64 |     parser.add_argument('--api', action="store_true", default=False)
65 |     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
66 |     parser.add_argument("--colab", action="store_true", default=False)
67 |     parser.add_argument('-c', '--config', type=str, default="configs/config.json", help='JSON file for configuration')
68 |     parser.add_argument('-m', '--model', type=str, required=True, help='Model path')
69 |     args = parser.parse_args()
70 |     device = torch.device(args.device)
71 |     hps_ms = utils.get_hparams_from_file(args.config)
72 |     models = []
73 |     net_g_ms = SynthesizerTrn(
74 |         len(hps_ms.symbols),
75 |         hps_ms.data.filter_length // 2 + 1,  # number of spectrogram channels
76 |         hps_ms.train.segment_size // hps_ms.data.hop_length,
77 |         n_speakers=hps_ms.data.n_speakers,
78 |         **hps_ms.model)
79 |     utils.load_checkpoint(args.model, net_g_ms, None)  # optimizer state is not needed for inference
80 |     _ = net_g_ms.eval().to(device)
81 |     models.append((net_g_ms, create_tts_fn(net_g_ms)))
82 |     with gr.Blocks() as app:
83 |         with gr.Tabs():
84 |             for (net_g_ms, tts_fn) in models:
85 |                 with gr.TabItem(args.model):
86 |                     with gr.Row():
87 |                         with gr.Column():
88 |                             input_text = gr.Textbox(label="Text",
89 |                                                     lines=5, value="今日はいい天気ですね。",
90 |                                                     elem_id="input-text")
91 |                             btn = gr.Button(value="Generate", variant="primary")
92 |                             sid = gr.Number(label="speaker_id", value=10)  # speaker id used in the training filelists
93 |                             with gr.Row():
94 |                                 ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6,
95 |                                                interactive=True)
96 |                                 nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668,
97 |                                                 interactive=True)
98 |                                 ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
99 |                         with gr.Column():
100 |                             o1 = gr.Textbox(label="Output Message")
101 |                             o2 = gr.Audio(label="Output Audio", elem_id="tts-audio")
102 |                             download = gr.Button("Download Audio")
103 |                             btn.click(tts_fn, inputs=[input_text, ns, nsw, ls, sid], outputs=[o1, o2], api_name="tts")
104 |                             download.click(None, [], [], _js=download_audio_js)
105 |     if args.colab:
106 |         webbrowser.open("http://127.0.0.1:7860")
107 |     app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
--------------------------------------------------------------------------------
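A minimal launch sketch based on the argparse flags in webui.py above; the checkpoint path `logs/G_8000.pth` is an illustrative placeholder for whatever fine-tuned generator you saved, not a file shipped with the repo:

```sh
# Launch the Gradio demo on CPU with the default config; add --share for a public link
python webui.py -m logs/G_8000.pth -c configs/config.json --device cpu
```

In the UI, `noise_scale` and `noise_scale_w` trade determinism for prosodic variation, while `length_scale` stretches the output duration (values above 1.0 slow the speech down).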