├── README.md
├── data_loader.py
├── hparams.py
├── model.py
├── requirements.txt
├── run.py
├── solver.py
├── tfcompat
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   ├── hparam.cpython-36.pyc
│   │   └── hparamvc.cpython-36.pyc
│   ├── hparam.py
│   └── hparamvc.py
└── utils.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# CYCLEFLOW: PURIFY INFORMATION FACTORS BY CYCLE LOSS

This code is a PyTorch implementation of the CycleFlow model in "CycleFlow: Purify Information Factors by Cycle Loss", which is modified from the original [Speechflow](https://github.com/auspicious3000/SpeechSplit).

Some toolkits to be used can be found in our modified [SpeechFlow](https://github.com/FantSun/Speechflow).

## Dependencies

This project is built with Python 3.6; the other packages can be installed with ```pip install -r requirements.txt```.


## To Prepare Training Data (VCTK is taken as an example here; for other datasets you should modify some settings in the files below)

1. Prepare your wave files

2. Prepare spectrograms and pitch contours for your data following the tools provided in [SpeechFlow](https://github.com/FantSun/Speechflow)


## To Train

1. Change the settings in ```hparams.py``` and ```run.py```

2. Run the training script: ```python run.py```


## To Run Inference

You can change the inputs of the "Generator_loop" module to adapt the model for your inference.


## Final Words

This is the code for "CycleFlow: Purify Information Factors by Cycle Loss" ([demo](http://cycleflow.cslt.org/), [paper](http://cycleflow.cslt.org/paper.pdf)); this improved model can produce more disentangled factors than SpeechFlow. The main design in this model is a random factor substitution (RFS) operation and a cycle loss.
We highlight that this technique is simple and general and can be applied to any factorization model, although in this study we test it with the SpeechFlow architecture, hence the name CycleFlow. 33 | This code is modified for our task from the original [Speechflow](https://github.com/auspicious3000/SpeechSplit). Thanks for Kaizhi Qian providing the original code, which is much helpful for us. 34 | -------------------------------------------------------------------------------- /data_loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import pickle 4 | import numpy as np 5 | 6 | from functools import partial 7 | from numpy.random import uniform 8 | from multiprocessing import Process, Manager 9 | 10 | from torch.utils import data 11 | from torch.utils.data.sampler import Sampler 12 | 13 | 14 | class Utterances(data.Dataset): 15 | """Dataset class for the Utterances dataset.""" 16 | 17 | def __init__(self, root_dir, feat_dir, dvec_dir, mode): 18 | """Initialize and preprocess the Utterances dataset.""" 19 | self.root_dir = root_dir 20 | self.feat_dir = feat_dir 21 | self.dvec_dir = dvec_dir 22 | self.mode = mode 23 | self.step = 20 24 | self.split = 0 25 | 26 | metaname = os.path.join(self.root_dir, "train.pkl") 27 | meta = pickle.load(open(metaname, "rb")) 28 | 29 | manager = Manager() 30 | meta = manager.list(meta) 31 | dataset = manager.list(len(meta)*[None]) # <-- can be shared between processes. 
32 | 33 | processes = [] 34 | for i in range(0, len(meta), self.step): 35 | p = Process(target=self.load_data, 36 | args=(meta[i:i+self.step],dataset,i,mode)) 37 | p.start() 38 | processes.append(p) 39 | for p in processes: 40 | p.join() 41 | 42 | 43 | # very importtant to do dataset = list(dataset) 44 | if mode == 'train': 45 | self.train_dataset = list(dataset) 46 | self.num_tokens = len(self.train_dataset) 47 | elif mode == 'test': 48 | self.test_dataset = list(dataset) 49 | self.num_tokens = len(self.test_dataset) 50 | else: 51 | raise ValueError 52 | 53 | print('Finished loading {} dataset...'.format(mode)) 54 | 55 | 56 | 57 | def load_data(self, submeta, dataset, idx_offset, mode): 58 | for k, sbmt in enumerate(submeta): 59 | uttrs = len(sbmt)*[None] 60 | # fill in speaker id and embedding 61 | uttrs[0] = sbmt[0] 62 | uttrs[1] = sbmt[1] 63 | # fill in data 64 | sp_tmp = np.load(os.path.join(self.root_dir, sbmt[2])) 65 | f0_tmp = np.load(os.path.join(self.feat_dir, sbmt[2])) 66 | dv_tmp = np.load(os.path.join(self.dvec_dir, sbmt[3])) 67 | if self.mode == 'train': 68 | sp_tmp = sp_tmp[self.split:, :] 69 | f0_tmp = f0_tmp[self.split:] 70 | elif self.mode == 'test': 71 | sp_tmp = sp_tmp[:self.split, :] 72 | f0_tmp = f0_tmp[:self.split] 73 | else: 74 | raise ValueError 75 | uttrs[2] = ( sp_tmp, f0_tmp, dv_tmp ) 76 | dataset[idx_offset+k] = uttrs 77 | 78 | 79 | 80 | def __getitem__(self, index): 81 | dataset = self.train_dataset if self.mode == 'train' else self.test_dataset 82 | 83 | list_uttrs = dataset[index] 84 | spk_org = list_uttrs[0] 85 | spk_id_org = list_uttrs[1] 86 | 87 | melsp, f0_org, emb_org = list_uttrs[2] 88 | 89 | return melsp, emb_org, f0_org 90 | 91 | 92 | def __len__(self): 93 | """Return the number of spkrs.""" 94 | return self.num_tokens 95 | 96 | 97 | 98 | class MyCollator(object): 99 | def __init__(self, hparams): 100 | self.min_len_seq = hparams.min_len_seq 101 | self.max_len_seq = hparams.max_len_seq 102 | self.max_len_pad = 
hparams.max_len_pad 103 | 104 | def __call__(self, batch): 105 | # batch[i] is a tuple of __getitem__ outputs 106 | new_batch = [] 107 | for token in batch: 108 | aa, b, c = token 109 | len_crop = np.random.randint(self.min_len_seq, self.max_len_seq+1, size=2) # 1.5s ~ 3s 110 | if len(aa) - len_crop[0] <= 0: 111 | continue 112 | left = np.random.randint(0, len(aa)-len_crop[0], size=2) 113 | #pdb.set_trace() 114 | 115 | a = aa[left[0]:left[0]+len_crop[0], :] 116 | c = c[left[0]:left[0]+len_crop[0]] 117 | 118 | a = np.clip(a, 0, 1) 119 | 120 | a_pad = np.pad(a, ((0,self.max_len_pad-a.shape[0]),(0,0)), 'constant') 121 | c_pad = np.pad(c[:,np.newaxis], ((0,self.max_len_pad-c.shape[0]),(0,0)), 'constant', constant_values=-1e10) 122 | 123 | new_batch.append( (a_pad, b, c_pad, len_crop[0]) ) 124 | 125 | batch = new_batch 126 | 127 | a, b, c, d = zip(*batch) 128 | melsp = torch.from_numpy(np.stack(a, axis=0)) 129 | spk_emb = torch.from_numpy(np.stack(b, axis=0)) 130 | pitch = torch.from_numpy(np.stack(c, axis=0)) 131 | len_org = torch.from_numpy(np.stack(d, axis=0)) 132 | 133 | return melsp, spk_emb, pitch, len_org 134 | 135 | 136 | 137 | 138 | class MultiSampler(Sampler): 139 | """Samples elements more than once in a single pass through the data. 
140 | """ 141 | def __init__(self, num_samples, n_repeats, shuffle=False): 142 | self.num_samples = num_samples 143 | self.n_repeats = n_repeats 144 | self.shuffle = shuffle 145 | 146 | def gen_sample_array(self): 147 | self.sample_idx_array = torch.arange(self.num_samples, dtype=torch.int64).repeat(self.n_repeats) 148 | if self.shuffle: 149 | self.sample_idx_array = self.sample_idx_array[torch.randperm(len(self.sample_idx_array))] 150 | return self.sample_idx_array 151 | 152 | def __iter__(self): 153 | return iter(self.gen_sample_array()) 154 | 155 | def __len__(self): 156 | return len(self.sample_idx_array) 157 | 158 | 159 | 160 | 161 | def get_loader(hparams): 162 | """Build and return a data loader.""" 163 | 164 | dataset = Utterances(hparams.root_dir, hparams.feat_dir, hparams.dvec_dir, hparams.mode) 165 | 166 | my_collator = MyCollator(hparams) 167 | 168 | sampler = MultiSampler(len(dataset), hparams.samplier, shuffle=hparams.shuffle) 169 | 170 | worker_init_fn = lambda x: np.random.seed((torch.initial_seed()) % (2**32)) 171 | 172 | data_loader = data.DataLoader(dataset=dataset, 173 | batch_size=hparams.batch_size, 174 | sampler=sampler, 175 | num_workers=hparams.num_workers, 176 | drop_last=True, 177 | pin_memory=True, 178 | worker_init_fn=worker_init_fn, 179 | collate_fn=my_collator) 180 | return data_loader 181 | -------------------------------------------------------------------------------- /hparams.py: -------------------------------------------------------------------------------- 1 | from tfcompat.hparam import HParams 2 | 3 | # NOTE: If you want full control for model architecture. please take a look 4 | # at the code and change whatever you want. Some hyper parameters are hardcoded. 
5 | 6 | # Default hyperparameters: 7 | hparams = HParams( 8 | # model 9 | freq_c = 8, 10 | dim_neck_c = 8, 11 | freq_r = 8, 12 | dim_neck_r = 1, 13 | freq_f = 8, 14 | dim_neck_f = 32, 15 | freq_t = 192, 16 | 17 | dim_enc_c = 512, 18 | dim_enc_r = 128, 19 | dim_enc_f = 256, 20 | 21 | dim_freq = 80, 22 | dim_spk_emb = 32, 23 | dim_f0 = 257, 24 | dim_dec = 512, 25 | len_raw = 128, 26 | chs_grp = 16, 27 | 28 | # interp 29 | min_len_seg = 19, 30 | max_len_seg = 32, 31 | min_len_seq = 64, 32 | max_len_seq = 128, 33 | max_len_pad = 192, 34 | 35 | # data loader 36 | root_dir = 'data/training_set/spmel', 37 | feat_dir = 'data/training_set/raptf0', 38 | dvec_dir = 'data/training_set/dvec', 39 | demo_file = 'data/valid_set/demo.pkl', 40 | batch_size = 64, 41 | mode = 'train', 42 | shuffle = True, 43 | num_workers = 0, 44 | samplier = 8, 45 | 46 | ) 47 | 48 | 49 | def hparams_debug_string(): 50 | values = hparams.values() 51 | hp = [' %s: %s' % (name, values[name]) for name in values] 52 | return 'Hyperparameters:\n' + '\n'.join(hp) 53 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | 7 | class LinearNorm(torch.nn.Module): 8 | def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): 9 | super(LinearNorm, self).__init__() 10 | self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) 11 | 12 | torch.nn.init.xavier_uniform_( 13 | self.linear_layer.weight, 14 | gain=torch.nn.init.calculate_gain(w_init_gain)) 15 | 16 | def forward(self, x): 17 | return self.linear_layer(x) 18 | 19 | 20 | 21 | class ConvNorm(torch.nn.Module): 22 | def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, 23 | padding=None, dilation=1, bias=True, w_init_gain='linear'): 24 | super(ConvNorm, self).__init__() 25 | if padding is None: 26 | 
assert(kernel_size % 2 == 1) 27 | padding = int(dilation * (kernel_size - 1) / 2) 28 | 29 | self.conv = torch.nn.Conv1d(in_channels, out_channels, 30 | kernel_size=kernel_size, stride=stride, 31 | padding=padding, dilation=dilation, 32 | bias=bias) 33 | 34 | torch.nn.init.xavier_uniform_( 35 | self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) 36 | 37 | def forward(self, signal): 38 | conv_signal = self.conv(signal) 39 | return conv_signal 40 | 41 | 42 | 43 | class Encoder_r(nn.Module): 44 | """Rhythm Encoder 45 | """ 46 | def __init__(self, hparams): 47 | super().__init__() 48 | 49 | self.dim_neck_r = hparams.dim_neck_r 50 | self.freq_r = hparams.freq_r 51 | self.dim_freq = hparams.dim_freq 52 | self.dim_enc_r = hparams.dim_enc_r 53 | self.chs_grp = hparams.chs_grp 54 | 55 | convolutions = [] 56 | for i in range(1): 57 | conv_layer = nn.Sequential( 58 | ConvNorm(self.dim_freq if i==0 else self.dim_enc_r, 59 | self.dim_enc_r, 60 | kernel_size=5, stride=1, 61 | padding=2, 62 | dilation=1, w_init_gain='relu'), 63 | nn.GroupNorm(self.dim_enc_r//self.chs_grp, self.dim_enc_r)) 64 | convolutions.append(conv_layer) 65 | self.convolutions_r = nn.ModuleList(convolutions) 66 | 67 | self.lstm_r = nn.LSTM(self.dim_enc_r, self.dim_neck_r, 1, batch_first=True, bidirectional=True) 68 | 69 | 70 | def forward(self, x, mask): 71 | 72 | for conv in self.convolutions_r: 73 | x = F.relu(conv(x)) 74 | x = x.transpose(1, 2) 75 | 76 | self.lstm_r.flatten_parameters() 77 | outputs, _ = self.lstm_r(x) 78 | if mask is not None: 79 | outputs = outputs * mask 80 | out_forward = outputs[:, :, :self.dim_neck_r] 81 | out_backward = outputs[:, :, self.dim_neck_r:] 82 | 83 | codes = torch.cat((out_forward[:,self.freq_r-1::self.freq_r,:], out_backward[:,::self.freq_r,:]), dim=-1) 84 | 85 | return codes 86 | 87 | 88 | 89 | class Encoder_cf(nn.Module): 90 | """Sync Encoder module 91 | """ 92 | def __init__(self, hparams): 93 | super().__init__() 94 | 95 | self.dim_neck_c = 
hparams.dim_neck_c 96 | self.freq_c = hparams.freq_c 97 | self.freq_f = hparams.freq_f 98 | self.dim_enc_c = hparams.dim_enc_c 99 | self.dim_enc_f = hparams.dim_enc_f 100 | self.dim_freq = hparams.dim_freq 101 | self.chs_grp = hparams.chs_grp 102 | self.register_buffer('len_org', torch.tensor(hparams.max_len_pad)) 103 | self.dim_neck_f = hparams.dim_neck_f 104 | self.dim_f0 = hparams.dim_f0 105 | 106 | # convolutions for content 107 | convolutions = [] 108 | for i in range(3): 109 | conv_layer = nn.Sequential( 110 | ConvNorm(self.dim_freq if i==0 else self.dim_enc_c, 111 | self.dim_enc_c, 112 | kernel_size=5, stride=1, 113 | padding=2, 114 | dilation=1, w_init_gain='relu'), 115 | nn.GroupNorm(self.dim_enc_c//self.chs_grp, self.dim_enc_c)) 116 | convolutions.append(conv_layer) 117 | self.convolutions_c = nn.ModuleList(convolutions) 118 | 119 | self.lstm_c = nn.LSTM(self.dim_enc_c, self.dim_neck_c, 2, batch_first=True, bidirectional=True) 120 | 121 | # convolutions for pitch 122 | convolutions = [] 123 | for i in range(3): 124 | conv_layer = nn.Sequential( 125 | ConvNorm(self.dim_f0 if i==0 else self.dim_enc_f, 126 | self.dim_enc_f, 127 | kernel_size=5, stride=1, 128 | padding=2, 129 | dilation=1, w_init_gain='relu'), 130 | nn.GroupNorm(self.dim_enc_f//self.chs_grp, self.dim_enc_f)) 131 | convolutions.append(conv_layer) 132 | self.convolutions_f = nn.ModuleList(convolutions) 133 | 134 | self.lstm_f = nn.LSTM(self.dim_enc_f, self.dim_neck_f, 1, batch_first=True, bidirectional=True) 135 | 136 | self.interp = InterpLnr(hparams) 137 | 138 | 139 | def forward(self, x_f0): 140 | 141 | x = x_f0[:, :self.dim_freq, :] 142 | f0 = x_f0[:, self.dim_freq:, :] 143 | 144 | for conv_1, conv_2 in zip(self.convolutions_c, self.convolutions_f): 145 | x = F.relu(conv_1(x)) 146 | f0 = F.relu(conv_2(f0)) 147 | x_f0 = torch.cat((x, f0), dim=1).transpose(1, 2) 148 | x_f0 = self.interp(x_f0, self.len_org.expand(x.size(0))) 149 | x_f0 = x_f0.transpose(1, 2) 150 | x = x_f0[:, :self.dim_enc_c, 
:] 151 | f0 = x_f0[:, self.dim_enc_c:, :] 152 | 153 | 154 | x_f0 = x_f0.transpose(1, 2) 155 | x = x_f0[:, :, :self.dim_enc_c] 156 | f0 = x_f0[:, :, self.dim_enc_c:] 157 | 158 | # code 1 159 | x = self.lstm_c(x)[0] 160 | f0 = self.lstm_f(f0)[0] 161 | 162 | x_forward = x[:, :, :self.dim_neck_c] 163 | x_backward = x[:, :, self.dim_neck_c:] 164 | 165 | f0_forward = f0[:, :, :self.dim_neck_f] 166 | f0_backward = f0[:, :, self.dim_neck_f:] 167 | 168 | codes_c = torch.cat((x_forward[:,self.freq_c-1::self.freq_c,:], 169 | x_backward[:,::self.freq_c,:]), dim=-1) 170 | 171 | codes_f = torch.cat((f0_forward[:,self.freq_f-1::self.freq_f,:], 172 | f0_backward[:,::self.freq_f,:]), dim=-1) 173 | 174 | return codes_c, codes_f 175 | 176 | 177 | 178 | class Decoder_S(nn.Module): 179 | """Decoder module 180 | """ 181 | def __init__(self, hparams): 182 | super().__init__() 183 | self.dim_neck_c = hparams.dim_neck_c 184 | self.dim_neck_r = hparams.dim_neck_r 185 | self.dim_neck_f = hparams.dim_neck_f 186 | self.dim_emb = hparams.dim_spk_emb 187 | 188 | self.lstm = nn.LSTM(self.dim_neck_c*2+self.dim_neck_r*2+self.dim_neck_f*2+self.dim_emb, 189 | 512, 3, batch_first=True, bidirectional=True) 190 | 191 | self.linear_projection = LinearNorm(1024, self.dim_freq) 192 | 193 | def forward(self, x): 194 | 195 | outputs, _ = self.lstm(x) 196 | 197 | decoder_output = self.linear_projection(outputs) 198 | 199 | return decoder_output 200 | 201 | 202 | 203 | class Decoder_P(nn.Module): 204 | """For F0 converter 205 | """ 206 | def __init__(self, hparams): 207 | super().__init__() 208 | self.dim_neck_r = hparams.dim_neck_r 209 | self.dim_neck_f = hparams.dim_neck_f 210 | self.dim_f0 = hparams.dim_f0 211 | 212 | self.lstm = nn.LSTM(self.dim_neck_r*2+self.dim_neck_f*2, 213 | 256, 2, batch_first=True, bidirectional=True) 214 | 215 | self.linear_projection = LinearNorm(512, self.dim_f0) 216 | 217 | def forward(self, x): 218 | 219 | outputs, _ = self.lstm(x) 220 | 221 | decoder_output = 
self.linear_projection(outputs) 222 | 223 | return decoder_output 224 | 225 | 226 | 227 | class Encoder_cyc(nn.Module): 228 | """Encoder""" 229 | def __init__(self, hparams): 230 | super().__init__() 231 | 232 | self.encoder_cf = Encoder_cf(hparams) 233 | self.encoder_r = Encoder_r(hparams) 234 | self.encoder_t = Encoder_t(hparams) 235 | 236 | def forward(self, x_f0, x_org): 237 | 238 | x_1 = x_f0.transpose(2,1) 239 | codes_c, codes_f = self.encoder_cf(x_1) 240 | 241 | x_2 = x_org.transpose(2,1) 242 | codes_r = self.encoder_r(x_2, None) 243 | 244 | return codes_r, codes_c, codes_f 245 | 246 | 247 | class Decoder_cyc(nn.Module): 248 | """CycleFlow model""" 249 | def __init__(self, hparams): 250 | super().__init__() 251 | 252 | self.decoder_S = Decoder_S(hparams) 253 | self.decoder_P = Decoder_P(hparams) 254 | 255 | self.freq_c = hparams.freq_c 256 | self.freq_r = hparams.freq_r 257 | self.freq_f = hparams.freq_f 258 | self.freq_t = hparams.freq_t 259 | 260 | 261 | def forward(self, codes_t, codes_r, codes_c, codes_f, mode="test"): 262 | 263 | code_exp_c = codes_c.repeat_interleave(self.freq_c, dim=1) # content 264 | code_exp_f = codes_f.repeat_interleave(self.freq_f, dim=1) # pitch 265 | code_exp_r = codes_r.repeat_interleave(self.freq_r, dim=1) # rhythm 266 | code_exp_t = codes_t.repeat_interleave(self.freq_t, dim=1) # timbre 267 | 268 | Z_S = torch.cat((code_exp_c, code_exp_f, code_exp_r, code_exp_t), dim=-1) 269 | mel_outputs = self.decoder_S(Z_S) 270 | 271 | if mode == 'train': 272 | Z_P = torch.cat((code_exp_r, code_exp_f), dim=-1) 273 | f0_outputs = self.decoder_P(Z_P) 274 | return mel_outputs, f0_outputs 275 | else: 276 | return mel_outputs 277 | 278 | 279 | 280 | class InterpLnr(nn.Module): 281 | 282 | def __init__(self, hparams): 283 | super().__init__() 284 | self.max_len_seq = hparams.max_len_seq 285 | self.max_len_pad = hparams.max_len_pad 286 | 287 | self.min_len_seg = hparams.min_len_seg 288 | self.max_len_seg = hparams.max_len_seg 289 | 290 | 
self.max_num_seg = self.max_len_seq // self.min_len_seg + 1 291 | 292 | 293 | def pad_sequences(self, sequences): 294 | channel_dim = sequences[0].size()[-1] 295 | out_dims = (len(sequences), self.max_len_pad, channel_dim) 296 | out_tensor = sequences[0].data.new(*out_dims).fill_(0) 297 | 298 | for i, tensor in enumerate(sequences): 299 | length = tensor.size(0) 300 | out_tensor[i, :length, :] = tensor[:self.max_len_pad] 301 | 302 | return out_tensor 303 | 304 | 305 | def forward(self, x, len_seq): 306 | 307 | if not self.training: 308 | return x 309 | 310 | device = x.device 311 | batch_size = x.size(0) 312 | 313 | # indices of each sub segment 314 | indices = torch.arange(self.max_len_seg*2, device=device)\ 315 | .unsqueeze(0).expand(batch_size*self.max_num_seg, -1) 316 | # scales of each sub segment 317 | scales = torch.rand(batch_size*self.max_num_seg, 318 | device=device) + 0.5 319 | 320 | idx_scaled = indices / scales.unsqueeze(-1) 321 | idx_scaled_fl = torch.floor(idx_scaled) 322 | lambda_ = idx_scaled - idx_scaled_fl 323 | 324 | len_seg = torch.randint(low=self.min_len_seg, 325 | high=self.max_len_seg, 326 | size=(batch_size*self.max_num_seg,1), 327 | device=device) 328 | 329 | # end point of each segment 330 | idx_mask = idx_scaled_fl < (len_seg - 1) 331 | 332 | offset = len_seg.view(batch_size, -1).cumsum(dim=-1) 333 | # offset starts from the 2nd segment 334 | offset = F.pad(offset[:, :-1], (1,0), value=0).view(-1, 1) 335 | 336 | idx_scaled_org = idx_scaled_fl + offset 337 | 338 | len_seq_rp = torch.repeat_interleave(len_seq, self.max_num_seg) 339 | idx_mask_org = idx_scaled_org < (len_seq_rp - 1).unsqueeze(-1) 340 | 341 | idx_mask_final = idx_mask & idx_mask_org 342 | 343 | counts = idx_mask_final.sum(dim=-1).view(batch_size, -1).sum(dim=-1) 344 | 345 | index_1 = torch.repeat_interleave(torch.arange(batch_size, 346 | device=device), counts) 347 | 348 | index_2_fl = idx_scaled_org[idx_mask_final].long() 349 | index_2_cl = index_2_fl + 1 350 | 351 | y_fl 
= x[index_1, index_2_fl, :] 352 | y_cl = x[index_1, index_2_cl, :] 353 | lambda_f = lambda_[idx_mask_final].unsqueeze(-1) 354 | 355 | y = (1-lambda_f)*y_fl + lambda_f*y_cl 356 | 357 | sequences = torch.split(y, counts.tolist(), dim=0) 358 | 359 | seq_padded = self.pad_sequences(sequences) 360 | 361 | return seq_padded 362 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.19.5 2 | scipy==1.5.3 3 | torch==1.8.1 4 | librosa==0.8.1 5 | pysptk==0.1.18 6 | soundfile==0.10.2 7 | matplotlib==3.3.4 8 | tqdm==4.60.0 9 | six==1.16.0 10 | tensorboard==2.5.0 11 | python-speech-features==0.6 12 | wavenet-vocoder==0.1.1 13 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import torch 4 | from torch.backends import cudnn 5 | 6 | from solver import Solver 7 | from data_loader import get_loader 8 | from hparams import hparams, hparams_debug_string 9 | 10 | 11 | 12 | def str2bool(v): 13 | return v.lower() in ('true') 14 | 15 | def main(config): 16 | # For fast training. 17 | cudnn.benchmark = True 18 | 19 | # Create directories if not exist. 20 | if not os.path.exists(config.log_dir): 21 | os.makedirs(config.log_dir) 22 | if not os.path.exists(config.model_save_dir): 23 | os.makedirs(config.model_save_dir) 24 | if not os.path.exists(config.sample_dir): 25 | os.makedirs(config.sample_dir) 26 | 27 | # Data loader. 28 | vcc_loader = get_loader(hparams) 29 | 30 | # Solver for training 31 | solver = Solver(vcc_loader, config, hparams) 32 | 33 | solver.train() 34 | 35 | 36 | 37 | if __name__ == '__main__': 38 | parser = argparse.ArgumentParser() 39 | 40 | # Training configuration. 
41 | parser.add_argument('--num_iters', type=int, default=1000001, help='number of total iterations') 42 | parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G') 43 | parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for Adam optimizer') 44 | parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer') 45 | parser.add_argument('--resume_iters', type=int, default=None, help='resume training from this step') 46 | 47 | # Miscellaneous. 48 | parser.add_argument('--use_tensorboard', type=str2bool, default=True) 49 | parser.add_argument('--device_id', type=int, default=0) 50 | 51 | # Directories. 52 | parser.add_argument('--log_dir', type=str, default='run_cyc/logs') 53 | parser.add_argument('--model_save_dir', type=str, default='run_cyc/models') 54 | parser.add_argument('--sample_dir', type=str, default='run_cyc/samples') 55 | 56 | # Step size. 57 | parser.add_argument('--log_step', type=int, default=100) 58 | parser.add_argument('--valid_step', type=int, default=1000) 59 | parser.add_argument('--model_save_step', type=int, default=1000) 60 | parser.add_argument('--sample_step', type=int, default=10000) 61 | 62 | config = parser.parse_args() 63 | print(config) 64 | print(hparams_debug_string()) 65 | main(config) 66 | -------------------------------------------------------------------------------- /solver.py: -------------------------------------------------------------------------------- 1 | from model import Encoder_cyc as Encoder 2 | from model import Decoder_cyc as Decoder 3 | from model import InterpLnr 4 | import matplotlib.pyplot as plt 5 | import torch 6 | import torch.nn.functional as F 7 | import numpy as np 8 | import os 9 | import time 10 | import datetime 11 | import pickle 12 | 13 | from utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy 14 | 15 | 16 | class Solver(object): 17 | """Solver for training""" 18 | 19 | def __init__(self, vcc_loader, config, hparams): 20 | 
"""Initialize configurations.""" 21 | 22 | # Data loader. 23 | self.vcc_loader = vcc_loader 24 | self.hparams = hparams 25 | 26 | # Training configurations. 27 | self.num_iters = config.num_iters 28 | self.g_lr = config.g_lr 29 | self.beta1 = config.beta1 30 | self.beta2 = config.beta2 31 | self.resume_iters = config.resume_iters 32 | 33 | # Miscellaneous. 34 | self.use_tensorboard = config.use_tensorboard 35 | self.use_cuda = torch.cuda.is_available() 36 | self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu') 37 | 38 | # Directories. 39 | self.log_dir = config.log_dir 40 | self.sample_dir = config.sample_dir 41 | self.model_save_dir = config.model_save_dir 42 | 43 | # use demo data for simplicity 44 | # make your own validation set as needed 45 | self.validation_pt = pickle.load(open(self.hparams.demo_file, "rb")) 46 | 47 | # Step size. 48 | self.log_step = config.log_step 49 | self.valid_step = config.valid_step 50 | self.sample_step = config.sample_step 51 | self.model_save_step = config.model_save_step 52 | 53 | self.len_pad = self.hparams.max_len_pad 54 | 55 | # Build the model and tensorboard. 
56 | self.build_model() 57 | if self.use_tensorboard: 58 | self.build_tensorboard() 59 | 60 | 61 | def build_model(self): 62 | self.E = Encoder(self.hparams) 63 | self.D = Decoder(self.hparams) 64 | 65 | self.Interp = InterpLnr(self.hparams) 66 | 67 | self.g_optimizer = torch.optim.Adam([{'params': self.E.parameters()}, {'params': self.D.parameters()}], self.g_lr, [self.beta1, self.beta2]) 68 | self.print_network(self.E, 'E') 69 | self.print_network(self.D, 'D') 70 | 71 | self.E.to(self.device) 72 | self.D.to(self.device) 73 | self.Interp.to(self.device) 74 | 75 | 76 | def print_network(self, model, name): 77 | """Print out the network information.""" 78 | num_params = 0 79 | for p in model.parameters(): 80 | num_params += p.numel() 81 | print(model) 82 | print(name) 83 | print("The number of parameters: {}".format(num_params)) 84 | 85 | 86 | def print_optimizer(self, opt, name): 87 | print(opt) 88 | print(name) 89 | 90 | 91 | def restore_model(self, resume_iters): 92 | print('Loading the trained models from step {}...'.format(resume_iters)) 93 | G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters)) 94 | g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage) 95 | self.E.load_state_dict(g_checkpoint['encoder']) 96 | self.D.load_state_dict(g_checkpoint['decoder']) 97 | self.g_optimizer.load_state_dict(g_checkpoint['optimizer']) 98 | self.g_lr = self.g_optimizer.param_groups[0]['lr'] 99 | 100 | 101 | def build_tensorboard(self): 102 | """Build a tensorboard logger.""" 103 | from torch.utils.tensorboard import SummaryWriter 104 | self.writer = SummaryWriter(self.log_dir) 105 | 106 | 107 | def reset_grad(self): 108 | """Reset the gradient buffers.""" 109 | self.g_optimizer.zero_grad() 110 | 111 | 112 | #===================================================================================================================== 113 | 114 | 115 | 116 | def train(self): 117 | # Set data loader. 
118 | data_loader = self.vcc_loader 119 | 120 | # Fetch fixed inputs for debugging. 121 | data_iter = iter(data_loader) 122 | 123 | # Start training from scratch or resume training. 124 | start_iters = 0 125 | if self.resume_iters: 126 | print('Resuming ...') 127 | start_iters = self.resume_iters 128 | #self.num_iters += self.resume_iters 129 | self.restore_model(self.resume_iters) 130 | self.print_optimizer(self.g_optimizer, 'G_optimizer') 131 | 132 | # Learning rate cache for decaying. 133 | g_lr = self.g_lr 134 | print ('Current learning rates, g_lr: {}.'.format(g_lr)) 135 | 136 | # Print logs in specified order 137 | keys = ['G/loss', 'G/loss_x', 'G/loss_f0', 'G/loss_z'] 138 | 139 | # Start training. 140 | print('Start training...') 141 | start_time = time.time() 142 | for i in range(start_iters, self.num_iters): 143 | 144 | # =================================================================================== # 145 | # 1. Preprocess input data # 146 | # =================================================================================== # 147 | 148 | # Fetch real images and labels. 149 | try: 150 | x_real_org, emb_org, f0_org, len_org = next(data_iter) 151 | except: 152 | data_iter = iter(data_loader) 153 | x_real_org, emb_org, f0_org, len_org = next(data_iter) 154 | 155 | x_real_org = x_real_org.to(self.device) 156 | emb_org = emb_org.to(self.device) 157 | len_org = len_org.to(self.device) 158 | f0_org = f0_org.to(self.device) 159 | 160 | 161 | # =================================================================================== # 162 | # 2. 
Train the generator # 163 | # =================================================================================== # 164 | self.E = self.E.train() 165 | self.D = self.D.train() 166 | 167 | # Identity mapping loss 168 | x_f0 = torch.cat((x_real_org, f0_org), dim=-1) 169 | f0_org_q = quantize_f0_torch(x_f0[:,:,-1])[0] 170 | x_f0_intrp = self.Interp(x_f0, len_org) 171 | f0_org_intrp = quantize_f0_torch(x_f0_intrp[:,:,-1])[0] 172 | x_f0_intrp_org = torch.cat((x_f0_intrp[:,:,:-1], f0_org_intrp), dim=-1) 173 | 174 | codes_r, codes_c, codes_f = self.E(x_f0_intrp_org, x_real_org) 175 | x_identic, f0_identic = self.D(emb_org, codes_r, codes_c, codes_f, mode='train') 176 | 177 | z_select = torch.randint(0, 4, (1,))[0] 178 | ord_sfl = torch.randperm(x_real_org.shape[0]) 179 | if z_select == 0: 180 | codes_r = codes_r[ord_sfl] 181 | elif z_select == 1: 182 | codes_c = codes_c[ord_sfl] 183 | elif z_select == 2: 184 | codes_f = codes_f[ord_sfl] 185 | else: 186 | emb_org = emb_org[ord_sfl] 187 | x_identic_cyc, f0_identic_cyc = self.D(emb_org, codes_r, codes_c, codes_f, mode='train') 188 | x_f0_cyc = torch.cat((x_identic_cyc, f0_identic_cyc), dim=-1) 189 | codes_cyc_r, codes_cyc_c, codes_cyc_f = self.E(x_f0_cyc, x_identic_cyc) 190 | 191 | codes_org = torch.cat((codes_r.reshape(x_real_org.shape[0], -1), codes_c.reshape(x_real_org.shape[0], -1), codes_f.reshape(x_real_org.shape[0], -1)), dim=-1) 192 | codes_cyc = torch.cat((codes_cyc_r.reshape(x_real_org.shape[0], -1), codes_cyc_c.reshape(x_real_org.shape[0], -1), codes_cyc_f.reshape(x_real_org.shape[0], -1)), dim=-1) 193 | 194 | g_loss_x = 0.2 * F.mse_loss(x_real_org, x_identic, reduction='mean') 195 | g_loss_f0 = F.mse_loss(f0_org_q, f0_identic, reduction='mean') 196 | g_loss_z = F.mse_loss(codes_org, codes_cyc, reduction='mean') 197 | 198 | # Backward and optimize. 199 | g_loss = g_loss_x + g_loss_f0 + g_loss_z 200 | self.reset_grad() 201 | g_loss.backward() 202 | self.g_optimizer.step() 203 | 204 | # Logging. 
205 | loss = {} 206 | loss['G/loss'] = g_loss.item() 207 | loss['G/loss_x'] = g_loss_x.item() 208 | loss['G/loss_f0'] = g_loss_f0.item() 209 | loss['G/loss_z'] = g_loss_z.item() 210 | 211 | 212 | # =================================================================================== # 213 | # 4. Miscellaneous # 214 | # =================================================================================== # 215 | 216 | # Print out training information. 217 | if (i+1) % self.log_step == 0: 218 | et = time.time() - start_time 219 | et = str(datetime.timedelta(seconds=et))[:-7] 220 | log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters) 221 | for tag in keys: 222 | log += ", {}: {:.8f}".format(tag, loss[tag]) 223 | print(log) 224 | 225 | if self.use_tensorboard: 226 | for tag, value in loss.items(): 227 | self.writer.add_scalar(tag, value, i+1) 228 | 229 | 230 | # Save model checkpoints. 231 | if (i+1) % self.model_save_step == 0: 232 | G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1)) 233 | torch.save({'encoder': self.E.state_dict(), 234 | 'decoder': self.D.state_dict(), 235 | 'optimizer': self.g_optimizer.state_dict()}, G_path) 236 | print('Saved model checkpoints into {}...'.format(self.model_save_dir)) 237 | 238 | 239 | # Validation. 
240 | if (i+1) % self.valid_step == 0: 241 | self.E = self.E.eval() 242 | self.D = self.D.eval() 243 | with torch.no_grad(): 244 | loss_val = [] 245 | for val_sub in self.validation_pt: 246 | emb_val = torch.from_numpy(val_sub[1]).to(self.device) 247 | for k in range(2, 3): 248 | x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], self.len_pad) 249 | len_val = torch.tensor([val_sub[k][2]]).to(self.device) 250 | f0_val = np.pad(val_sub[k][1], (0, self.len_pad-val_sub[k][2]), 'constant', constant_values=(0, 0)) 251 | f0_quantized = quantize_f0_numpy(f0_val)[0] 252 | f0_onehot = f0_quantized[np.newaxis, :, :] 253 | f0_org_val = torch.from_numpy(f0_onehot).to(self.device) 254 | x_real_pad = torch.from_numpy(x_real_pad).to(self.device) 255 | x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1) 256 | codes_val_r, codes_val_c, codes_val_f = self.E(x_f0, x_real_pad) 257 | x_identic_val, f0_identic_val = self.D(emb_val, codes_val_r, codes_val_c, codes_val_f, mode='train') 258 | z_select = torch.randint(0, 4, (1,))[0] 259 | 260 | ord_sfl_val = torch.randperm(x_real_pad.shape[0]) 261 | if z_select == 0: 262 | codes_val_r = codes_val_r[ord_sfl_val] 263 | elif z_select == 1: 264 | codes_val_c = codes_val_c[ord_sfl_val] 265 | elif z_select == 2: 266 | codes_val_f = codes_val_f[ord_sfl_val] 267 | else: 268 | emb_val = emb_val[ord_sfl_val] 269 | x_identic_cyc_val, f0_identic_cyc_val = self.D(emb_val, codes_val_r, codes_val_c, codes_val_f, mode='train') 270 | x_f0_cyc_val = torch.cat((x_identic_cyc_val, f0_identic_cyc_val), dim=-1) 271 | codes_val_cyc_r, codes_val_cyc_c, codes_val_cyc_f = self.E(x_f0_cyc_val, x_identic_cyc_val) 272 | 273 | codes_val_org = torch.cat((codes_val_r.reshape(x_real_pad.shape[0], -1), codes_val_c.reshape(x_real_pad.shape[0], -1), codes_val_f.reshape(x_real_pad.shape[0], -1)), dim=-1) 274 | codes_val_cyc = torch.cat((codes_val_cyc_r.reshape(x_real_pad.shape[0], -1), codes_val_cyc_c.reshape(x_real_pad.shape[0], -1), 
codes_val_cyc_f.reshape(x_real_pad.shape[0], -1)), dim=-1) 275 | 276 | g_loss_val_x = 0.2 * F.mse_loss(x_real_pad, x_identic_val, reduction='mean') 277 | g_loss_val_f0 = F.mse_loss(f0_org_val, f0_identic_val, reduction='mean') 278 | g_loss_val_z = F.mse_loss(codes_val_org, codes_val_cyc, reduction='mean') 279 | g_loss_val = g_loss_val_x + g_loss_val_f0 + g_loss_val_z 280 | loss_val.append(g_loss_val.item()) 281 | val_loss = np.mean(loss_val) 282 | print('Validation loss: {}'.format(val_loss)) 283 | if self.use_tensorboard: 284 | self.writer.add_scalar('Validation_loss', val_loss, i+1) 285 | 286 | 287 | # plot test samples 288 | if (i+1) % self.sample_step == 0: 289 | self.E = self.E.eval() 290 | self.D = self.D.eval() 291 | with torch.no_grad(): 292 | for smp_sub in self.validation_pt: 293 | emb_smp = torch.from_numpy(smp_sub[1]).to(self.device) 294 | for k in range(2, 3): 295 | x_real_pad, _ = pad_seq_to_2(smp_sub[k][0][np.newaxis,:,:], self.len_pad) 296 | len_smp = torch.tensor([smp_sub[k][2]]).to(self.device) 297 | f0_smp = np.pad(smp_sub[k][1], (0, self.len_pad-smp_sub[k][2]), 'constant', constant_values=(0, 0)) 298 | f0_quantized = quantize_f0_numpy(f0_smp)[0] 299 | f0_onehot = f0_quantized[np.newaxis, :, :] 300 | f0_org_smp = torch.from_numpy(f0_onehot).to(self.device) 301 | x_real_pad = torch.from_numpy(x_real_pad).to(self.device) 302 | x_f0 = torch.cat((x_real_pad, f0_org_smp), dim=-1) 303 | x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_smp)), dim=-1) 304 | x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_smp), dim=-1) 305 | 306 | codes_r, codes_c, codes_f = self.E(x_f0, x_real_pad) 307 | x_identic_smp = self.D(emb_smp, codes_r, codes_c, codes_f, mode='test') 308 | codes_r, codes_c, codes_f = self.E(x_f0_F, x_real_pad) 309 | x_identic_woF = self.D(emb_smp, codes_r, codes_c, codes_f, mode='test') 310 | codes_r, codes_c, codes_f = self.E(x_f0, torch.zeros_like(x_real_pad)) 311 | x_identic_woR = self.D(emb_smp, codes_r, codes_c, codes_f, 
mode='test') 312 | codes_r, codes_c, codes_f = self.E(x_f0_C, x_real_pad) 313 | x_identic_woC = self.D(emb_smp, codes_r, codes_c, codes_f, mode='test') 314 | 315 | melsp_gd_pad = x_real_pad[0].cpu().numpy().T 316 | melsp_out = x_identic_smp[0].cpu().numpy().T 317 | melsp_woF = x_identic_woF[0].cpu().numpy().T 318 | melsp_woR = x_identic_woR[0].cpu().numpy().T 319 | melsp_woC = x_identic_woC[0].cpu().numpy().T 320 | 321 | min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC])) 322 | max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC])) 323 | 324 | fig, (ax1,ax2,ax3,ax4,ax5) = plt.subplots(5, 1, sharex=True) 325 | im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value) 326 | im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value) 327 | im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value) 328 | im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value) 329 | im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value) 330 | plt.savefig(f'{self.sample_dir}/{i+1}_{smp_sub[0]}_{k}.png', dpi=150) 331 | plt.close(fig) 332 | -------------------------------------------------------------------------------- /synthesis.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | """ 3 | Synthesis waveform from trained WaveNet. 
4 | 5 | Modified from https://github.com/r9y9/wavenet_vocoder 6 | """ 7 | 8 | import torch 9 | from tqdm import tqdm 10 | import librosa 11 | from tfcompat.hparamvc import hparams 12 | from wavenet_vocoder import builder 13 | 14 | torch.set_num_threads(4) 15 | use_cuda = torch.cuda.is_available() 16 | #device = torch.device("cuda" if use_cuda else "cpu") 17 | #device = torch.device("cpu") 18 | 19 | 20 | def build_model(): 21 | 22 | model = getattr(builder, hparams.builder)( 23 | out_channels=hparams.out_channels, 24 | layers=hparams.layers, 25 | stacks=hparams.stacks, 26 | residual_channels=hparams.residual_channels, 27 | gate_channels=hparams.gate_channels, 28 | skip_out_channels=hparams.skip_out_channels, 29 | cin_channels=hparams.cin_channels, 30 | gin_channels=hparams.gin_channels, 31 | weight_normalization=hparams.weight_normalization, 32 | n_speakers=hparams.n_speakers, 33 | dropout=hparams.dropout, 34 | kernel_size=hparams.kernel_size, 35 | upsample_conditional_features=hparams.upsample_conditional_features, 36 | upsample_scales=hparams.upsample_scales, 37 | freq_axis_kernel_size=hparams.freq_axis_kernel_size, 38 | scalar_input=True, 39 | legacy=hparams.legacy, 40 | ) 41 | return model 42 | 43 | 44 | 45 | def wavegen(model, c=None, device='cpu', tqdm=tqdm): 46 | """Generate waveform samples by WaveNet. 
47 | 48 | """ 49 | 50 | model.eval() 51 | model.make_generation_fast_() 52 | 53 | Tc = c.shape[0] 54 | upsample_factor = hparams.hop_size 55 | # Overwrite length according to feature size 56 | length = Tc * upsample_factor 57 | 58 | # B x C x T 59 | c = torch.FloatTensor(c.T).unsqueeze(0) 60 | 61 | initial_input = torch.zeros(1, 1, 1).fill_(0.0) 62 | 63 | # Transform data to GPU 64 | initial_input = initial_input.to(device) 65 | c = None if c is None else c.to(device) 66 | 67 | with torch.no_grad(): 68 | y_hat = model.incremental_forward( 69 | initial_input, c=c, g=None, T=length, tqdm=tqdm, softmax=True, quantize=True, 70 | log_scale_min=hparams.log_scale_min) 71 | 72 | y_hat = y_hat.view(-1).cpu().data.numpy() 73 | 74 | return y_hat 75 | -------------------------------------------------------------------------------- /tfcompat/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FantSun/CycleFlow/c1b5689a6b01f11503b1c5c208d4ecbbb681dc05/tfcompat/__init__.py -------------------------------------------------------------------------------- /tfcompat/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FantSun/CycleFlow/c1b5689a6b01f11503b1c5c208d4ecbbb681dc05/tfcompat/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /tfcompat/__pycache__/hparam.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FantSun/CycleFlow/c1b5689a6b01f11503b1c5c208d4ecbbb681dc05/tfcompat/__pycache__/hparam.cpython-36.pyc -------------------------------------------------------------------------------- /tfcompat/__pycache__/hparamvc.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FantSun/CycleFlow/c1b5689a6b01f11503b1c5c208d4ecbbb681dc05/tfcompat/__pycache__/hparamvc.cpython-36.pyc -------------------------------------------------------------------------------- /tfcompat/hparam.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Hyperparameter values.""" 16 | from __future__ import absolute_import 17 | from __future__ import division 18 | from __future__ import print_function 19 | 20 | import json 21 | import numbers 22 | import re 23 | 24 | import six 25 | 26 | ## from tensorflow.contrib.training.python.training import hparam_pb2 27 | ## from tensorflow.python.framework import ops 28 | ## from tensorflow.python.util import compat 29 | ## from tensorflow.python.util import deprecation 30 | 31 | # Define the regular expression for parsing a single clause of the input 32 | # (delimited by commas). A legal clause looks like: 33 | # []? = 34 | # where is either a single token or [] enclosed list of tokens. 35 | # For example: "var[1] = a" or "x = [1,2,3]" 36 | PARAM_RE = re.compile(r""" 37 | (?P[a-zA-Z][\w\.]*) # variable name: "var" or "x" 38 | (\[\s*(?P\d+)\s*\])? 
# (optional) index: "1" or None 39 | \s*=\s* 40 | ((?P[^,\[]*) # single value: "a" or None 41 | | 42 | \[(?P[^\]]*)\]) # list of values: None or "1,2,3" 43 | ($|,\s*)""", re.VERBOSE) 44 | 45 | 46 | def _parse_fail(name, var_type, value, values): 47 | """Helper function for raising a value error for bad assignment.""" 48 | raise ValueError( 49 | 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % 50 | (name, var_type.__name__, value, values)) 51 | 52 | 53 | def _reuse_fail(name, values): 54 | """Helper function for raising a value error for reuse of name.""" 55 | raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name, 56 | values)) 57 | 58 | 59 | def _process_scalar_value(name, parse_fn, var_type, m_dict, values, 60 | results_dictionary): 61 | """Update results_dictionary with a scalar value. 62 | 63 | Used to update the results_dictionary to be returned by parse_values when 64 | encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) 65 | 66 | Mutates results_dictionary. 67 | 68 | Args: 69 | name: Name of variable in assignment ("s" or "arr"). 70 | parse_fn: Function for parsing the actual value. 71 | var_type: Type of named variable. 72 | m_dict: Dictionary constructed from regex parsing. 73 | m_dict['val']: RHS value (scalar) 74 | m_dict['index']: List index value (or None) 75 | values: Full expression being parsed 76 | results_dictionary: The dictionary being updated for return by the parsing 77 | function. 78 | 79 | Raises: 80 | ValueError: If the name has already been used. 
81 | """ 82 | try: 83 | parsed_value = parse_fn(m_dict['val']) 84 | except ValueError: 85 | _parse_fail(name, var_type, m_dict['val'], values) 86 | 87 | # If no index is provided 88 | if not m_dict['index']: 89 | if name in results_dictionary: 90 | _reuse_fail(name, values) 91 | results_dictionary[name] = parsed_value 92 | else: 93 | if name in results_dictionary: 94 | # The name has already been used as a scalar, then it 95 | # will be in this dictionary and map to a non-dictionary. 96 | if not isinstance(results_dictionary.get(name), dict): 97 | _reuse_fail(name, values) 98 | else: 99 | results_dictionary[name] = {} 100 | 101 | index = int(m_dict['index']) 102 | # Make sure the index position hasn't already been assigned a value. 103 | if index in results_dictionary[name]: 104 | _reuse_fail('{}[{}]'.format(name, index), values) 105 | results_dictionary[name][index] = parsed_value 106 | 107 | 108 | def _process_list_value(name, parse_fn, var_type, m_dict, values, 109 | results_dictionary): 110 | """Update results_dictionary from a list of values. 111 | 112 | Used to update results_dictionary to be returned by parse_values when 113 | encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) 114 | 115 | Mutates results_dictionary. 116 | 117 | Args: 118 | name: Name of variable in assignment ("arr"). 119 | parse_fn: Function for parsing individual values. 120 | var_type: Type of named variable. 121 | m_dict: Dictionary constructed from regex parsing. 122 | m_dict['val']: RHS value (scalar) 123 | values: Full expression being parsed 124 | results_dictionary: The dictionary being updated for return by the parsing 125 | function. 126 | 127 | Raises: 128 | ValueError: If the name has an index or the values cannot be parsed. 
129 | """ 130 | if m_dict['index'] is not None: 131 | raise ValueError('Assignment of a list to a list index.') 132 | elements = filter(None, re.split('[ ,]', m_dict['vals'])) 133 | # Make sure the name hasn't already been assigned a value 134 | if name in results_dictionary: 135 | raise _reuse_fail(name, values) 136 | try: 137 | results_dictionary[name] = [parse_fn(e) for e in elements] 138 | except ValueError: 139 | _parse_fail(name, var_type, m_dict['vals'], values) 140 | 141 | 142 | def _cast_to_type_if_compatible(name, param_type, value): 143 | """Cast hparam to the provided type, if compatible. 144 | 145 | Args: 146 | name: Name of the hparam to be cast. 147 | param_type: The type of the hparam. 148 | value: The value to be cast, if compatible. 149 | 150 | Returns: 151 | The result of casting `value` to `param_type`. 152 | 153 | Raises: 154 | ValueError: If the type of `value` is not compatible with param_type. 155 | * If `param_type` is a string type, but `value` is not. 156 | * If `param_type` is a boolean, but `value` is not, or vice versa. 157 | * If `param_type` is an integer type, but `value` is not. 158 | * If `param_type` is a float type, but `value` is not a numeric type. 159 | """ 160 | fail_msg = ( 161 | "Could not cast hparam '%s' of type '%s' from value %r" % 162 | (name, param_type, value)) 163 | 164 | # Some callers use None, for which we can't do any casting/checking. :( 165 | if issubclass(param_type, type(None)): 166 | return value 167 | 168 | # Avoid converting a non-string type to a string. 169 | if (issubclass(param_type, (six.string_types, six.binary_type)) and 170 | not isinstance(value, (six.string_types, six.binary_type))): 171 | raise ValueError(fail_msg) 172 | 173 | # Avoid converting a number or string type to a boolean or vice versa. 174 | if issubclass(param_type, bool) != isinstance(value, bool): 175 | raise ValueError(fail_msg) 176 | 177 | # Avoid converting float to an integer (the reverse is fine). 
178 | if (issubclass(param_type, numbers.Integral) and 179 | not isinstance(value, numbers.Integral)): 180 | raise ValueError(fail_msg) 181 | 182 | # Avoid converting a non-numeric type to a numeric type. 183 | if (issubclass(param_type, numbers.Number) and 184 | not isinstance(value, numbers.Number)): 185 | raise ValueError(fail_msg) 186 | 187 | return param_type(value) 188 | 189 | 190 | def parse_values(values, type_map): 191 | """Parses hyperparameter values from a string into a python map. 192 | 193 | `values` is a string containing comma-separated `name=value` pairs. 194 | For each pair, the value of the hyperparameter named `name` is set to 195 | `value`. 196 | 197 | If a hyperparameter name appears multiple times in `values`, a ValueError 198 | is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). 199 | 200 | If a hyperparameter name in both an index assignment and scalar assignment, 201 | a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). 202 | 203 | The hyperparameter name may contain '.' symbols, which will result in an 204 | attribute name that is only accessible through the getattr and setattr 205 | functions. (And must be first explicit added through add_hparam.) 206 | 207 | WARNING: Use of '.' in your variable names is allowed, but is not well 208 | supported and not recommended. 209 | 210 | The `value` in `name=value` must follows the syntax according to the 211 | type of the parameter: 212 | 213 | * Scalar integer: A Python-parsable integer point value. E.g.: 1, 214 | 100, -12. 215 | * Scalar float: A Python-parsable floating point value. E.g.: 1.0, 216 | -.54e89. 217 | * Boolean: Either true or false. 218 | * Scalar string: A non-empty sequence of characters, excluding comma, 219 | spaces, and square brackets. E.g.: foo, bar_1. 220 | * List: A comma separated list of scalar values of the parameter type 221 | enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. 
222 | 223 | When index assignment is used, the corresponding type_map key should be the 224 | list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not 225 | "arr[1]"). 226 | 227 | Args: 228 | values: String. Comma separated list of `name=value` pairs where 229 | 'value' must follow the syntax described above. 230 | type_map: A dictionary mapping hyperparameter names to types. Note every 231 | parameter name in values must be a key in type_map. The values must 232 | conform to the types indicated, where a value V is said to conform to a 233 | type T if either V has type T, or V is a list of elements of type T. 234 | Hence, for a multidimensional parameter 'x' taking float values, 235 | 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. 236 | 237 | Returns: 238 | A python map mapping each name to either: 239 | * A scalar value. 240 | * A list of scalar values. 241 | * A dictionary mapping index numbers to scalar values. 242 | (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") 243 | 244 | Raises: 245 | ValueError: If there is a problem with input. 246 | * If `values` cannot be parsed. 247 | * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). 248 | * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', 249 | 'a[1]=1,a[1]=2', or 'a=1,a=[1]') 250 | """ 251 | results_dictionary = {} 252 | pos = 0 253 | while pos < len(values): 254 | m = PARAM_RE.match(values, pos) 255 | if not m: 256 | raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) 257 | # Check that there is a comma between parameters and move past it. 258 | pos = m.end() 259 | # Parse the values. 
class HParams(object):
  """Class to hold a set of hyperparameters as name-value pairs.

  A `HParams` object holds hyperparameters used to build and train a model.
  Values passed to the constructor become attributes of the instance:

  ```python
  hparams = HParams(learning_rate=0.1, num_hidden_units=100)
  hparams.learning_rate   # ==> 0.1
  hparams.num_hidden_units  # ==> 100
  ```

  Each hyperparameter's type (int, float, bool, string, or a list of one
  of those) is inferred from its construction value and enforced on later
  updates.  Values can be overridden in bulk from a comma-separated
  `name=value` string via `parse` (see `parse_values` for the syntax,
  e.g. from a single command-line flag) or from JSON via `parse_json`.
  """

  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.

  def __init__(self, hparam_def=None, model_structure=None, **kwargs):
    """Create an instance of `HParams` from keyword arguments.

    Args:
      hparam_def: Disabled in this TF-free version; must be None/falsy.
      model_structure: An instance of ModelStructure, defining the feature
        crosses to be used in the Trial.
      **kwargs: Key-value pairs where the key is the hyperparameter name
        and the value is the value for the parameter.

    Raises:
      ValueError: If `hparam_def` is provided, or a name is reserved, or a
        list value is empty.
    """
    # _hparam_types maps name -> (type, is_list): the scalar type of the
    # parameter, or of the list elements for multi-valued parameters.
    # This simplifies the implementation of parse().
    self._hparam_types = {}
    self._model_structure = model_structure
    if hparam_def:
      # Protocol-buffer initialization was removed with the TF dependency.
      raise ValueError('hparam_def has been disabled in this version')
    else:
      # py2 six.iteritems shim replaced with the py3 builtin.
      for name, value in kwargs.items():
        self.add_hparam(name, value)

  def add_hparam(self, name, value):
    """Adds {name, value} pair to hyperparameters.

    Args:
      name: Name of the hyperparameter.
      value: Value of the hyperparameter. Can be one of the following types:
        int, float, string, int list, float list, or string list.

    Raises:
      ValueError: If `name` is reserved or `value` is an empty list.
    """
    # 'name' could be the name of a pre-existing attribute of this object
    # (e.g. a method); refuse to shadow it.
    if getattr(self, name, None) is not None:
      raise ValueError('Hyperparameter name is reserved: %s' % name)
    if isinstance(value, (list, tuple)):
      if not value:
        raise ValueError(
            'Multi-valued hyperparameters cannot be empty: %s' % name)
      self._hparam_types[name] = (type(value[0]), True)
    else:
      self._hparam_types[name] = (type(value), False)
    setattr(self, name, value)

  def set_hparam(self, name, value):
    """Set the value of an existing hyperparameter.

    Verifies that the value's type matches the registered type.

    Args:
      name: Name of the hyperparameter.
      value: New value of the hyperparameter.

    Raises:
      ValueError: If there is a type or list/scalar mismatch.
    """
    param_type, is_list = self._hparam_types[name]
    if isinstance(value, list):
      if not is_list:
        raise ValueError(
            'Must not pass a list for single-valued parameter: %s' % name)
      setattr(self, name, [
          _cast_to_type_if_compatible(name, param_type, v) for v in value])
    else:
      if is_list:
        raise ValueError(
            'Must pass a list for multi-valued parameter: %s.' % name)
      setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))

  def del_hparam(self, name):
    """Removes the hyperparameter with key 'name', if present.

    Args:
      name: Name of the hyperparameter.
    """
    if hasattr(self, name):
      delattr(self, name)
      del self._hparam_types[name]

  def parse(self, values):
    """Override hyperparameter values, parsing new values from a string.

    See parse_values for more detail on the allowed format for values.

    Args:
      values: String. Comma separated list of `name=value` pairs.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values` cannot be parsed.
    """
    type_map = {name: t[0] for name, t in self._hparam_types.items()}
    values_map = parse_values(values, type_map)
    return self.override_from_dict(values_map)

  def override_from_dict(self, values_dict):
    """Override hyperparameter values from a dictionary.

    Args:
      values_dict: Dictionary of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If a value in `values_dict` has an incompatible type.
    """
    for name, value in values_dict.items():
      self.set_hparam(name, value)
    return self

  def set_from_map(self, values_map):
    """DEPRECATED. Use override_from_dict."""
    return self.override_from_dict(values_dict=values_map)

  def set_model_structure(self, model_structure):
    """Set the ModelStructure instance carried by this object."""
    self._model_structure = model_structure

  def get_model_structure(self):
    """Return the ModelStructure instance carried by this object."""
    return self._model_structure

  def to_json(self, indent=None, separators=None, sort_keys=False):
    """Serializes the hyperparameters into JSON.

    Args:
      indent: If a non-negative integer, pretty-print with that indent
        level; 0 or negative inserts only newlines; None (default) is the
        most compact representation.
      separators: Optional `(item_separator, key_separator)` tuple.
        Default is `(', ', ': ')`.
      sort_keys: If `True`, the output dictionaries will be sorted by key.

    Returns:
      A JSON string.
    """
    return json.dumps(
        self.values(),
        indent=indent,
        separators=separators,
        sort_keys=sort_keys)

  def parse_json(self, values_json):
    """Override hyperparameter values from a json object string.

    Args:
      values_json: String containing a json object of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values_json` cannot be parsed.
    """
    values_map = json.loads(values_json)
    return self.override_from_dict(values_map)

  def values(self):
    """Return the hyperparameter values as a Python dictionary.

    Returns:
      A dictionary mapping hyperparameter names to their values.
    """
    return {n: getattr(self, n) for n in self._hparam_types.keys()}

  def get(self, key, default=None):
    """Returns the value of `key` if it exists, else `default`."""
    if key in self._hparam_types:
      # Ensure that a non-None default is compatible with the parameter
      # type before ignoring it in favor of the stored value.
      if default is not None:
        param_type, is_param_list = self._hparam_types[key]
        type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
        fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
                    'default=%s' % (key, type_str, default))

        is_default_list = isinstance(default, list)
        if is_param_list != is_default_list:
          raise ValueError(fail_msg)

        try:
          if is_default_list:
            for value in default:
              _cast_to_type_if_compatible(key, param_type, value)
          else:
            _cast_to_type_if_compatible(key, param_type, default)
        except ValueError as e:
          raise ValueError('%s. %s' % (fail_msg, e))

      return getattr(self, key)

    return default

  def __contains__(self, key):
    return key in self._hparam_types

  def __str__(self):
    return str(sorted(self.values().items()))

  def __repr__(self):
    return '%s(%s)' % (type(self).__name__, self.__str__())

  @staticmethod
  def _get_kind_name(param_type, is_list):
    """Returns the serialization field name for a parameter type.

    Args:
      param_type: Data type of the hparam.
      is_list: Whether this is a list.

    Returns:
      A string representation of the field name, e.g. 'int64_list'.

    Raises:
      ValueError: If the parameter type is not recognized.
    """
    if issubclass(param_type, bool):
      # This check must happen before the int check, since Python
      # considers bool to be a subclass of int.
      typename = 'bool'
    elif issubclass(param_type, int):
      # 'int' maps to 'int64' for wire-format compatibility.
      typename = 'int64'
    elif issubclass(param_type, (str, bytes)):
      # 'str' and 'bytes' both map to 'bytes' for wire-format compatibility.
      typename = 'bytes'
    elif issubclass(param_type, float):
      typename = 'float'
    else:
      raise ValueError('Unsupported parameter type: %s' % str(param_type))

    suffix = 'list' if is_list else 'value'
    return '_'.join([typename, suffix])
696 | ## """ 697 | ## hparam_proto = hparam_pb2.HParamDef() 698 | ## for name in self._hparam_types: 699 | ## # Parse the values. 700 | ## param_type, is_list = self._hparam_types.get(name, (None, None)) 701 | ## kind = HParams._get_kind_name(param_type, is_list) 702 | ## 703 | ## if is_list: 704 | ## if kind.startswith('bytes'): 705 | ## v_list = [compat.as_bytes(v) for v in getattr(self, name)] 706 | ## else: 707 | ## v_list = [v for v in getattr(self, name)] 708 | ## getattr(hparam_proto.hparam[name], kind).value.extend(v_list) 709 | ## else: 710 | ## v = getattr(self, name) 711 | ## if kind.startswith('bytes'): 712 | ## v = compat.as_bytes(getattr(self, name)) 713 | ## setattr(hparam_proto.hparam[name], kind, v) 714 | ## 715 | ## return hparam_proto 716 | 717 | ## @staticmethod 718 | ## def from_proto(hparam_def, import_scope=None): # pylint: disable=unused-argument 719 | ## return HParams(hparam_def=hparam_def) 720 | 721 | 722 | ## ops.register_proto_function( 723 | ## 'hparams', 724 | ## proto_type=hparam_pb2.HParamDef, 725 | ## to_proto=HParams.to_proto, 726 | ## from_proto=HParams.from_proto) 727 | -------------------------------------------------------------------------------- /tfcompat/hparamvc.py: -------------------------------------------------------------------------------- 1 | # NOTE: If you want full control for model architecture. please take a look 2 | # at the code and change whatever you want. Some hyper parameters are hardcoded. 
class Map(dict):
    """A dict subclass that also supports attribute-style (dot) access.

    Example:
        m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])

    Credits to epool:
    https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
    """

    def __init__(self, *args, **kwargs):
        super(Map, self).__init__(*args, **kwargs)
        # Re-set every entry through __setitem__ so __dict__ mirrors the mapping.
        for arg in args:
            if isinstance(arg, dict):
                for k, v in arg.items():
                    self[k] = v

        if kwargs:
            # BUG FIX: the original used kwargs.iteritems(), which is Python 2
            # only and raises AttributeError on Python 3.
            for k, v in kwargs.items():
                self[k] = v

    def __getattr__(self, attr):
        # Missing keys resolve to None (dict.get default) instead of raising
        # AttributeError — callers rely on this lenient lookup.
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super(Map, self).__setitem__(key, value)
        # Keep __dict__ in sync so both m['k'] and m.k observe the same value.
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(Map, self).__delitem__(key)
        del self.__dict__[key]


# Default hyperparameters:
hparams = Map({
    'name': "wavenet_vocoder",

    # Convenient model builder
    'builder': "wavenet",

    # Input type:
    # 1. raw [-1, 1]
    # 2. mulaw [-1, 1]
    # 3. mulaw-quantize [0, mu]
    # If input_type is raw or mulaw, network assumes scalar input and
    # discretized mixture of logistic distributions output, otherwise one-hot
    # input and softmax output are assumed.
    # **NOTE**: if you change one of the two parameters below, you need to
    # re-run preprocessing before training.
    'input_type': "raw",
    'quantize_channels': 65536,  # 65536 or 256

    # Audio:
    'sample_rate': 16000,
    # this is only valid when mulaw is True
    'silence_threshold': 2,
    'num_mels': 80,
    'fmin': 125,
    'fmax': 7600,
    'fft_size': 1024,
    # shift can be specified by either hop_size or frame_shift_ms
    'hop_size': 256,
    'frame_shift_ms': None,
    'min_level_db': -100,
    'ref_level_db': 20,
    # whether to rescale waveform or not.
    # Let x be an input waveform; the rescaled waveform y is given by:
    # y = x / np.abs(x).max() * rescaling_max
    'rescaling': True,
    'rescaling_max': 0.999,
    # mel-spectrogram is normalized to [0, 1] for each utterance and clipping
    # may happen depending on min_level_db and ref_level_db, causing clipping
    # noise. If False, an assertion is added to ensure no clipping happens.
    'allow_clipping_in_normalization': True,

    # Mixture of logistic distributions:
    'log_scale_min': float(-32.23619130191664),

    # Model:
    # This should equal `quantize_channels` if mu-law quantize is enabled,
    # otherwise num_mixture * 3 (pi, mean, log_scale)
    'out_channels': 10 * 3,
    'layers': 24,
    'stacks': 4,
    'residual_channels': 512,
    'gate_channels': 512,  # split into 2 groups internally for gated activation
    'skip_out_channels': 256,
    'dropout': 1 - 0.95,
    'kernel_size': 3,
    # If True, apply weight normalization the same way as DeepVoice3
    'weight_normalization': True,
    # Use legacy code or not. Default is True since we already provided a model
    # based on the legacy code that can generate high-quality audio.
    # Ref: https://github.com/r9y9/wavenet_vocoder/pull/73
    'legacy': True,

    # Local conditioning (set negative value to disable)
    'cin_channels': 80,
    # If True, use transposed convolutions to upsample conditional features,
    # otherwise repeat features to adjust time resolution
    'upsample_conditional_features': True,
    # should satisfy np.prod(upsample_scales) == hop_size
    'upsample_scales': [4, 4, 4, 4],
    # Freq-axis kernel size for the upsampling network
    'freq_axis_kernel_size': 3,

    # Global conditioning (set negative value to disable)
    # currently limited to speaker embedding;
    # this should only be enabled for multi-speaker datasets
    'gin_channels': -1,  # i.e., speaker embedding dim
    'n_speakers': -1,

    # Data loader
    'pin_memory': True,
    'num_workers': 2,

    # train/test
    # test size can be specified as a portion or a number of samples
    'test_size': 0.0441,  # 50 for CMU ARCTIC single speaker
    'test_num_samples': None,
    'random_state': 1234,

    # Loss

    # Training:
    'batch_size': 2,
    'adam_beta1': 0.9,
    'adam_beta2': 0.999,
    'adam_eps': 1e-8,
    'amsgrad': False,
    'initial_learning_rate': 1e-3,
    # see lrschedule.py for available lr_schedule
    'lr_schedule': "noam_learning_rate_decay",
    'lr_schedule_kwargs': {},  # {"anneal_rate": 0.5, "anneal_interval": 50000},
    'nepochs': 2000,
    'weight_decay': 0.0,
    'clip_thresh': -1,
    # max time steps can be specified either as seconds or as steps;
    # if both are None, full audio samples are used in a batch
    'max_time_sec': None,
    'max_time_steps': 8000,
    # Hold moving-averaged parameters and use them for evaluation
    'exponential_moving_average': True,
    # averaged = decay * averaged + (1 - decay) * x
    'ema_decay': 0.9999,

    # Save
    # per-step intervals
    'checkpoint_interval': 10000,
    'train_eval_interval': 10000,
    # per-epoch interval
    'test_eval_epoch_interval': 5,
    'save_optimizer_state': True,

    # Eval:
})


def hparams_debug_string():
    """Return a multi-line, alphabetically sorted dump of all hyperparameters."""
    # BUG FIX: the original did `values = hparams.values()` and then indexed
    # `values[name]`. Because Map subclasses dict, .values() resolves to
    # dict.values and returns a non-subscriptable dict_values view, raising
    # TypeError. Index the mapping itself instead.
    hp = [' %s: %s' % (name, hparams[name]) for name in sorted(hparams)]
    return 'Hyperparameters:\n' + '\n'.join(hp)


# ---- utils.py ----
# NOTE: `import copy` and `from librosa.filters import mel` were removed —
# neither name is used anywhere in this (fully visible) file.
import torch
import numpy as np
from scipy import signal
from scipy.signal import get_window


def butter_highpass(cutoff, fs, order=5):
    """Design a Butterworth high-pass filter.

    Args:
        cutoff: Cutoff frequency in Hz.
        fs: Sampling rate in Hz.
        order: Filter order.

    Returns:
        (b, a) numerator/denominator polynomial coefficients for the filter.
    """
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
    return b, a


def pySTFT(x, fft_length=1024, hop_length=256):
    """Magnitude STFT of a 1-D signal.

    Frames are extracted with a zero-copy strided view and windowed with a
    periodic Hann window.

    Args:
        x: 1-D waveform array.
        fft_length: FFT size (also the frame length).
        hop_length: Hop between successive frames.

    Returns:
        Magnitude spectrogram of shape (fft_length // 2 + 1, n_frames).
    """
    # Reflect-pad by half a window so frames are centered on the signal.
    x = np.pad(x, int(fft_length // 2), mode='reflect')

    noverlap = fft_length - hop_length
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // hop_length, fft_length)
    strides = x.strides[:-1] + (hop_length * x.strides[-1], x.strides[-1])
    result = np.lib.stride_tricks.as_strided(x, shape=shape,
                                             strides=strides)

    fft_window = get_window('hann', fft_length, fftbins=True)
    result = np.fft.rfft(fft_window * result, n=fft_length).T

    return np.abs(result)


def speaker_normalization(f0, index_nonzero, mean_f0, std_f0):
    """Normalize voiced log-F0 frames to [0, 1] given speaker statistics.

    Voiced frames (selected by index_nonzero) are z-scored, squashed by a
    factor of 4, clipped to [-1, 1] and mapped to [0, 1]. Unvoiced frames are
    left untouched.
    """
    # f0 is logf0
    f0 = f0.astype(float).copy()
    f0[index_nonzero] = (f0[index_nonzero] - mean_f0) / std_f0 / 4.0
    f0[index_nonzero] = np.clip(f0[index_nonzero], -1, 1)
    f0[index_nonzero] = (f0[index_nonzero] + 1) / 2.0
    return f0


def quantize_f0_numpy(x, num_bins=256):
    """One-hot quantize a normalized log-F0 contour (numpy version).

    Args:
        x: 1-D array of log-F0 values in [0, 1]; values <= 0 mark unvoiced.
        num_bins: Number of quantization bins for voiced frames.

    Returns:
        (enc, idx): enc is a float32 one-hot array of shape
        (len(x), num_bins + 1) — bin 0 is reserved for unvoiced frames —
        and idx holds the int64 bin indices.
    """
    # x is logf0
    assert x.ndim == 1
    x = x.astype(float).copy()
    uv = (x <= 0)
    x[uv] = 0.0
    assert (x >= 0).all() and (x <= 1).all()
    x = np.round(x * (num_bins - 1))
    x = x + 1  # shift voiced bins up by one; bin 0 stays unvoiced
    x[uv] = 0.0
    enc = np.zeros((len(x), num_bins + 1), dtype=np.float32)
    enc[np.arange(len(x)), x.astype(np.int32)] = 1.0
    return enc, x.astype(np.int64)


def quantize_f0_torch(x, num_bins=256):
    """One-hot quantize a normalized log-F0 contour (torch version).

    Args:
        x: Tensor of log-F0 values in [0, 1] with leading batch dimension;
           values <= 0 mark unvoiced.
        num_bins: Number of quantization bins for voiced frames.

    Returns:
        (enc, idx): enc has shape (B, T, num_bins + 1) with bin 0 reserved
        for unvoiced frames; idx has shape (B, T) with int64 bin indices.
    """
    # x is logf0
    B = x.size(0)
    x = x.view(-1).clone()
    uv = (x <= 0)
    x[uv] = 0
    assert (x >= 0).all() and (x <= 1).all()
    x = torch.round(x * (num_bins - 1))
    x = x + 1  # shift voiced bins up by one; bin 0 stays unvoiced
    x[uv] = 0
    enc = torch.zeros((x.size(0), num_bins + 1), device=x.device)
    enc[torch.arange(x.size(0)), x.long()] = 1
    return enc.view(B, -1, num_bins + 1), x.view(B, -1).long()


def get_mask_from_lengths(lengths, max_len):
    """Return a (B, max_len) bool mask, True at padded (out-of-length) steps."""
    ids = torch.arange(0, max_len, device=lengths.device)
    mask = (ids >= lengths.unsqueeze(1)).bool()
    return mask


def pad_seq_to_2(x, len_out=128):
    """Zero-pad a (B, T, D) array along the time axis to T == len_out.

    Returns:
        (padded, len_pad): the padded array and the number of frames added.

    Raises:
        AssertionError: if x is already longer than len_out.
    """
    len_pad = (len_out - x.shape[1])
    assert len_pad >= 0
    return np.pad(x, ((0, 0), (0, len_pad), (0, 0)), 'constant'), len_pad