├── .gitignore
├── LICENSE
├── README.md
├── audio
│   ├── __init__.py
│   ├── audio_processing.py
│   ├── stft.py
│   └── tools.py
├── config
│   └── LJSpeech
│       ├── model.yaml
│       ├── preprocess.yaml
│       └── train.yaml
├── dataset.py
├── evaluate.py
├── hifigan
│   ├── LICENSE
│   ├── __init__.py
│   ├── config.json
│   ├── generator_LJSpeech.pth.tar.zip
│   ├── generator_universal.pth.tar.zip
│   └── models.py
├── img
│   ├── model.png
│   ├── tensorboard_audio.png
│   ├── tensorboard_loss.png
│   └── tensorboard_spec.png
├── lexicon
│   ├── librispeech-lexicon.txt
│   └── pinyin-lexicon-r.txt
├── model
│   ├── FastPitchFormant.py
│   ├── __init__.py
│   ├── blocks.py
│   ├── loss.py
│   ├── modules.py
│   └── optimizer.py
├── prepare_align.py
├── preprocess.py
├── preprocessed_data
│   └── LJSpeech
│       ├── speakers.json
│       ├── stats.json
│       ├── train.txt
│       └── val.txt
├── preprocessor
│   ├── ljspeech.py
│   └── preprocessor.py
├── requirements.txt
├── synthesize.py
├── text
│   ├── __init__.py
│   ├── cleaners.py
│   ├── cmudict.py
│   ├── numbers.py
│   ├── pinyin.py
│   └── symbols.py
├── train.py
└── utils
    ├── model.py
    └── tools.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | __pycache__
107 | .vscode
108 | .DS_Store
109 |
110 | # MFA
111 | montreal-forced-aligner/
112 |
113 | # data, checkpoint, and models
114 | raw_data/
115 | output/
116 | *.npy
117 | TextGrid/
118 | hifigan/*.pth.tar
119 | *.out
120 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Keon Lee
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FastPitchFormant - PyTorch Implementation
2 |
3 | PyTorch Implementation of [FastPitchFormant: Source-filter based Decomposed Modeling
4 | for Speech Synthesis](https://arxiv.org/abs/2106.15123).
5 |
6 |
7 | ![](./img/model.png)
8 |
9 |
10 | # Quickstart
11 |
12 | ## Dependencies
13 | You can install the Python dependencies with
14 | ```
15 | pip3 install -r requirements.txt
16 | ```
17 |
18 | ## Inference
19 |
20 | You have to download the [pretrained models](https://drive.google.com/drive/folders/1Pa7wNfmtt0VfWxFNA3xdChqLsYFTvYuA?usp=sharing) and put them in ``output/ckpt/LJSpeech/``.
21 |
22 | For English single-speaker TTS, run
23 | ```
24 | python3 synthesize.py --text "YOUR_DESIRED_TEXT" --restore_step 600000 --mode single -p config/LJSpeech/preprocess.yaml -m config/LJSpeech/model.yaml -t config/LJSpeech/train.yaml
25 | ```
26 | The generated utterances will be put in ``output/result/``.
27 |
28 |
29 | ## Batch Inference
30 | Batch inference is also supported; try
31 |
32 | ```
33 | python3 synthesize.py --source preprocessed_data/LJSpeech/val.txt --restore_step 600000 --mode batch -p config/LJSpeech/preprocess.yaml -m config/LJSpeech/model.yaml -t config/LJSpeech/train.yaml
34 | ```
35 | to synthesize all utterances in ``preprocessed_data/LJSpeech/val.txt``.
36 |
37 | ## Controllability
38 | The pitch and speaking rate of the synthesized utterances can be controlled by specifying the desired pitch and duration ratios.
39 | For example, one can shorten the phoneme durations by 20% (i.e., speak faster) and lower the pitch by 20% with
40 |
41 | ```
42 | python3 synthesize.py --text "YOUR_DESIRED_TEXT" --restore_step 600000 --mode single -p config/LJSpeech/preprocess.yaml -m config/LJSpeech/model.yaml -t config/LJSpeech/train.yaml --duration_control 0.8 --pitch_control 0.8
43 | ```
44 |
45 | # Training
46 |
47 | ## Datasets
48 |
49 | The supported datasets are
50 |
51 | - [LJSpeech](https://keithito.com/LJ-Speech-Dataset/): a single-speaker English dataset consisting of 13,100 short audio clips of a female speaker reading passages from 7 non-fiction books, approximately 24 hours in total.
52 |
53 | ## Preprocessing
54 |
55 | First, run
56 | ```
57 | python3 prepare_align.py config/LJSpeech/preprocess.yaml
58 | ```
59 | to prepare the audio and text files for alignment.
60 |
61 | As described in the paper, [Montreal Forced Aligner](https://montreal-forced-aligner.readthedocs.io/en/latest/) (MFA) is used to obtain the alignments between the utterances and the phoneme sequences.
62 | Alignments for the LJSpeech dataset are provided [here](https://drive.google.com/drive/folders/1DBRkALpPd6FL9gjHMmMEdHODmkgNIIK4?usp=sharing).
63 | You have to unzip the files into ``preprocessed_data/LJSpeech/TextGrid/``.
64 |
65 | After that, run the preprocessing script by
66 | ```
67 | python3 preprocess.py config/LJSpeech/preprocess.yaml
68 | ```
69 |
70 | Alternatively, you can align the corpus yourself.
71 | Download the official MFA package and run
72 | ```
73 | ./montreal-forced-aligner/bin/mfa_align raw_data/LJSpeech/ lexicon/librispeech-lexicon.txt english preprocessed_data/LJSpeech
74 | ```
75 | or
76 | ```
77 | ./montreal-forced-aligner/bin/mfa_train_and_align raw_data/LJSpeech/ lexicon/librispeech-lexicon.txt preprocessed_data/LJSpeech
78 | ```
79 |
80 | to align the corpus, and then run the preprocessing script:
81 | ```
82 | python3 preprocess.py config/LJSpeech/preprocess.yaml
83 | ```
84 |
85 | ## Training
86 |
87 | Train your model with
88 | ```
89 | python3 train.py -p config/LJSpeech/preprocess.yaml -m config/LJSpeech/model.yaml -t config/LJSpeech/train.yaml
90 | ```
91 |
92 | # TensorBoard
93 |
94 | Use
95 | ```
96 | tensorboard --logdir output/log/LJSpeech
97 | ```
98 |
99 | to serve TensorBoard on your localhost.
100 | The loss curves, synthesized mel-spectrograms, and audio samples are shown.
101 |
102 | 
103 | 
104 | 
105 |
106 | # Implementation Issues
107 |
108 | - The current implementation and the pre-trained model use normalized pitch values. In my experiments, pitch controllability is not very dynamic with the proposed pitch shifts. You may set `normalization` to `False` in `./config/LJSpeech/preprocess.yaml` if you need the wider pitch range described in the paper.
109 | - Please note that the paper trains the model for 1000k steps, whereas the pre-trained model provided here is trained for 600k steps.
110 | - **HiFi-GAN** is used for vocoding instead of **VocGAN**.
111 |
112 | # Citation
113 |
114 | ```
115 | @misc{lee2021fastpitchformant,
116 | author = {Lee, Keon},
117 | title = {FastPitchFormant},
118 | year = {2021},
119 | publisher = {GitHub},
120 | journal = {GitHub repository},
121 | howpublished = {\url{https://github.com/keonlee9420/FastPitchFormant}}
122 | }
123 | ```
124 |
125 | # References
126 | - [FastPitchFormant: Source-filter based Decomposed Modeling
127 | for Speech Synthesis](https://arxiv.org/abs/2106.15123)
128 | - [ming024's FastSpeech2](https://github.com/ming024/FastSpeech2)
129 |
--------------------------------------------------------------------------------
/audio/__init__.py:
--------------------------------------------------------------------------------
1 | import audio.tools
2 | import audio.stft
3 | import audio.audio_processing
4 |
--------------------------------------------------------------------------------
/audio/audio_processing.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import librosa.util as librosa_util
4 | from scipy.signal import get_window
5 |
6 |
7 | def window_sumsquare(
8 | window,
9 | n_frames,
10 | hop_length,
11 | win_length,
12 | n_fft,
13 | dtype=np.float32,
14 | norm=None,
15 | ):
16 | """
17 | # from librosa 0.6
18 | Compute the sum-square envelope of a window function at a given hop length.
19 |
20 | This is used to estimate modulation effects induced by windowing
21 | observations in short-time fourier transforms.
22 |
23 | Parameters
24 | ----------
25 | window : string, tuple, number, callable, or list-like
26 | Window specification, as in `get_window`
27 |
28 | n_frames : int > 0
29 | The number of analysis frames
30 |
31 | hop_length : int > 0
32 | The number of samples to advance between frames
33 |
34 | win_length : [optional]
35 | The length of the window function. By default, this matches `n_fft`.
36 |
37 | n_fft : int > 0
38 | The length of each analysis frame.
39 |
40 | dtype : np.dtype
41 | The data type of the output
42 |
43 | Returns
44 | -------
45 | wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
46 | The sum-squared envelope of the window function
47 | """
48 | if win_length is None:
49 | win_length = n_fft
50 |
51 | n = n_fft + hop_length * (n_frames - 1)
52 | x = np.zeros(n, dtype=dtype)
53 |
54 | # Compute the squared window at the desired length
55 | win_sq = get_window(window, win_length, fftbins=True)
56 | win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
57 | win_sq = librosa_util.pad_center(win_sq, n_fft)
58 |
59 | # Fill the envelope
60 | for i in range(n_frames):
61 | sample = i * hop_length
62 | x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
63 | return x
64 |
65 |
66 | def griffin_lim(magnitudes, stft_fn, n_iters=30):
67 | """
68 | PARAMS
69 | ------
70 | magnitudes: spectrogram magnitudes
71 | stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
72 | """
73 |
74 | angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
75 | angles = angles.astype(np.float32)
76 | angles = torch.autograd.Variable(torch.from_numpy(angles))
77 | signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
78 |
79 | for i in range(n_iters):
80 | _, angles = stft_fn.transform(signal)
81 | signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
82 | return signal
83 |
84 |
85 | def dynamic_range_compression(x, C=1, clip_val=1e-5):
86 | """
87 | PARAMS
88 | ------
89 | C: compression factor
90 | """
91 | return torch.log(torch.clamp(x, min=clip_val) * C)
92 |
93 |
94 | def dynamic_range_decompression(x, C=1):
95 | """
96 | PARAMS
97 | ------
98 | C: compression factor used to compress
99 | """
100 | return torch.exp(x) / C
101 |
--------------------------------------------------------------------------------
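A quick sanity check (a sketch, not repo code) for the two dynamic-range helpers above: for inputs above `clip_val`, compression and decompression are exact inverses, since `exp(log(x * C)) / C == x`.

```python
import torch

from audio.audio_processing import (
    dynamic_range_compression,
    dynamic_range_decompression,
)

# values above clip_val (1e-5) round-trip through compress -> decompress
x = torch.rand(4, 80, 100) + 1e-3
y = dynamic_range_decompression(dynamic_range_compression(x))
assert torch.allclose(x, y, atol=1e-6)
```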
/audio/stft.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import numpy as np
4 | from scipy.signal import get_window
5 | from librosa.util import pad_center, tiny
6 | from librosa.filters import mel as librosa_mel_fn
7 |
8 | from audio.audio_processing import (
9 | dynamic_range_compression,
10 | dynamic_range_decompression,
11 | window_sumsquare,
12 | )
13 |
14 |
15 | class STFT(torch.nn.Module):
16 | """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
17 |
18 | def __init__(self, filter_length, hop_length, win_length, window="hann"):
19 | super(STFT, self).__init__()
20 | self.filter_length = filter_length
21 | self.hop_length = hop_length
22 | self.win_length = win_length
23 | self.window = window
24 | self.forward_transform = None
25 | scale = self.filter_length / self.hop_length
26 | fourier_basis = np.fft.fft(np.eye(self.filter_length))
27 |
28 | cutoff = int((self.filter_length / 2 + 1))
29 | fourier_basis = np.vstack(
30 | [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
31 | )
32 |
33 | forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
34 | inverse_basis = torch.FloatTensor(
35 | np.linalg.pinv(scale * fourier_basis).T[:, None, :]
36 | )
37 |
38 | if window is not None:
39 | assert filter_length >= win_length
40 | # get window and zero center pad it to filter_length
41 | fft_window = get_window(window, win_length, fftbins=True)
42 | fft_window = pad_center(fft_window, filter_length)
43 | fft_window = torch.from_numpy(fft_window).float()
44 |
45 | # window the bases
46 | forward_basis *= fft_window
47 | inverse_basis *= fft_window
48 |
49 | self.register_buffer("forward_basis", forward_basis.float())
50 | self.register_buffer("inverse_basis", inverse_basis.float())
51 |
52 | def transform(self, input_data):
53 | num_batches = input_data.size(0)
54 | num_samples = input_data.size(1)
55 |
56 | self.num_samples = num_samples
57 |
58 | # similar to librosa, reflect-pad the input
59 | input_data = input_data.view(num_batches, 1, num_samples)
60 | input_data = F.pad(
61 | input_data.unsqueeze(1),
62 | (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
63 | mode="reflect",
64 | )
65 | input_data = input_data.squeeze(1)
66 |
67 | forward_transform = F.conv1d(
68 | input_data.cuda() if torch.cuda.is_available() else input_data,
69 | self.forward_basis.cuda() if torch.cuda.is_available() else self.forward_basis,
70 | stride=self.hop_length,
71 | padding=0,
72 | ).cpu()
73 |
74 | cutoff = int((self.filter_length / 2) + 1)
75 | real_part = forward_transform[:, :cutoff, :]
76 | imag_part = forward_transform[:, cutoff:, :]
77 |
78 | magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
79 | phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
80 |
81 | return magnitude, phase
82 |
83 | def inverse(self, magnitude, phase):
84 | recombine_magnitude_phase = torch.cat(
85 | [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
86 | )
87 |
88 | inverse_transform = F.conv_transpose1d(
89 | recombine_magnitude_phase,
90 | torch.autograd.Variable(self.inverse_basis, requires_grad=False),
91 | stride=self.hop_length,
92 | padding=0,
93 | )
94 |
95 | if self.window is not None:
96 | window_sum = window_sumsquare(
97 | self.window,
98 | magnitude.size(-1),
99 | hop_length=self.hop_length,
100 | win_length=self.win_length,
101 | n_fft=self.filter_length,
102 | dtype=np.float32,
103 | )
104 | # remove modulation effects
105 | approx_nonzero_indices = torch.from_numpy(
106 | np.where(window_sum > tiny(window_sum))[0]
107 | )
108 | window_sum = torch.autograd.Variable(
109 | torch.from_numpy(window_sum), requires_grad=False
110 | )
111 | window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
112 | inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
113 | approx_nonzero_indices
114 | ]
115 |
116 | # scale by hop ratio
117 | inverse_transform *= float(self.filter_length) / self.hop_length
118 |
119 | inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :]
120 | inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2)]
121 |
122 | return inverse_transform
123 |
124 | def forward(self, input_data):
125 | self.magnitude, self.phase = self.transform(input_data)
126 | reconstruction = self.inverse(self.magnitude, self.phase)
127 | return reconstruction
128 |
129 |
130 | class TacotronSTFT(torch.nn.Module):
131 | def __init__(
132 | self,
133 | filter_length,
134 | hop_length,
135 | win_length,
136 | n_mel_channels,
137 | sampling_rate,
138 | mel_fmin,
139 | mel_fmax,
140 | ):
141 | super(TacotronSTFT, self).__init__()
142 | self.n_mel_channels = n_mel_channels
143 | self.sampling_rate = sampling_rate
144 | self.stft_fn = STFT(filter_length, hop_length, win_length)
145 | mel_basis = librosa_mel_fn(
146 | sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax
147 | )
148 | mel_basis = torch.from_numpy(mel_basis).float()
149 | self.register_buffer("mel_basis", mel_basis)
150 |
151 | def spectral_normalize(self, magnitudes):
152 | output = dynamic_range_compression(magnitudes)
153 | return output
154 |
155 | def spectral_de_normalize(self, magnitudes):
156 | output = dynamic_range_decompression(magnitudes)
157 | return output
158 |
159 | def mel_spectrogram(self, y):
160 | """Computes mel-spectrograms from a batch of waves
161 | PARAMS
162 | ------
163 | y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
164 |
165 | RETURNS
166 | -------
167 | mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
168 | """
169 | assert torch.min(y.data) >= -1
170 | assert torch.max(y.data) <= 1
171 |
172 | magnitudes, phases = self.stft_fn.transform(y)
173 | magnitudes = magnitudes.data
174 | mel_output = torch.matmul(self.mel_basis, magnitudes)
175 | mel_output = self.spectral_normalize(mel_output)
176 | energy = torch.norm(magnitudes, dim=1)
177 |
178 | return mel_output, energy
179 |
--------------------------------------------------------------------------------
/audio/tools.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from scipy.io.wavfile import write
4 |
5 | from audio.audio_processing import griffin_lim
6 |
7 |
8 | def get_mel_from_wav(audio, _stft):
9 | audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
10 | audio = torch.autograd.Variable(audio, requires_grad=False)
11 | melspec, energy = _stft.mel_spectrogram(audio)
12 | melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
13 | energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
14 |
15 | return melspec, energy
16 |
17 |
18 | def inv_mel_spec(mel, out_filename, _stft, griffin_iters=60):
19 | mel = torch.stack([mel])
20 | mel_decompress = _stft.spectral_de_normalize(mel)
21 | mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
22 | spec_from_mel_scaling = 1000
23 | spec_from_mel = torch.mm(mel_decompress[0], _stft.mel_basis)
24 | spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
25 | spec_from_mel = spec_from_mel * spec_from_mel_scaling
26 |
27 | audio = griffin_lim(
28 | torch.autograd.Variable(spec_from_mel[:, :, :-1]), _stft.stft_fn, griffin_iters
29 | )
30 |
31 | audio = audio.squeeze()
32 | audio = audio.cpu().numpy()
33 | audio_path = out_filename
34 | write(audio_path, _stft.sampling_rate, audio)
35 |
--------------------------------------------------------------------------------
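Putting the pieces of this package together, a minimal sketch (the wav path is hypothetical; the STFT/mel hyperparameters mirror `config/LJSpeech/preprocess.yaml`) of extracting a mel-spectrogram and frame-level energy from a waveform:

```python
import numpy as np
from scipy.io.wavfile import read

from audio.stft import TacotronSTFT
from audio.tools import get_mel_from_wav

# hyperparameters taken from config/LJSpeech/preprocess.yaml
_stft = TacotronSTFT(
    filter_length=1024,
    hop_length=256,
    win_length=1024,
    n_mel_channels=80,
    sampling_rate=22050,
    mel_fmin=0,
    mel_fmax=8000,
)

sr, wav = read("example.wav")           # hypothetical 22050 Hz mono wav
wav = wav.astype(np.float32) / 32768.0  # scale int16 PCM into [-1, 1]

mel, energy = get_mel_from_wav(wav, _stft)
print(mel.shape, energy.shape)          # (80, n_frames), (n_frames,)
```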
/config/LJSpeech/model.yaml:
--------------------------------------------------------------------------------
1 | transformer:
2 | encoder_layer: 6
3 | encoder_head: 2
4 | encoder_hidden: 256
5 | decoder_layer: 2
6 | decoder_head: 2
7 | decoder_hidden: 256
8 | conv_filter_size: 1024
9 | conv_kernel_size: [9, 1]
10 | encoder_dropout: 0.2
11 | decoder_dropout: 0.2
12 | generator_layer: 4
13 |
14 | variance_predictor:
15 | filter_size: 256
16 | kernel_size: 3
17 | dropout: 0.5
18 |
19 | variance_embedding:
20 | kernel_size: 9
21 |
22 | multi_speaker: False
23 |
24 | max_seq_len: 1000
25 |
26 | vocoder:
27 | model: "HiFi-GAN" # support 'HiFi-GAN', 'MelGAN'
28 | speaker: "LJSpeech" # support 'LJSpeech', 'universal'
29 |
--------------------------------------------------------------------------------
/config/LJSpeech/preprocess.yaml:
--------------------------------------------------------------------------------
1 | dataset: "LJSpeech"
2 |
3 | path:
4 | corpus_path: "/mnt/nfs2/speech-datasets/en/LJSpeech-1.1"
5 | lexicon_path: "lexicon/librispeech-lexicon.txt"
6 | raw_path: "./raw_data/LJSpeech"
7 | preprocessed_path: "./preprocessed_data/LJSpeech"
8 |
9 | preprocessing:
10 | val_size: 512
11 | text:
12 | text_cleaners: ["english_cleaners"]
13 | language: "en"
14 | audio:
15 | sampling_rate: 22050
16 | max_wav_value: 32768.0
17 | stft:
18 | filter_length: 1024
19 | hop_length: 256
20 | win_length: 1024
21 | mel:
22 | n_mel_channels: 80
23 | mel_fmin: 0
24 | mel_fmax: 8000 # please set to 8000 for HiFi-GAN vocoder, set to null for MelGAN vocoder
25 | pitch:
26 | feature: "phoneme_level" # only support 'phoneme_level'
27 | normalization: True
28 |
--------------------------------------------------------------------------------
/config/LJSpeech/train.yaml:
--------------------------------------------------------------------------------
1 | path:
2 | ckpt_path: "./output/ckpt/LJSpeech"
3 | log_path: "./output/log/LJSpeech"
4 | result_path: "./output/result/LJSpeech"
5 | optimizer:
6 | batch_size: 16
7 | betas: [0.5, 0.9]
8 | eps: 0.000001
9 | weight_decay: 0.0
10 | grad_clip_thresh: 1.0
11 | grad_acc_step: 1
12 | warm_up_step: 4000
13 | anneal_steps: [300000, 400000, 500000]
14 | anneal_rate: 0.3
15 | init_lr: 0.005
16 | step:
17 | total_step: 1000000
18 | log_step: 100
19 | synth_step: 1000
20 | val_step: 1000
21 | save_step: 100000
22 |
--------------------------------------------------------------------------------
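For intuition about the scheduling knobs above, here is a hypothetical Noam-style schedule consistent with them (an assumption about the usual FastSpeech2-style setup; `model/optimizer.py` is the authoritative implementation): the learning rate warms up for `warm_up_step` steps, decays as the inverse square root afterwards, and is scaled by `anneal_rate` after each milestone in `anneal_steps`.

```python
# Hypothetical schedule sketch; see model/optimizer.py for the real one.
def scheduled_lr(step, init_lr=0.005, warm_up_step=4000,
                 anneal_steps=(300000, 400000, 500000), anneal_rate=0.3):
    lr = init_lr * min(step ** -0.5, step * warm_up_step ** -1.5)
    for milestone in anneal_steps:
        if step > milestone:
            lr *= anneal_rate
    return lr

print(scheduled_lr(4000))    # end of warm-up (peak learning rate)
print(scheduled_lr(500001))  # after all three anneal milestones
```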
/dataset.py:
--------------------------------------------------------------------------------
1 | import json
2 | import math
3 | import os
4 |
5 | import numpy as np
6 | from torch.utils.data import Dataset
7 |
8 | from text import text_to_sequence
9 | from utils.tools import pad_1D, pad_2D
10 |
11 |
12 | class Dataset(Dataset):
13 | def __init__(
14 | self, filename, preprocess_config, train_config, sort=False, drop_last=False
15 | ):
16 | self.dataset_name = preprocess_config["dataset"]
17 | self.preprocessed_path = preprocess_config["path"]["preprocessed_path"]
18 | self.cleaners = preprocess_config["preprocessing"]["text"]["text_cleaners"]
19 | self.batch_size = train_config["optimizer"]["batch_size"]
20 |
21 | self.basename, self.speaker, self.text, self.raw_text = self.process_meta(
22 | filename
23 | )
24 | with open(os.path.join(self.preprocessed_path, "speakers.json")) as f:
25 | self.speaker_map = json.load(f)
26 | self.sort = sort
27 | self.drop_last = drop_last
28 |
29 | def __len__(self):
30 | return len(self.text)
31 |
32 | def __getitem__(self, idx):
33 | basename = self.basename[idx]
34 | speaker = self.speaker[idx]
35 | speaker_id = self.speaker_map[speaker]
36 | raw_text = self.raw_text[idx]
37 | phone = np.array(text_to_sequence(self.text[idx], self.cleaners))
38 | mel_path = os.path.join(
39 | self.preprocessed_path,
40 | "mel",
41 | "{}-mel-{}.npy".format(speaker, basename),
42 | )
43 | mel = np.load(mel_path)
44 | pitch_path = os.path.join(
45 | self.preprocessed_path,
46 | "pitch",
47 | "{}-pitch-{}.npy".format(speaker, basename),
48 | )
49 | pitch = np.load(pitch_path)
50 | duration_path = os.path.join(
51 | self.preprocessed_path,
52 | "duration",
53 | "{}-duration-{}.npy".format(speaker, basename),
54 | )
55 | duration = np.load(duration_path)
56 |
57 | sample = {
58 | "id": basename,
59 | "speaker": speaker_id,
60 | "text": phone,
61 | "raw_text": raw_text,
62 | "mel": mel,
63 | "pitch": pitch,
64 | "duration": duration,
65 | }
66 |
67 | return sample
68 |
69 | def process_meta(self, filename):
70 | with open(
71 | os.path.join(self.preprocessed_path, filename), "r", encoding="utf-8"
72 | ) as f:
73 | name = []
74 | speaker = []
75 | text = []
76 | raw_text = []
77 | for line in f.readlines():
78 | n, s, t, r = line.strip("\n").split("|")
79 | name.append(n)
80 | speaker.append(s)
81 | text.append(t)
82 | raw_text.append(r)
83 | return name, speaker, text, raw_text
84 |
85 | def reprocess(self, data, idxs):
86 | ids = [data[idx]["id"] for idx in idxs]
87 | speakers = [data[idx]["speaker"] for idx in idxs]
88 | texts = [data[idx]["text"] for idx in idxs]
89 | raw_texts = [data[idx]["raw_text"] for idx in idxs]
90 | mels = [data[idx]["mel"] for idx in idxs]
91 | pitches = [data[idx]["pitch"] for idx in idxs]
92 | durations = [data[idx]["duration"] for idx in idxs]
93 |
94 | text_lens = np.array([text.shape[0] for text in texts])
95 | mel_lens = np.array([mel.shape[0] for mel in mels])
96 |
97 | speakers = np.array(speakers)
98 | texts = pad_1D(texts)
99 | mels = pad_2D(mels)
100 | pitches = pad_1D(pitches)
101 | durations = pad_1D(durations)
102 |
103 | return (
104 | ids,
105 | raw_texts,
106 | speakers,
107 | texts,
108 | text_lens,
109 | max(text_lens),
110 | mels,
111 | mel_lens,
112 | max(mel_lens),
113 | pitches,
114 | durations,
115 | )
116 |
117 | def collate_fn(self, data):
118 | data_size = len(data)
119 |
120 | if self.sort:
121 | len_arr = np.array([d["text"].shape[0] for d in data])
122 | idx_arr = np.argsort(-len_arr)
123 | else:
124 | idx_arr = np.arange(data_size)
125 |
126 | tail = idx_arr[len(idx_arr) - (len(idx_arr) % self.batch_size) :]
127 | idx_arr = idx_arr[: len(idx_arr) - (len(idx_arr) % self.batch_size)]
128 | idx_arr = idx_arr.reshape((-1, self.batch_size)).tolist()
129 | if not self.drop_last and len(tail) > 0:
130 | idx_arr += [tail.tolist()]
131 |
132 | output = list()
133 | for idx in idx_arr:
134 | output.append(self.reprocess(data, idx))
135 |
136 | return output
137 |
138 |
139 | class TextDataset(Dataset):
140 | def __init__(self, filepath, preprocess_config):
141 | self.cleaners = preprocess_config["preprocessing"]["text"]["text_cleaners"]
142 |
143 | self.basename, self.speaker, self.text, self.raw_text = self.process_meta(
144 | filepath
145 | )
146 | with open(
147 | os.path.join(
148 | preprocess_config["path"]["preprocessed_path"], "speakers.json"
149 | )
150 | ) as f:
151 | self.speaker_map = json.load(f)
152 |
153 | def __len__(self):
154 | return len(self.text)
155 |
156 | def __getitem__(self, idx):
157 | basename = self.basename[idx]
158 | speaker = self.speaker[idx]
159 | speaker_id = self.speaker_map[speaker]
160 | raw_text = self.raw_text[idx]
161 | phone = np.array(text_to_sequence(self.text[idx], self.cleaners))
162 |
163 | return (basename, speaker_id, phone, raw_text)
164 |
165 | def process_meta(self, filename):
166 | with open(filename, "r", encoding="utf-8") as f:
167 | name = []
168 | speaker = []
169 | text = []
170 | raw_text = []
171 | for line in f.readlines():
172 | n, s, t, r = line.strip("\n").split("|")
173 | name.append(n)
174 | speaker.append(s)
175 | text.append(t)
176 | raw_text.append(r)
177 | return name, speaker, text, raw_text
178 |
179 | def collate_fn(self, data):
180 | ids = [d[0] for d in data]
181 | speakers = np.array([d[1] for d in data])
182 | texts = [d[2] for d in data]
183 | raw_texts = [d[3] for d in data]
184 | text_lens = np.array([text.shape[0] for text in texts])
185 |
186 | texts = pad_1D(texts)
187 |
188 | return ids, raw_texts, speakers, texts, text_lens, max(text_lens)
189 |
190 |
191 | if __name__ == "__main__":
192 | # Test
193 | import torch
194 | import yaml
195 | from torch.utils.data import DataLoader
196 | from utils.tools import to_device
197 |
198 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
199 | preprocess_config = yaml.load(
200 | open("./config/LJSpeech/preprocess.yaml", "r"), Loader=yaml.FullLoader
201 | )
202 | train_config = yaml.load(
203 | open("./config/LJSpeech/train.yaml", "r"), Loader=yaml.FullLoader
204 | )
205 |
206 | train_dataset = Dataset(
207 | "train.txt", preprocess_config, train_config, sort=True, drop_last=True
208 | )
209 | val_dataset = Dataset(
210 | "val.txt", preprocess_config, train_config, sort=False, drop_last=False
211 | )
212 | train_loader = DataLoader(
213 | train_dataset,
214 | batch_size=train_config["optimizer"]["batch_size"] * 4,
215 | shuffle=True,
216 | collate_fn=train_dataset.collate_fn,
217 | )
218 | val_loader = DataLoader(
219 | val_dataset,
220 | batch_size=train_config["optimizer"]["batch_size"],
221 | shuffle=False,
222 | collate_fn=val_dataset.collate_fn,
223 | )
224 |
225 | n_batch = 0
226 | for batchs in train_loader:
227 | for batch in batchs:
228 | to_device(batch, device)
229 | n_batch += 1
230 | print(
231 | "Training set with size {} is composed of {} batches.".format(
232 | len(train_dataset), n_batch
233 | )
234 | )
235 |
236 | n_batch = 0
237 | for batchs in val_loader:
238 | for batch in batchs:
239 | to_device(batch, device)
240 | n_batch += 1
241 | print(
242 | "Validation set with size {} is composed of {} batches.".format(
243 | len(val_dataset), n_batch
244 | )
245 | )
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import torch
5 | import yaml
6 | import torch.nn as nn
7 | from torch.utils.data import DataLoader
8 |
9 | from utils.model import get_model, get_vocoder
10 | from utils.tools import to_device, log, synth_one_sample
11 | from model import FastPitchFormantLoss
12 | from dataset import Dataset
13 |
14 |
15 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16 |
17 |
18 | def evaluate(model, step, configs, logger=None, vocoder=None, loss_len=4):
19 | preprocess_config, model_config, train_config = configs
20 |
21 | # Get dataset
22 | dataset = Dataset(
23 | "val.txt", preprocess_config, train_config, sort=False, drop_last=False
24 | )
25 | batch_size = train_config["optimizer"]["batch_size"]
26 | loader = DataLoader(
27 | dataset,
28 | batch_size=batch_size,
29 | shuffle=False,
30 | collate_fn=dataset.collate_fn,
31 | )
32 |
33 | # Get loss function
34 | Loss = FastPitchFormantLoss(preprocess_config, model_config).to(device)
35 |
36 | # Evaluation
37 | loss_sums = [0 for _ in range(loss_len)]
38 | for batchs in loader:
39 | for batch in batchs:
40 | batch = to_device(batch, device)
41 | with torch.no_grad():
42 | # Forward
43 | output = model(*(batch[2:]))
44 |
45 | # Cal Loss
46 | losses = Loss(batch, output)
47 |
48 | for i in range(len(losses)):
49 | loss_sums[i] += losses[i].item() * len(batch[0])
50 |
51 | loss_means = [loss_sum / len(dataset) for loss_sum in loss_sums]
52 |
53 | message = "Validation Step {}, Total Loss: {:.4f}, Mel Loss: {:.4f}, Pitch Loss: {:.4f}, Duration Loss: {:.4f}".format(
54 | *([step] + [l for l in loss_means])
55 | )
56 |
57 | if logger is not None:
58 | fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
59 | batch,
60 | output,
61 | vocoder,
62 | model_config,
63 | preprocess_config,
64 | )
65 |
66 | log(logger, step, losses=loss_means)
67 | log(
68 | logger,
69 | fig=fig,
70 | tag="Validation/step_{}_{}".format(step, tag),
71 | )
72 | sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
73 | log(
74 | logger,
75 | audio=wav_reconstruction,
76 | sampling_rate=sampling_rate,
77 | tag="Validation/step_{}_{}_reconstructed".format(step, tag),
78 | )
79 | log(
80 | logger,
81 | audio=wav_prediction,
82 | sampling_rate=sampling_rate,
83 | tag="Validation/step_{}_{}_synthesized".format(step, tag),
84 | )
85 |
86 | return message
87 |
88 |
89 | if __name__ == "__main__":
90 |
91 | parser = argparse.ArgumentParser()
92 | parser.add_argument("--restore_step", type=int, default=30000)
93 | parser.add_argument(
94 | "-p",
95 | "--preprocess_config",
96 | type=str,
97 | required=True,
98 | help="path to preprocess.yaml",
99 | )
100 | parser.add_argument(
101 | "-m", "--model_config", type=str, required=True, help="path to model.yaml"
102 | )
103 | parser.add_argument(
104 | "-t", "--train_config", type=str, required=True, help="path to train.yaml"
105 | )
106 | args = parser.parse_args()
107 |
108 | # Read Config
109 | preprocess_config = yaml.load(
110 | open(args.preprocess_config, "r"), Loader=yaml.FullLoader
111 | )
112 | model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
113 | train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
114 | configs = (preprocess_config, model_config, train_config)
115 |
116 | # Get model
117 | model = get_model(args, configs, device, train=False).to(device)
118 |
119 | message = evaluate(model, args.restore_step, configs)
120 | print(message)
--------------------------------------------------------------------------------
/hifigan/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Jungil Kong
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/hifigan/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import Generator
2 |
3 |
4 | class AttrDict(dict):
5 | def __init__(self, *args, **kwargs):
6 | super(AttrDict, self).__init__(*args, **kwargs)
7 | self.__dict__ = self
--------------------------------------------------------------------------------
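A minimal sketch of how this package is typically used: parse `hifigan/config.json` into an `AttrDict` so `Generator` can read hyperparameters as attributes. The checkpoint key `"generator"` is an assumption based on the official HiFi-GAN release format.

```python
import json

import torch

import hifigan

with open("hifigan/config.json") as f:
    config = hifigan.AttrDict(json.load(f))

vocoder = hifigan.Generator(config)
# After unzipping generator_LJSpeech.pth.tar.zip (key name assumed):
# ckpt = torch.load("hifigan/generator_LJSpeech.pth.tar", map_location="cpu")
# vocoder.load_state_dict(ckpt["generator"])
vocoder.eval()
vocoder.remove_weight_norm()

mel = torch.randn(1, 80, 100)  # (batch, n_mels, frames)
with torch.no_grad():
    wav = vocoder(mel)         # (1, 1, frames * 256) given 8*8*2*2 upsampling
```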
/hifigan/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "resblock": "1",
3 | "num_gpus": 0,
4 | "batch_size": 16,
5 | "learning_rate": 0.0002,
6 | "adam_b1": 0.8,
7 | "adam_b2": 0.99,
8 | "lr_decay": 0.999,
9 | "seed": 1234,
10 |
11 | "upsample_rates": [8,8,2,2],
12 | "upsample_kernel_sizes": [16,16,4,4],
13 | "upsample_initial_channel": 512,
14 | "resblock_kernel_sizes": [3,7,11],
15 | "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16 |
17 | "segment_size": 8192,
18 | "num_mels": 80,
19 | "num_freq": 1025,
20 | "n_fft": 1024,
21 | "hop_size": 256,
22 | "win_size": 1024,
23 |
24 | "sampling_rate": 22050,
25 |
26 | "fmin": 0,
27 | "fmax": 8000,
28 | "fmax_for_loss": null,
29 |
30 | "num_workers": 4,
31 |
32 | "dist_config": {
33 | "dist_backend": "nccl",
34 | "dist_url": "tcp://localhost:54321",
35 | "world_size": 1
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/hifigan/generator_LJSpeech.pth.tar.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/hifigan/generator_LJSpeech.pth.tar.zip
--------------------------------------------------------------------------------
/hifigan/generator_universal.pth.tar.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/hifigan/generator_universal.pth.tar.zip
--------------------------------------------------------------------------------
/hifigan/models.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.nn import Conv1d, ConvTranspose1d
5 | from torch.nn.utils import weight_norm, remove_weight_norm
6 |
7 | LRELU_SLOPE = 0.1
8 |
9 |
10 | def init_weights(m, mean=0.0, std=0.01):
11 | classname = m.__class__.__name__
12 | if classname.find("Conv") != -1:
13 | m.weight.data.normal_(mean, std)
14 |
15 |
16 | def get_padding(kernel_size, dilation=1):
17 | return int((kernel_size * dilation - dilation) / 2)
18 |
19 |
20 | class ResBlock(torch.nn.Module):
21 | def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
22 | super(ResBlock, self).__init__()
23 | self.h = h
24 | self.convs1 = nn.ModuleList(
25 | [
26 | weight_norm(
27 | Conv1d(
28 | channels,
29 | channels,
30 | kernel_size,
31 | 1,
32 | dilation=dilation[0],
33 | padding=get_padding(kernel_size, dilation[0]),
34 | )
35 | ),
36 | weight_norm(
37 | Conv1d(
38 | channels,
39 | channels,
40 | kernel_size,
41 | 1,
42 | dilation=dilation[1],
43 | padding=get_padding(kernel_size, dilation[1]),
44 | )
45 | ),
46 | weight_norm(
47 | Conv1d(
48 | channels,
49 | channels,
50 | kernel_size,
51 | 1,
52 | dilation=dilation[2],
53 | padding=get_padding(kernel_size, dilation[2]),
54 | )
55 | ),
56 | ]
57 | )
58 | self.convs1.apply(init_weights)
59 |
60 | self.convs2 = nn.ModuleList(
61 | [
62 | weight_norm(
63 | Conv1d(
64 | channels,
65 | channels,
66 | kernel_size,
67 | 1,
68 | dilation=1,
69 | padding=get_padding(kernel_size, 1),
70 | )
71 | ),
72 | weight_norm(
73 | Conv1d(
74 | channels,
75 | channels,
76 | kernel_size,
77 | 1,
78 | dilation=1,
79 | padding=get_padding(kernel_size, 1),
80 | )
81 | ),
82 | weight_norm(
83 | Conv1d(
84 | channels,
85 | channels,
86 | kernel_size,
87 | 1,
88 | dilation=1,
89 | padding=get_padding(kernel_size, 1),
90 | )
91 | ),
92 | ]
93 | )
94 | self.convs2.apply(init_weights)
95 |
96 | def forward(self, x):
97 | for c1, c2 in zip(self.convs1, self.convs2):
98 | xt = F.leaky_relu(x, LRELU_SLOPE)
99 | xt = c1(xt)
100 | xt = F.leaky_relu(xt, LRELU_SLOPE)
101 | xt = c2(xt)
102 | x = xt + x
103 | return x
104 |
105 | def remove_weight_norm(self):
106 | for l in self.convs1:
107 | remove_weight_norm(l)
108 | for l in self.convs2:
109 | remove_weight_norm(l)
110 |
111 |
112 | class Generator(torch.nn.Module):
113 | def __init__(self, h):
114 | super(Generator, self).__init__()
115 | self.h = h
116 | self.num_kernels = len(h.resblock_kernel_sizes)
117 | self.num_upsamples = len(h.upsample_rates)
118 | self.conv_pre = weight_norm(
119 | Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)
120 | )
121 | resblock = ResBlock
122 |
123 | self.ups = nn.ModuleList()
124 | for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
125 | self.ups.append(
126 | weight_norm(
127 | ConvTranspose1d(
128 | h.upsample_initial_channel // (2 ** i),
129 | h.upsample_initial_channel // (2 ** (i + 1)),
130 | k,
131 | u,
132 | padding=(k - u) // 2,
133 | )
134 | )
135 | )
136 |
137 | self.resblocks = nn.ModuleList()
138 | for i in range(len(self.ups)):
139 | ch = h.upsample_initial_channel // (2 ** (i + 1))
140 | for j, (k, d) in enumerate(
141 | zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
142 | ):
143 | self.resblocks.append(resblock(h, ch, k, d))
144 |
145 | self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
146 | self.ups.apply(init_weights)
147 | self.conv_post.apply(init_weights)
148 |
149 | def forward(self, x):
150 | x = self.conv_pre(x)
151 | for i in range(self.num_upsamples):
152 | x = F.leaky_relu(x, LRELU_SLOPE)
153 | x = self.ups[i](x)
154 | xs = None
155 | for j in range(self.num_kernels):
156 | if xs is None:
157 | xs = self.resblocks[i * self.num_kernels + j](x)
158 | else:
159 | xs += self.resblocks[i * self.num_kernels + j](x)
160 | x = xs / self.num_kernels
161 | x = F.leaky_relu(x)
162 | x = self.conv_post(x)
163 | x = torch.tanh(x)
164 |
165 | return x
166 |
167 | def remove_weight_norm(self):
168 | print("Removing weight norm...")
169 | for l in self.ups:
170 | remove_weight_norm(l)
171 | for l in self.resblocks:
172 | l.remove_weight_norm()
173 | remove_weight_norm(self.conv_pre)
174 | remove_weight_norm(self.conv_post)
--------------------------------------------------------------------------------
/img/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/img/model.png
--------------------------------------------------------------------------------
/img/tensorboard_audio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/img/tensorboard_audio.png
--------------------------------------------------------------------------------
/img/tensorboard_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/img/tensorboard_loss.png
--------------------------------------------------------------------------------
/img/tensorboard_spec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keonlee9420/FastPitchFormant/dd86032953be04fb526b658b19ecdc5600ff25a5/img/tensorboard_spec.png
--------------------------------------------------------------------------------
/lexicon/pinyin-lexicon-r.txt:
--------------------------------------------------------------------------------
1 | a1 a1
2 | a2 a2
3 | a3 a3
4 | a4 a4
5 | a5 a5
6 | ai1 ai1
7 | ai2 ai2
8 | ai3 ai3
9 | ai4 ai4
10 | ai5 ai5
11 | an1 an1
12 | an2 an2
13 | an3 an3
14 | an4 an4
15 | an5 an5
16 | ang1 ang1
17 | ang2 ang2
18 | ang3 ang3
19 | ang4 ang4
20 | ang5 ang5
21 | ao1 ao1
22 | ao2 ao2
23 | ao3 ao3
24 | ao4 ao4
25 | ao5 ao5
26 | ba1 b a1
27 | ba2 b a2
28 | ba3 b a3
29 | ba4 b a4
30 | ba5 b a5
31 | bai1 b ai1
32 | bai2 b ai2
33 | bai3 b ai3
34 | bai4 b ai4
35 | bai5 b ai5
36 | ban1 b an1
37 | ban2 b an2
38 | ban3 b an3
39 | ban4 b an4
40 | ban5 b an5
41 | bang1 b ang1
42 | bang2 b ang2
43 | bang3 b ang3
44 | bang4 b ang4
45 | bang5 b ang5
46 | bao1 b ao1
47 | bao2 b ao2
48 | bao3 b ao3
49 | bao4 b ao4
50 | bao5 b ao5
51 | bei1 b ei1
52 | bei2 b ei2
53 | bei3 b ei3
54 | bei4 b ei4
55 | bei5 b ei5
56 | ben1 b en1
57 | ben2 b en2
58 | ben3 b en3
59 | ben4 b en4
60 | ben5 b en5
61 | beng1 b eng1
62 | beng2 b eng2
63 | beng3 b eng3
64 | beng4 b eng4
65 | beng5 b eng5
66 | bi1 b i1
67 | bi2 b i2
68 | bi3 b i3
69 | bi4 b i4
70 | bi5 b i5
71 | bian1 b ian1
72 | bian2 b ian2
73 | bian3 b ian3
74 | bian4 b ian4
75 | bian5 b ian5
76 | biao1 b iao1
77 | biao2 b iao2
78 | biao3 b iao3
79 | biao4 b iao4
80 | biao5 b iao5
81 | bie1 b ie1
82 | bie2 b ie2
83 | bie3 b ie3
84 | bie4 b ie4
85 | bie5 b ie5
86 | bin1 b in1
87 | bin2 b in2
88 | bin3 b in3
89 | bin4 b in4
90 | bin5 b in5
91 | bing1 b ing1
92 | bing2 b ing2
93 | bing3 b ing3
94 | bing4 b ing4
95 | bing5 b ing5
96 | bo1 b o1
97 | bo2 b o2
98 | bo3 b o3
99 | bo4 b o4
100 | bo5 b o5
101 | bu1 b u1
102 | bu2 b u2
103 | bu3 b u3
104 | bu4 b u4
105 | bu5 b u5
106 | ca1 c a1
107 | ca2 c a2
108 | ca3 c a3
109 | ca4 c a4
110 | ca5 c a5
111 | cai1 c ai1
112 | cai2 c ai2
113 | cai3 c ai3
114 | cai4 c ai4
115 | cai5 c ai5
116 | can1 c an1
117 | can2 c an2
118 | can3 c an3
119 | can4 c an4
120 | can5 c an5
121 | cang1 c ang1
122 | cang2 c ang2
123 | cang3 c ang3
124 | cang4 c ang4
125 | cang5 c ang5
126 | cao1 c ao1
127 | cao2 c ao2
128 | cao3 c ao3
129 | cao4 c ao4
130 | cao5 c ao5
131 | ce1 c e1
132 | ce2 c e2
133 | ce3 c e3
134 | ce4 c e4
135 | ce5 c e5
136 | cen1 c en1
137 | cen2 c en2
138 | cen3 c en3
139 | cen4 c en4
140 | cen5 c en5
141 | ceng1 c eng1
142 | ceng2 c eng2
143 | ceng3 c eng3
144 | ceng4 c eng4
145 | ceng5 c eng5
146 | cha1 ch a1
147 | cha2 ch a2
148 | cha3 ch a3
149 | cha4 ch a4
150 | cha5 ch a5
151 | chai1 ch ai1
152 | chai2 ch ai2
153 | chai3 ch ai3
154 | chai4 ch ai4
155 | chai5 ch ai5
156 | chan1 ch an1
157 | chan2 ch an2
158 | chan3 ch an3
159 | chan4 ch an4
160 | chan5 ch an5
161 | chang1 ch ang1
162 | chang2 ch ang2
163 | chang3 ch ang3
164 | chang4 ch ang4
165 | chang5 ch ang5
166 | chao1 ch ao1
167 | chao2 ch ao2
168 | chao3 ch ao3
169 | chao4 ch ao4
170 | chao5 ch ao5
171 | che1 ch e1
172 | che2 ch e2
173 | che3 ch e3
174 | che4 ch e4
175 | che5 ch e5
176 | chen1 ch en1
177 | chen2 ch en2
178 | chen3 ch en3
179 | chen4 ch en4
180 | chen5 ch en5
181 | cheng1 ch eng1
182 | cheng2 ch eng2
183 | cheng3 ch eng3
184 | cheng4 ch eng4
185 | cheng5 ch eng5
186 | chi1 ch iii1
187 | chi2 ch iii2
188 | chi3 ch iii3
189 | chi4 ch iii4
190 | chi5 ch iii5
191 | chong1 ch ong1
192 | chong2 ch ong2
193 | chong3 ch ong3
194 | chong4 ch ong4
195 | chong5 ch ong5
196 | chou1 ch ou1
197 | chou2 ch ou2
198 | chou3 ch ou3
199 | chou4 ch ou4
200 | chou5 ch ou5
201 | chu1 ch u1
202 | chu2 ch u2
203 | chu3 ch u3
204 | chu4 ch u4
205 | chu5 ch u5
206 | chuai1 ch uai1
207 | chuai2 ch uai2
208 | chuai3 ch uai3
209 | chuai4 ch uai4
210 | chuai5 ch uai5
211 | chuan1 ch uan1
212 | chuan2 ch uan2
213 | chuan3 ch uan3
214 | chuan4 ch uan4
215 | chuan5 ch uan5
216 | chuang1 ch uang1
217 | chuang2 ch uang2
218 | chuang3 ch uang3
219 | chuang4 ch uang4
220 | chuang5 ch uang5
221 | chui1 ch uei1
222 | chui2 ch uei2
223 | chui3 ch uei3
224 | chui4 ch uei4
225 | chui5 ch uei5
226 | chun1 ch uen1
227 | chun2 ch uen2
228 | chun3 ch uen3
229 | chun4 ch uen4
230 | chun5 ch uen5
231 | chuo1 ch uo1
232 | chuo2 ch uo2
233 | chuo3 ch uo3
234 | chuo4 ch uo4
235 | chuo5 ch uo5
236 | ci1 c ii1
237 | ci2 c ii2
238 | ci3 c ii3
239 | ci4 c ii4
240 | ci5 c ii5
241 | cong1 c ong1
242 | cong2 c ong2
243 | cong3 c ong3
244 | cong4 c ong4
245 | cong5 c ong5
246 | cou1 c ou1
247 | cou2 c ou2
248 | cou3 c ou3
249 | cou4 c ou4
250 | cou5 c ou5
251 | cu1 c u1
252 | cu2 c u2
253 | cu3 c u3
254 | cu4 c u4
255 | cu5 c u5
256 | cuan1 c uan1
257 | cuan2 c uan2
258 | cuan3 c uan3
259 | cuan4 c uan4
260 | cuan5 c uan5
261 | cui1 c uei1
262 | cui2 c uei2
263 | cui3 c uei3
264 | cui4 c uei4
265 | cui5 c uei5
266 | cun1 c uen1
267 | cun2 c uen2
268 | cun3 c uen3
269 | cun4 c uen4
270 | cun5 c uen5
271 | cuo1 c uo1
272 | cuo2 c uo2
273 | cuo3 c uo3
274 | cuo4 c uo4
275 | cuo5 c uo5
276 | da1 d a1
277 | da2 d a2
278 | da3 d a3
279 | da4 d a4
280 | da5 d a5
281 | dai1 d ai1
282 | dai2 d ai2
283 | dai3 d ai3
284 | dai4 d ai4
285 | dai5 d ai5
286 | dan1 d an1
287 | dan2 d an2
288 | dan3 d an3
289 | dan4 d an4
290 | dan5 d an5
291 | dang1 d ang1
292 | dang2 d ang2
293 | dang3 d ang3
294 | dang4 d ang4
295 | dang5 d ang5
296 | dao1 d ao1
297 | dao2 d ao2
298 | dao3 d ao3
299 | dao4 d ao4
300 | dao5 d ao5
301 | de1 d e1
302 | de2 d e2
303 | de3 d e3
304 | de4 d e4
305 | de5 d e5
306 | dei1 d ei1
307 | dei2 d ei2
308 | dei3 d ei3
309 | dei4 d ei4
310 | dei5 d ei5
311 | den1 d en1
312 | den2 d en2
313 | den3 d en3
314 | den4 d en4
315 | den5 d en5
316 | deng1 d eng1
317 | deng2 d eng2
318 | deng3 d eng3
319 | deng4 d eng4
320 | deng5 d eng5
321 | di1 d i1
322 | di2 d i2
323 | di3 d i3
324 | di4 d i4
325 | di5 d i5
326 | dia1 d ia1
327 | dia2 d ia2
328 | dia3 d ia3
329 | dia4 d ia4
330 | dia5 d ia5
331 | dian1 d ian1
332 | dian2 d ian2
333 | dian3 d ian3
334 | dian4 d ian4
335 | dian5 d ian5
336 | diao1 d iao1
337 | diao2 d iao2
338 | diao3 d iao3
339 | diao4 d iao4
340 | diao5 d iao5
341 | die1 d ie1
342 | die2 d ie2
343 | die3 d ie3
344 | die4 d ie4
345 | die5 d ie5
346 | ding1 d ing1
347 | ding2 d ing2
348 | ding3 d ing3
349 | ding4 d ing4
350 | ding5 d ing5
351 | diu1 d iou1
352 | diu2 d iou2
353 | diu3 d iou3
354 | diu4 d iou4
355 | diu5 d iou5
356 | dong1 d ong1
357 | dong2 d ong2
358 | dong3 d ong3
359 | dong4 d ong4
360 | dong5 d ong5
361 | dou1 d ou1
362 | dou2 d ou2
363 | dou3 d ou3
364 | dou4 d ou4
365 | dou5 d ou5
366 | du1 d u1
367 | du2 d u2
368 | du3 d u3
369 | du4 d u4
370 | du5 d u5
371 | duan1 d uan1
372 | duan2 d uan2
373 | duan3 d uan3
374 | duan4 d uan4
375 | duan5 d uan5
376 | dui1 d uei1
377 | dui2 d uei2
378 | dui3 d uei3
379 | dui4 d uei4
380 | dui5 d uei5
381 | dun1 d uen1
382 | dun2 d uen2
383 | dun3 d uen3
384 | dun4 d uen4
385 | dun5 d uen5
386 | duo1 d uo1
387 | duo2 d uo2
388 | duo3 d uo3
389 | duo4 d uo4
390 | duo5 d uo5
391 | e1 e1
392 | e2 e2
393 | e3 e3
394 | e4 e4
395 | e5 e5
396 | ei1 ei1
397 | ei2 ei2
398 | ei3 ei3
399 | ei4 ei4
400 | ei5 ei5
401 | en1 en1
402 | en2 en2
403 | en3 en3
404 | en4 en4
405 | en5 en5
406 | eng1 eng1
407 | eng2 eng2
408 | eng3 eng3
409 | eng4 eng4
410 | eng5 eng5
411 | r1 er1
412 | r2 er2
413 | r3 er3
414 | r4 er4
415 | r5 er5
416 | er1 er1
417 | er2 er2
418 | er3 er3
419 | er4 er4
420 | er5 er5
421 | fa1 f a1
422 | fa2 f a2
423 | fa3 f a3
424 | fa4 f a4
425 | fa5 f a5
426 | fan1 f an1
427 | fan2 f an2
428 | fan3 f an3
429 | fan4 f an4
430 | fan5 f an5
431 | fang1 f ang1
432 | fang2 f ang2
433 | fang3 f ang3
434 | fang4 f ang4
435 | fang5 f ang5
436 | fei1 f ei1
437 | fei2 f ei2
438 | fei3 f ei3
439 | fei4 f ei4
440 | fei5 f ei5
441 | fen1 f en1
442 | fen2 f en2
443 | fen3 f en3
444 | fen4 f en4
445 | fen5 f en5
446 | feng1 f eng1
447 | feng2 f eng2
448 | feng3 f eng3
449 | feng4 f eng4
450 | feng5 f eng5
451 | fo1 f o1
452 | fo2 f o2
453 | fo3 f o3
454 | fo4 f o4
455 | fo5 f o5
456 | fou1 f ou1
457 | fou2 f ou2
458 | fou3 f ou3
459 | fou4 f ou4
460 | fou5 f ou5
461 | fu1 f u1
462 | fu2 f u2
463 | fu3 f u3
464 | fu4 f u4
465 | fu5 f u5
466 | ga1 g a1
467 | ga2 g a2
468 | ga3 g a3
469 | ga4 g a4
470 | ga5 g a5
471 | gai1 g ai1
472 | gai2 g ai2
473 | gai3 g ai3
474 | gai4 g ai4
475 | gai5 g ai5
476 | gan1 g an1
477 | gan2 g an2
478 | gan3 g an3
479 | gan4 g an4
480 | gan5 g an5
481 | gang1 g ang1
482 | gang2 g ang2
483 | gang3 g ang3
484 | gang4 g ang4
485 | gang5 g ang5
486 | gao1 g ao1
487 | gao2 g ao2
488 | gao3 g ao3
489 | gao4 g ao4
490 | gao5 g ao5
491 | ge1 g e1
492 | ge2 g e2
493 | ge3 g e3
494 | ge4 g e4
495 | ge5 g e5
496 | gei1 g ei1
497 | gei2 g ei2
498 | gei3 g ei3
499 | gei4 g ei4
500 | gei5 g ei5
501 | gen1 g en1
502 | gen2 g en2
503 | gen3 g en3
504 | gen4 g en4
505 | gen5 g en5
506 | geng1 g eng1
507 | geng2 g eng2
508 | geng3 g eng3
509 | geng4 g eng4
510 | geng5 g eng5
511 | gong1 g ong1
512 | gong2 g ong2
513 | gong3 g ong3
514 | gong4 g ong4
515 | gong5 g ong5
516 | gou1 g ou1
517 | gou2 g ou2
518 | gou3 g ou3
519 | gou4 g ou4
520 | gou5 g ou5
521 | gu1 g u1
522 | gu2 g u2
523 | gu3 g u3
524 | gu4 g u4
525 | gu5 g u5
526 | gua1 g ua1
527 | gua2 g ua2
528 | gua3 g ua3
529 | gua4 g ua4
530 | gua5 g ua5
531 | guai1 g uai1
532 | guai2 g uai2
533 | guai3 g uai3
534 | guai4 g uai4
535 | guai5 g uai5
536 | guan1 g uan1
537 | guan2 g uan2
538 | guan3 g uan3
539 | guan4 g uan4
540 | guan5 g uan5
541 | guang1 g uang1
542 | guang2 g uang2
543 | guang3 g uang3
544 | guang4 g uang4
545 | guang5 g uang5
546 | gui1 g uei1
547 | gui2 g uei2
548 | gui3 g uei3
549 | gui4 g uei4
550 | gui5 g uei5
551 | gun1 g uen1
552 | gun2 g uen2
553 | gun3 g uen3
554 | gun4 g uen4
555 | gun5 g uen5
556 | guo1 g uo1
557 | guo2 g uo2
558 | guo3 g uo3
559 | guo4 g uo4
560 | guo5 g uo5
561 | ha1 h a1
562 | ha2 h a2
563 | ha3 h a3
564 | ha4 h a4
565 | ha5 h a5
566 | hai1 h ai1
567 | hai2 h ai2
568 | hai3 h ai3
569 | hai4 h ai4
570 | hai5 h ai5
571 | han1 h an1
572 | han2 h an2
573 | han3 h an3
574 | han4 h an4
575 | han5 h an5
576 | hang1 h ang1
577 | hang2 h ang2
578 | hang3 h ang3
579 | hang4 h ang4
580 | hang5 h ang5
581 | hao1 h ao1
582 | hao2 h ao2
583 | hao3 h ao3
584 | hao4 h ao4
585 | hao5 h ao5
586 | he1 h e1
587 | he2 h e2
588 | he3 h e3
589 | he4 h e4
590 | he5 h e5
591 | hei1 h ei1
592 | hei2 h ei2
593 | hei3 h ei3
594 | hei4 h ei4
595 | hei5 h ei5
596 | hen1 h en1
597 | hen2 h en2
598 | hen3 h en3
599 | hen4 h en4
600 | hen5 h en5
601 | heng1 h eng1
602 | heng2 h eng2
603 | heng3 h eng3
604 | heng4 h eng4
605 | heng5 h eng5
606 | hong1 h ong1
607 | hong2 h ong2
608 | hong3 h ong3
609 | hong4 h ong4
610 | hong5 h ong5
611 | hou1 h ou1
612 | hou2 h ou2
613 | hou3 h ou3
614 | hou4 h ou4
615 | hou5 h ou5
616 | hu1 h u1
617 | hu2 h u2
618 | hu3 h u3
619 | hu4 h u4
620 | hu5 h u5
621 | hua1 h ua1
622 | hua2 h ua2
623 | hua3 h ua3
624 | hua4 h ua4
625 | hua5 h ua5
626 | huai1 h uai1
627 | huai2 h uai2
628 | huai3 h uai3
629 | huai4 h uai4
630 | huai5 h uai5
631 | huan1 h uan1
632 | huan2 h uan2
633 | huan3 h uan3
634 | huan4 h uan4
635 | huan5 h uan5
636 | huang1 h uang1
637 | huang2 h uang2
638 | huang3 h uang3
639 | huang4 h uang4
640 | huang5 h uang5
641 | hui1 h uei1
642 | hui2 h uei2
643 | hui3 h uei3
644 | hui4 h uei4
645 | hui5 h uei5
646 | hun1 h uen1
647 | hun2 h uen2
648 | hun3 h uen3
649 | hun4 h uen4
650 | hun5 h uen5
651 | huo1 h uo1
652 | huo2 h uo2
653 | huo3 h uo3
654 | huo4 h uo4
655 | huo5 h uo5
656 | ji1 j i1
657 | ji2 j i2
658 | ji3 j i3
659 | ji4 j i4
660 | ji5 j i5
661 | jia1 j ia1
662 | jia2 j ia2
663 | jia3 j ia3
664 | jia4 j ia4
665 | jia5 j ia5
666 | jian1 j ian1
667 | jian2 j ian2
668 | jian3 j ian3
669 | jian4 j ian4
670 | jian5 j ian5
671 | jiang1 j iang1
672 | jiang2 j iang2
673 | jiang3 j iang3
674 | jiang4 j iang4
675 | jiang5 j iang5
676 | jiao1 j iao1
677 | jiao2 j iao2
678 | jiao3 j iao3
679 | jiao4 j iao4
680 | jiao5 j iao5
681 | jie1 j ie1
682 | jie2 j ie2
683 | jie3 j ie3
684 | jie4 j ie4
685 | jie5 j ie5
686 | jin1 j in1
687 | jin2 j in2
688 | jin3 j in3
689 | jin4 j in4
690 | jin5 j in5
691 | jing1 j ing1
692 | jing2 j ing2
693 | jing3 j ing3
694 | jing4 j ing4
695 | jing5 j ing5
696 | jiong1 j iong1
697 | jiong2 j iong2
698 | jiong3 j iong3
699 | jiong4 j iong4
700 | jiong5 j iong5
701 | jiu1 j iou1
702 | jiu2 j iou2
703 | jiu3 j iou3
704 | jiu4 j iou4
705 | jiu5 j iou5
706 | ju1 j v1
707 | ju2 j v2
708 | ju3 j v3
709 | ju4 j v4
710 | ju5 j v5
711 | juan1 j van1
712 | juan2 j van2
713 | juan3 j van3
714 | juan4 j van4
715 | juan5 j van5
716 | jue1 j ve1
717 | jue2 j ve2
718 | jue3 j ve3
719 | jue4 j ve4
720 | jue5 j ve5
721 | jun1 j vn1
722 | jun2 j vn2
723 | jun3 j vn3
724 | jun4 j vn4
725 | jun5 j vn5
726 | ka1 k a1
727 | ka2 k a2
728 | ka3 k a3
729 | ka4 k a4
730 | ka5 k a5
731 | kai1 k ai1
732 | kai2 k ai2
733 | kai3 k ai3
734 | kai4 k ai4
735 | kai5 k ai5
736 | kan1 k an1
737 | kan2 k an2
738 | kan3 k an3
739 | kan4 k an4
740 | kan5 k an5
741 | kang1 k ang1
742 | kang2 k ang2
743 | kang3 k ang3
744 | kang4 k ang4
745 | kang5 k ang5
746 | kao1 k ao1
747 | kao2 k ao2
748 | kao3 k ao3
749 | kao4 k ao4
750 | kao5 k ao5
751 | ke1 k e1
752 | ke2 k e2
753 | ke3 k e3
754 | ke4 k e4
755 | ke5 k e5
756 | kei1 k ei1
757 | kei2 k ei2
758 | kei3 k ei3
759 | kei4 k ei4
760 | kei5 k ei5
761 | ken1 k en1
762 | ken2 k en2
763 | ken3 k en3
764 | ken4 k en4
765 | ken5 k en5
766 | keng1 k eng1
767 | keng2 k eng2
768 | keng3 k eng3
769 | keng4 k eng4
770 | keng5 k eng5
771 | kong1 k ong1
772 | kong2 k ong2
773 | kong3 k ong3
774 | kong4 k ong4
775 | kong5 k ong5
776 | kou1 k ou1
777 | kou2 k ou2
778 | kou3 k ou3
779 | kou4 k ou4
780 | kou5 k ou5
781 | ku1 k u1
782 | ku2 k u2
783 | ku3 k u3
784 | ku4 k u4
785 | ku5 k u5
786 | kua1 k ua1
787 | kua2 k ua2
788 | kua3 k ua3
789 | kua4 k ua4
790 | kua5 k ua5
791 | kuai1 k uai1
792 | kuai2 k uai2
793 | kuai3 k uai3
794 | kuai4 k uai4
795 | kuai5 k uai5
796 | kuan1 k uan1
797 | kuan2 k uan2
798 | kuan3 k uan3
799 | kuan4 k uan4
800 | kuan5 k uan5
801 | kuang1 k uang1
802 | kuang2 k uang2
803 | kuang3 k uang3
804 | kuang4 k uang4
805 | kuang5 k uang5
806 | kui1 k uei1
807 | kui2 k uei2
808 | kui3 k uei3
809 | kui4 k uei4
810 | kui5 k uei5
811 | kun1 k uen1
812 | kun2 k uen2
813 | kun3 k uen3
814 | kun4 k uen4
815 | kun5 k uen5
816 | kuo1 k uo1
817 | kuo2 k uo2
818 | kuo3 k uo3
819 | kuo4 k uo4
820 | kuo5 k uo5
821 | la1 l a1
822 | la2 l a2
823 | la3 l a3
824 | la4 l a4
825 | la5 l a5
826 | lai1 l ai1
827 | lai2 l ai2
828 | lai3 l ai3
829 | lai4 l ai4
830 | lai5 l ai5
831 | lan1 l an1
832 | lan2 l an2
833 | lan3 l an3
834 | lan4 l an4
835 | lan5 l an5
836 | lang1 l ang1
837 | lang2 l ang2
838 | lang3 l ang3
839 | lang4 l ang4
840 | lang5 l ang5
841 | lao1 l ao1
842 | lao2 l ao2
843 | lao3 l ao3
844 | lao4 l ao4
845 | lao5 l ao5
846 | le1 l e1
847 | le2 l e2
848 | le3 l e3
849 | le4 l e4
850 | le5 l e5
851 | lei1 l ei1
852 | lei2 l ei2
853 | lei3 l ei3
854 | lei4 l ei4
855 | lei5 l ei5
856 | leng1 l eng1
857 | leng2 l eng2
858 | leng3 l eng3
859 | leng4 l eng4
860 | leng5 l eng5
861 | li1 l i1
862 | li2 l i2
863 | li3 l i3
864 | li4 l i4
865 | li5 l i5
866 | lia1 l ia1
867 | lia2 l ia2
868 | lia3 l ia3
869 | lia4 l ia4
870 | lia5 l ia5
871 | lian1 l ian1
872 | lian2 l ian2
873 | lian3 l ian3
874 | lian4 l ian4
875 | lian5 l ian5
876 | liang1 l iang1
877 | liang2 l iang2
878 | liang3 l iang3
879 | liang4 l iang4
880 | liang5 l iang5
881 | liao1 l iao1
882 | liao2 l iao2
883 | liao3 l iao3
884 | liao4 l iao4
885 | liao5 l iao5
886 | lie1 l ie1
887 | lie2 l ie2
888 | lie3 l ie3
889 | lie4 l ie4
890 | lie5 l ie5
891 | lin1 l in1
892 | lin2 l in2
893 | lin3 l in3
894 | lin4 l in4
895 | lin5 l in5
896 | ling1 l ing1
897 | ling2 l ing2
898 | ling3 l ing3
899 | ling4 l ing4
900 | ling5 l ing5
901 | liu1 l iou1
902 | liu2 l iou2
903 | liu3 l iou3
904 | liu4 l iou4
905 | liu5 l iou5
906 | lo1 l o1
907 | lo2 l o2
908 | lo3 l o3
909 | lo4 l o4
910 | lo5 l o5
911 | long1 l ong1
912 | long2 l ong2
913 | long3 l ong3
914 | long4 l ong4
915 | long5 l ong5
916 | lou1 l ou1
917 | lou2 l ou2
918 | lou3 l ou3
919 | lou4 l ou4
920 | lou5 l ou5
921 | lu1 l u1
922 | lu2 l u2
923 | lu3 l u3
924 | lu4 l u4
925 | lu5 l u5
926 | luan1 l uan1
927 | luan2 l uan2
928 | luan3 l uan3
929 | luan4 l uan4
930 | luan5 l uan5
931 | lue1 l ve1
932 | lue2 l ve2
933 | lue3 l ve3
934 | lue4 l ve4
935 | lue5 l ve5
936 | lve1 l ve1
937 | lve2 l ve2
938 | lve3 l ve3
939 | lve4 l ve4
940 | lve5 l ve5
941 | lun1 l uen1
942 | lun2 l uen2
943 | lun3 l uen3
944 | lun4 l uen4
945 | lun5 l uen5
946 | luo1 l uo1
947 | luo2 l uo2
948 | luo3 l uo3
949 | luo4 l uo4
950 | luo5 l uo5
951 | lv1 l v1
952 | lv2 l v2
953 | lv3 l v3
954 | lv4 l v4
955 | lv5 l v5
956 | ma1 m a1
957 | ma2 m a2
958 | ma3 m a3
959 | ma4 m a4
960 | ma5 m a5
961 | mai1 m ai1
962 | mai2 m ai2
963 | mai3 m ai3
964 | mai4 m ai4
965 | mai5 m ai5
966 | man1 m an1
967 | man2 m an2
968 | man3 m an3
969 | man4 m an4
970 | man5 m an5
971 | mang1 m ang1
972 | mang2 m ang2
973 | mang3 m ang3
974 | mang4 m ang4
975 | mang5 m ang5
976 | mao1 m ao1
977 | mao2 m ao2
978 | mao3 m ao3
979 | mao4 m ao4
980 | mao5 m ao5
981 | me1 m e1
982 | me2 m e2
983 | me3 m e3
984 | me4 m e4
985 | me5 m e5
986 | mei1 m ei1
987 | mei2 m ei2
988 | mei3 m ei3
989 | mei4 m ei4
990 | mei5 m ei5
991 | men1 m en1
992 | men2 m en2
993 | men3 m en3
994 | men4 m en4
995 | men5 m en5
996 | meng1 m eng1
997 | meng2 m eng2
998 | meng3 m eng3
999 | meng4 m eng4
1000 | meng5 m eng5
1001 | mi1 m i1
1002 | mi2 m i2
1003 | mi3 m i3
1004 | mi4 m i4
1005 | mi5 m i5
1006 | mian1 m ian1
1007 | mian2 m ian2
1008 | mian3 m ian3
1009 | mian4 m ian4
1010 | mian5 m ian5
1011 | miao1 m iao1
1012 | miao2 m iao2
1013 | miao3 m iao3
1014 | miao4 m iao4
1015 | miao5 m iao5
1016 | mie1 m ie1
1017 | mie2 m ie2
1018 | mie3 m ie3
1019 | mie4 m ie4
1020 | mie5 m ie5
1021 | min1 m in1
1022 | min2 m in2
1023 | min3 m in3
1024 | min4 m in4
1025 | min5 m in5
1026 | ming1 m ing1
1027 | ming2 m ing2
1028 | ming3 m ing3
1029 | ming4 m ing4
1030 | ming5 m ing5
1031 | miu1 m iou1
1032 | miu2 m iou2
1033 | miu3 m iou3
1034 | miu4 m iou4
1035 | miu5 m iou5
1036 | mo1 m o1
1037 | mo2 m o2
1038 | mo3 m o3
1039 | mo4 m o4
1040 | mo5 m o5
1041 | mou1 m ou1
1042 | mou2 m ou2
1043 | mou3 m ou3
1044 | mou4 m ou4
1045 | mou5 m ou5
1046 | mu1 m u1
1047 | mu2 m u2
1048 | mu3 m u3
1049 | mu4 m u4
1050 | mu5 m u5
1051 | na1 n a1
1052 | na2 n a2
1053 | na3 n a3
1054 | na4 n a4
1055 | na5 n a5
1056 | nai1 n ai1
1057 | nai2 n ai2
1058 | nai3 n ai3
1059 | nai4 n ai4
1060 | nai5 n ai5
1061 | nan1 n an1
1062 | nan2 n an2
1063 | nan3 n an3
1064 | nan4 n an4
1065 | nan5 n an5
1066 | nang1 n ang1
1067 | nang2 n ang2
1068 | nang3 n ang3
1069 | nang4 n ang4
1070 | nang5 n ang5
1071 | nao1 n ao1
1072 | nao2 n ao2
1073 | nao3 n ao3
1074 | nao4 n ao4
1075 | nao5 n ao5
1076 | ne1 n e1
1077 | ne2 n e2
1078 | ne3 n e3
1079 | ne4 n e4
1080 | ne5 n e5
1081 | nei1 n ei1
1082 | nei2 n ei2
1083 | nei3 n ei3
1084 | nei4 n ei4
1085 | nei5 n ei5
1086 | nen1 n en1
1087 | nen2 n en2
1088 | nen3 n en3
1089 | nen4 n en4
1090 | nen5 n en5
1091 | neng1 n eng1
1092 | neng2 n eng2
1093 | neng3 n eng3
1094 | neng4 n eng4
1095 | neng5 n eng5
1096 | ni1 n i1
1097 | ni2 n i2
1098 | ni3 n i3
1099 | ni4 n i4
1100 | ni5 n i5
1101 | nian1 n ian1
1102 | nian2 n ian2
1103 | nian3 n ian3
1104 | nian4 n ian4
1105 | nian5 n ian5
1106 | niang1 n iang1
1107 | niang2 n iang2
1108 | niang3 n iang3
1109 | niang4 n iang4
1110 | niang5 n iang5
1111 | niao1 n iao1
1112 | niao2 n iao2
1113 | niao3 n iao3
1114 | niao4 n iao4
1115 | niao5 n iao5
1116 | nie1 n ie1
1117 | nie2 n ie2
1118 | nie3 n ie3
1119 | nie4 n ie4
1120 | nie5 n ie5
1121 | nin1 n in1
1122 | nin2 n in2
1123 | nin3 n in3
1124 | nin4 n in4
1125 | nin5 n in5
1126 | ning1 n ing1
1127 | ning2 n ing2
1128 | ning3 n ing3
1129 | ning4 n ing4
1130 | ning5 n ing5
1131 | niu1 n iou1
1132 | niu2 n iou2
1133 | niu3 n iou3
1134 | niu4 n iou4
1135 | niu5 n iou5
1136 | nong1 n ong1
1137 | nong2 n ong2
1138 | nong3 n ong3
1139 | nong4 n ong4
1140 | nong5 n ong5
1141 | nou1 n ou1
1142 | nou2 n ou2
1143 | nou3 n ou3
1144 | nou4 n ou4
1145 | nou5 n ou5
1146 | nu1 n u1
1147 | nu2 n u2
1148 | nu3 n u3
1149 | nu4 n u4
1150 | nu5 n u5
1151 | nuan1 n uan1
1152 | nuan2 n uan2
1153 | nuan3 n uan3
1154 | nuan4 n uan4
1155 | nuan5 n uan5
1156 | nue1 n ve1
1157 | nue2 n ve2
1158 | nue3 n ve3
1159 | nue4 n ve4
1160 | nue5 n ve5
1161 | nve1 n ve1
1162 | nve2 n ve2
1163 | nve3 n ve3
1164 | nve4 n ve4
1165 | nve5 n ve5
1166 | nuo1 n uo1
1167 | nuo2 n uo2
1168 | nuo3 n uo3
1169 | nuo4 n uo4
1170 | nuo5 n uo5
1171 | nv1 n v1
1172 | nv2 n v2
1173 | nv3 n v3
1174 | nv4 n v4
1175 | nv5 n v5
1176 | o1 o1
1177 | o2 o2
1178 | o3 o3
1179 | o4 o4
1180 | o5 o5
1181 | ou1 ou1
1182 | ou2 ou2
1183 | ou3 ou3
1184 | ou4 ou4
1185 | ou5 ou5
1186 | pa1 p a1
1187 | pa2 p a2
1188 | pa3 p a3
1189 | pa4 p a4
1190 | pa5 p a5
1191 | pai1 p ai1
1192 | pai2 p ai2
1193 | pai3 p ai3
1194 | pai4 p ai4
1195 | pai5 p ai5
1196 | pan1 p an1
1197 | pan2 p an2
1198 | pan3 p an3
1199 | pan4 p an4
1200 | pan5 p an5
1201 | pang1 p ang1
1202 | pang2 p ang2
1203 | pang3 p ang3
1204 | pang4 p ang4
1205 | pang5 p ang5
1206 | pao1 p ao1
1207 | pao2 p ao2
1208 | pao3 p ao3
1209 | pao4 p ao4
1210 | pao5 p ao5
1211 | pei1 p ei1
1212 | pei2 p ei2
1213 | pei3 p ei3
1214 | pei4 p ei4
1215 | pei5 p ei5
1216 | pen1 p en1
1217 | pen2 p en2
1218 | pen3 p en3
1219 | pen4 p en4
1220 | pen5 p en5
1221 | peng1 p eng1
1222 | peng2 p eng2
1223 | peng3 p eng3
1224 | peng4 p eng4
1225 | peng5 p eng5
1226 | pi1 p i1
1227 | pi2 p i2
1228 | pi3 p i3
1229 | pi4 p i4
1230 | pi5 p i5
1231 | pian1 p ian1
1232 | pian2 p ian2
1233 | pian3 p ian3
1234 | pian4 p ian4
1235 | pian5 p ian5
1236 | piao1 p iao1
1237 | piao2 p iao2
1238 | piao3 p iao3
1239 | piao4 p iao4
1240 | piao5 p iao5
1241 | pie1 p ie1
1242 | pie2 p ie2
1243 | pie3 p ie3
1244 | pie4 p ie4
1245 | pie5 p ie5
1246 | pin1 p in1
1247 | pin2 p in2
1248 | pin3 p in3
1249 | pin4 p in4
1250 | pin5 p in5
1251 | ping1 p ing1
1252 | ping2 p ing2
1253 | ping3 p ing3
1254 | ping4 p ing4
1255 | ping5 p ing5
1256 | po1 p o1
1257 | po2 p o2
1258 | po3 p o3
1259 | po4 p o4
1260 | po5 p o5
1261 | pou1 p ou1
1262 | pou2 p ou2
1263 | pou3 p ou3
1264 | pou4 p ou4
1265 | pou5 p ou5
1266 | pu1 p u1
1267 | pu2 p u2
1268 | pu3 p u3
1269 | pu4 p u4
1270 | pu5 p u5
1271 | qi1 q i1
1272 | qi2 q i2
1273 | qi3 q i3
1274 | qi4 q i4
1275 | qi5 q i5
1276 | qia1 q ia1
1277 | qia2 q ia2
1278 | qia3 q ia3
1279 | qia4 q ia4
1280 | qia5 q ia5
1281 | qian1 q ian1
1282 | qian2 q ian2
1283 | qian3 q ian3
1284 | qian4 q ian4
1285 | qian5 q ian5
1286 | qiang1 q iang1
1287 | qiang2 q iang2
1288 | qiang3 q iang3
1289 | qiang4 q iang4
1290 | qiang5 q iang5
1291 | qiao1 q iao1
1292 | qiao2 q iao2
1293 | qiao3 q iao3
1294 | qiao4 q iao4
1295 | qiao5 q iao5
1296 | qie1 q ie1
1297 | qie2 q ie2
1298 | qie3 q ie3
1299 | qie4 q ie4
1300 | qie5 q ie5
1301 | qin1 q in1
1302 | qin2 q in2
1303 | qin3 q in3
1304 | qin4 q in4
1305 | qin5 q in5
1306 | qing1 q ing1
1307 | qing2 q ing2
1308 | qing3 q ing3
1309 | qing4 q ing4
1310 | qing5 q ing5
1311 | qiong1 q iong1
1312 | qiong2 q iong2
1313 | qiong3 q iong3
1314 | qiong4 q iong4
1315 | qiong5 q iong5
1316 | qiu1 q iou1
1317 | qiu2 q iou2
1318 | qiu3 q iou3
1319 | qiu4 q iou4
1320 | qiu5 q iou5
1321 | qu1 q v1
1322 | qu2 q v2
1323 | qu3 q v3
1324 | qu4 q v4
1325 | qu5 q v5
1326 | quan1 q van1
1327 | quan2 q van2
1328 | quan3 q van3
1329 | quan4 q van4
1330 | quan5 q van5
1331 | que1 q ve1
1332 | que2 q ve2
1333 | que3 q ve3
1334 | que4 q ve4
1335 | que5 q ve5
1336 | qun1 q vn1
1337 | qun2 q vn2
1338 | qun3 q vn3
1339 | qun4 q vn4
1340 | qun5 q vn5
1341 | ran1 r an1
1342 | ran2 r an2
1343 | ran3 r an3
1344 | ran4 r an4
1345 | ran5 r an5
1346 | rang1 r ang1
1347 | rang2 r ang2
1348 | rang3 r ang3
1349 | rang4 r ang4
1350 | rang5 r ang5
1351 | rao1 r ao1
1352 | rao2 r ao2
1353 | rao3 r ao3
1354 | rao4 r ao4
1355 | rao5 r ao5
1356 | re1 r e1
1357 | re2 r e2
1358 | re3 r e3
1359 | re4 r e4
1360 | re5 r e5
1361 | ren1 r en1
1362 | ren2 r en2
1363 | ren3 r en3
1364 | ren4 r en4
1365 | ren5 r en5
1366 | reng1 r eng1
1367 | reng2 r eng2
1368 | reng3 r eng3
1369 | reng4 r eng4
1370 | reng5 r eng5
1371 | ri1 r iii1
1372 | ri2 r iii2
1373 | ri3 r iii3
1374 | ri4 r iii4
1375 | ri5 r iii5
1376 | rong1 r ong1
1377 | rong2 r ong2
1378 | rong3 r ong3
1379 | rong4 r ong4
1380 | rong5 r ong5
1381 | rou1 r ou1
1382 | rou2 r ou2
1383 | rou3 r ou3
1384 | rou4 r ou4
1385 | rou5 r ou5
1386 | ru1 r u1
1387 | ru2 r u2
1388 | ru3 r u3
1389 | ru4 r u4
1390 | ru5 r u5
1391 | rua1 r ua1
1392 | rua2 r ua2
1393 | rua3 r ua3
1394 | rua4 r ua4
1395 | rua5 r ua5
1396 | ruan1 r uan1
1397 | ruan2 r uan2
1398 | ruan3 r uan3
1399 | ruan4 r uan4
1400 | ruan5 r uan5
1401 | rui1 r uei1
1402 | rui2 r uei2
1403 | rui3 r uei3
1404 | rui4 r uei4
1405 | rui5 r uei5
1406 | run1 r uen1
1407 | run2 r uen2
1408 | run3 r uen3
1409 | run4 r uen4
1410 | run5 r uen5
1411 | ruo1 r uo1
1412 | ruo2 r uo2
1413 | ruo3 r uo3
1414 | ruo4 r uo4
1415 | ruo5 r uo5
1416 | sa1 s a1
1417 | sa2 s a2
1418 | sa3 s a3
1419 | sa4 s a4
1420 | sa5 s a5
1421 | sai1 s ai1
1422 | sai2 s ai2
1423 | sai3 s ai3
1424 | sai4 s ai4
1425 | sai5 s ai5
1426 | san1 s an1
1427 | san2 s an2
1428 | san3 s an3
1429 | san4 s an4
1430 | san5 s an5
1431 | sang1 s ang1
1432 | sang2 s ang2
1433 | sang3 s ang3
1434 | sang4 s ang4
1435 | sang5 s ang5
1436 | sao1 s ao1
1437 | sao2 s ao2
1438 | sao3 s ao3
1439 | sao4 s ao4
1440 | sao5 s ao5
1441 | se1 s e1
1442 | se2 s e2
1443 | se3 s e3
1444 | se4 s e4
1445 | se5 s e5
1446 | sen1 s en1
1447 | sen2 s en2
1448 | sen3 s en3
1449 | sen4 s en4
1450 | sen5 s en5
1451 | seng1 s eng1
1452 | seng2 s eng2
1453 | seng3 s eng3
1454 | seng4 s eng4
1455 | seng5 s eng5
1456 | sha1 sh a1
1457 | sha2 sh a2
1458 | sha3 sh a3
1459 | sha4 sh a4
1460 | sha5 sh a5
1461 | shai1 sh ai1
1462 | shai2 sh ai2
1463 | shai3 sh ai3
1464 | shai4 sh ai4
1465 | shai5 sh ai5
1466 | shan1 sh an1
1467 | shan2 sh an2
1468 | shan3 sh an3
1469 | shan4 sh an4
1470 | shan5 sh an5
1471 | shang1 sh ang1
1472 | shang2 sh ang2
1473 | shang3 sh ang3
1474 | shang4 sh ang4
1475 | shang5 sh ang5
1476 | shao1 sh ao1
1477 | shao2 sh ao2
1478 | shao3 sh ao3
1479 | shao4 sh ao4
1480 | shao5 sh ao5
1481 | she1 sh e1
1482 | she2 sh e2
1483 | she3 sh e3
1484 | she4 sh e4
1485 | she5 sh e5
1486 | shei1 sh ei1
1487 | shei2 sh ei2
1488 | shei3 sh ei3
1489 | shei4 sh ei4
1490 | shei5 sh ei5
1491 | shen1 sh en1
1492 | shen2 sh en2
1493 | shen3 sh en3
1494 | shen4 sh en4
1495 | shen5 sh en5
1496 | sheng1 sh eng1
1497 | sheng2 sh eng2
1498 | sheng3 sh eng3
1499 | sheng4 sh eng4
1500 | sheng5 sh eng5
1501 | shi1 sh iii1
1502 | shi2 sh iii2
1503 | shi3 sh iii3
1504 | shi4 sh iii4
1505 | shi5 sh iii5
1506 | shou1 sh ou1
1507 | shou2 sh ou2
1508 | shou3 sh ou3
1509 | shou4 sh ou4
1510 | shou5 sh ou5
1511 | shu1 sh u1
1512 | shu2 sh u2
1513 | shu3 sh u3
1514 | shu4 sh u4
1515 | shu5 sh u5
1516 | shua1 sh ua1
1517 | shua2 sh ua2
1518 | shua3 sh ua3
1519 | shua4 sh ua4
1520 | shua5 sh ua5
1521 | shuai1 sh uai1
1522 | shuai2 sh uai2
1523 | shuai3 sh uai3
1524 | shuai4 sh uai4
1525 | shuai5 sh uai5
1526 | shuan1 sh uan1
1527 | shuan2 sh uan2
1528 | shuan3 sh uan3
1529 | shuan4 sh uan4
1530 | shuan5 sh uan5
1531 | shuang1 sh uang1
1532 | shuang2 sh uang2
1533 | shuang3 sh uang3
1534 | shuang4 sh uang4
1535 | shuang5 sh uang5
1536 | shui1 sh uei1
1537 | shui2 sh uei2
1538 | shui3 sh uei3
1539 | shui4 sh uei4
1540 | shui5 sh uei5
1541 | shun1 sh uen1
1542 | shun2 sh uen2
1543 | shun3 sh uen3
1544 | shun4 sh uen4
1545 | shun5 sh uen5
1546 | shuo1 sh uo1
1547 | shuo2 sh uo2
1548 | shuo3 sh uo3
1549 | shuo4 sh uo4
1550 | shuo5 sh uo5
1551 | si1 s ii1
1552 | si2 s ii2
1553 | si3 s ii3
1554 | si4 s ii4
1555 | si5 s ii5
1556 | song1 s ong1
1557 | song2 s ong2
1558 | song3 s ong3
1559 | song4 s ong4
1560 | song5 s ong5
1561 | sou1 s ou1
1562 | sou2 s ou2
1563 | sou3 s ou3
1564 | sou4 s ou4
1565 | sou5 s ou5
1566 | su1 s u1
1567 | su2 s u2
1568 | su3 s u3
1569 | su4 s u4
1570 | su5 s u5
1571 | suan1 s uan1
1572 | suan2 s uan2
1573 | suan3 s uan3
1574 | suan4 s uan4
1575 | suan5 s uan5
1576 | sui1 s uei1
1577 | sui2 s uei2
1578 | sui3 s uei3
1579 | sui4 s uei4
1580 | sui5 s uei5
1581 | sun1 s uen1
1582 | sun2 s uen2
1583 | sun3 s uen3
1584 | sun4 s uen4
1585 | sun5 s uen5
1586 | suo1 s uo1
1587 | suo2 s uo2
1588 | suo3 s uo3
1589 | suo4 s uo4
1590 | suo5 s uo5
1591 | ta1 t a1
1592 | ta2 t a2
1593 | ta3 t a3
1594 | ta4 t a4
1595 | ta5 t a5
1596 | tai1 t ai1
1597 | tai2 t ai2
1598 | tai3 t ai3
1599 | tai4 t ai4
1600 | tai5 t ai5
1601 | tan1 t an1
1602 | tan2 t an2
1603 | tan3 t an3
1604 | tan4 t an4
1605 | tan5 t an5
1606 | tang1 t ang1
1607 | tang2 t ang2
1608 | tang3 t ang3
1609 | tang4 t ang4
1610 | tang5 t ang5
1611 | tao1 t ao1
1612 | tao2 t ao2
1613 | tao3 t ao3
1614 | tao4 t ao4
1615 | tao5 t ao5
1616 | te1 t e1
1617 | te2 t e2
1618 | te3 t e3
1619 | te4 t e4
1620 | te5 t e5
1621 | tei1 t ei1
1622 | tei2 t ei2
1623 | tei3 t ei3
1624 | tei4 t ei4
1625 | tei5 t ei5
1626 | teng1 t eng1
1627 | teng2 t eng2
1628 | teng3 t eng3
1629 | teng4 t eng4
1630 | teng5 t eng5
1631 | ti1 t i1
1632 | ti2 t i2
1633 | ti3 t i3
1634 | ti4 t i4
1635 | ti5 t i5
1636 | tian1 t ian1
1637 | tian2 t ian2
1638 | tian3 t ian3
1639 | tian4 t ian4
1640 | tian5 t ian5
1641 | tiao1 t iao1
1642 | tiao2 t iao2
1643 | tiao3 t iao3
1644 | tiao4 t iao4
1645 | tiao5 t iao5
1646 | tie1 t ie1
1647 | tie2 t ie2
1648 | tie3 t ie3
1649 | tie4 t ie4
1650 | tie5 t ie5
1651 | ting1 t ing1
1652 | ting2 t ing2
1653 | ting3 t ing3
1654 | ting4 t ing4
1655 | ting5 t ing5
1656 | tong1 t ong1
1657 | tong2 t ong2
1658 | tong3 t ong3
1659 | tong4 t ong4
1660 | tong5 t ong5
1661 | tou1 t ou1
1662 | tou2 t ou2
1663 | tou3 t ou3
1664 | tou4 t ou4
1665 | tou5 t ou5
1666 | tu1 t u1
1667 | tu2 t u2
1668 | tu3 t u3
1669 | tu4 t u4
1670 | tu5 t u5
1671 | tuan1 t uan1
1672 | tuan2 t uan2
1673 | tuan3 t uan3
1674 | tuan4 t uan4
1675 | tuan5 t uan5
1676 | tui1 t uei1
1677 | tui2 t uei2
1678 | tui3 t uei3
1679 | tui4 t uei4
1680 | tui5 t uei5
1681 | tun1 t uen1
1682 | tun2 t uen2
1683 | tun3 t uen3
1684 | tun4 t uen4
1685 | tun5 t uen5
1686 | tuo1 t uo1
1687 | tuo2 t uo2
1688 | tuo3 t uo3
1689 | tuo4 t uo4
1690 | tuo5 t uo5
1691 | wa1 w ua1
1692 | wa2 w ua2
1693 | wa3 w ua3
1694 | wa4 w ua4
1695 | wa5 w ua5
1696 | wai1 w uai1
1697 | wai2 w uai2
1698 | wai3 w uai3
1699 | wai4 w uai4
1700 | wai5 w uai5
1701 | wan1 w uan1
1702 | wan2 w uan2
1703 | wan3 w uan3
1704 | wan4 w uan4
1705 | wan5 w uan5
1706 | wang1 w uang1
1707 | wang2 w uang2
1708 | wang3 w uang3
1709 | wang4 w uang4
1710 | wang5 w uang5
1711 | wei1 w uei1
1712 | wei2 w uei2
1713 | wei3 w uei3
1714 | wei4 w uei4
1715 | wei5 w uei5
1716 | wen1 w uen1
1717 | wen2 w uen2
1718 | wen3 w uen3
1719 | wen4 w uen4
1720 | wen5 w uen5
1721 | weng1 w uen1
1722 | weng2 w uen2
1723 | weng3 w uen3
1724 | weng4 w uen4
1725 | weng5 w uen5
1726 | wo1 w uo1
1727 | wo2 w uo2
1728 | wo3 w uo3
1729 | wo4 w uo4
1730 | wo5 w uo5
1731 | wu1 w u1
1732 | wu2 w u2
1733 | wu3 w u3
1734 | wu4 w u4
1735 | wu5 w u5
1736 | xi1 x i1
1737 | xi2 x i2
1738 | xi3 x i3
1739 | xi4 x i4
1740 | xi5 x i5
1741 | xia1 x ia1
1742 | xia2 x ia2
1743 | xia3 x ia3
1744 | xia4 x ia4
1745 | xia5 x ia5
1746 | xian1 x ian1
1747 | xian2 x ian2
1748 | xian3 x ian3
1749 | xian4 x ian4
1750 | xian5 x ian5
1751 | xiang1 x iang1
1752 | xiang2 x iang2
1753 | xiang3 x iang3
1754 | xiang4 x iang4
1755 | xiang5 x iang5
1756 | xiao1 x iao1
1757 | xiao2 x iao2
1758 | xiao3 x iao3
1759 | xiao4 x iao4
1760 | xiao5 x iao5
1761 | xie1 x ie1
1762 | xie2 x ie2
1763 | xie3 x ie3
1764 | xie4 x ie4
1765 | xie5 x ie5
1766 | xin1 x in1
1767 | xin2 x in2
1768 | xin3 x in3
1769 | xin4 x in4
1770 | xin5 x in5
1771 | xing1 x ing1
1772 | xing2 x ing2
1773 | xing3 x ing3
1774 | xing4 x ing4
1775 | xing5 x ing5
1776 | xiong1 x iong1
1777 | xiong2 x iong2
1778 | xiong3 x iong3
1779 | xiong4 x iong4
1780 | xiong5 x iong5
1781 | xiu1 x iou1
1782 | xiu2 x iou2
1783 | xiu3 x iou3
1784 | xiu4 x iou4
1785 | xiu5 x iou5
1786 | xu1 x v1
1787 | xu2 x v2
1788 | xu3 x v3
1789 | xu4 x v4
1790 | xu5 x v5
1791 | xuan1 x van1
1792 | xuan2 x van2
1793 | xuan3 x van3
1794 | xuan4 x van4
1795 | xuan5 x van5
1796 | xue1 x ve1
1797 | xue2 x ve2
1798 | xue3 x ve3
1799 | xue4 x ve4
1800 | xue5 x ve5
1801 | xun1 x vn1
1802 | xun2 x vn2
1803 | xun3 x vn3
1804 | xun4 x vn4
1805 | xun5 x vn5
1806 | ya1 y ia1
1807 | ya2 y ia2
1808 | ya3 y ia3
1809 | ya4 y ia4
1810 | ya5 y ia5
1811 | yan1 y ian1
1812 | yan2 y ian2
1813 | yan3 y ian3
1814 | yan4 y ian4
1815 | yan5 y ian5
1816 | yang1 y iang1
1817 | yang2 y iang2
1818 | yang3 y iang3
1819 | yang4 y iang4
1820 | yang5 y iang5
1821 | yao1 y iao1
1822 | yao2 y iao2
1823 | yao3 y iao3
1824 | yao4 y iao4
1825 | yao5 y iao5
1826 | ye1 y ie1
1827 | ye2 y ie2
1828 | ye3 y ie3
1829 | ye4 y ie4
1830 | ye5 y ie5
1831 | yi1 y i1
1832 | yi2 y i2
1833 | yi3 y i3
1834 | yi4 y i4
1835 | yi5 y i5
1836 | yin1 y in1
1837 | yin2 y in2
1838 | yin3 y in3
1839 | yin4 y in4
1840 | yin5 y in5
1841 | ying1 y ing1
1842 | ying2 y ing2
1843 | ying3 y ing3
1844 | ying4 y ing4
1845 | ying5 y ing5
1846 | yo1 y iou1
1847 | yo2 y iou2
1848 | yo3 y iou3
1849 | yo4 y iou4
1850 | yo5 y iou5
1851 | yong1 y iong1
1852 | yong2 y iong2
1853 | yong3 y iong3
1854 | yong4 y iong4
1855 | yong5 y iong5
1856 | you1 y iou1
1857 | you2 y iou2
1858 | you3 y iou3
1859 | you4 y iou4
1860 | you5 y iou5
1861 | yu1 y v1
1862 | yu2 y v2
1863 | yu3 y v3
1864 | yu4 y v4
1865 | yu5 y v5
1866 | yuan1 y van1
1867 | yuan2 y van2
1868 | yuan3 y van3
1869 | yuan4 y van4
1870 | yuan5 y van5
1871 | yue1 y ve1
1872 | yue2 y ve2
1873 | yue3 y ve3
1874 | yue4 y ve4
1875 | yue5 y ve5
1876 | yun1 y vn1
1877 | yun2 y vn2
1878 | yun3 y vn3
1879 | yun4 y vn4
1880 | yun5 y vn5
1881 | za1 z a1
1882 | za2 z a2
1883 | za3 z a3
1884 | za4 z a4
1885 | za5 z a5
1886 | zai1 z ai1
1887 | zai2 z ai2
1888 | zai3 z ai3
1889 | zai4 z ai4
1890 | zai5 z ai5
1891 | zan1 z an1
1892 | zan2 z an2
1893 | zan3 z an3
1894 | zan4 z an4
1895 | zan5 z an5
1896 | zang1 z ang1
1897 | zang2 z ang2
1898 | zang3 z ang3
1899 | zang4 z ang4
1900 | zang5 z ang5
1901 | zao1 z ao1
1902 | zao2 z ao2
1903 | zao3 z ao3
1904 | zao4 z ao4
1905 | zao5 z ao5
1906 | ze1 z e1
1907 | ze2 z e2
1908 | ze3 z e3
1909 | ze4 z e4
1910 | ze5 z e5
1911 | zei1 z ei1
1912 | zei2 z ei2
1913 | zei3 z ei3
1914 | zei4 z ei4
1915 | zei5 z ei5
1916 | zen1 z en1
1917 | zen2 z en2
1918 | zen3 z en3
1919 | zen4 z en4
1920 | zen5 z en5
1921 | zeng1 z eng1
1922 | zeng2 z eng2
1923 | zeng3 z eng3
1924 | zeng4 z eng4
1925 | zeng5 z eng5
1926 | zha1 zh a1
1927 | zha2 zh a2
1928 | zha3 zh a3
1929 | zha4 zh a4
1930 | zha5 zh a5
1931 | zhai1 zh ai1
1932 | zhai2 zh ai2
1933 | zhai3 zh ai3
1934 | zhai4 zh ai4
1935 | zhai5 zh ai5
1936 | zhan1 zh an1
1937 | zhan2 zh an2
1938 | zhan3 zh an3
1939 | zhan4 zh an4
1940 | zhan5 zh an5
1941 | zhang1 zh ang1
1942 | zhang2 zh ang2
1943 | zhang3 zh ang3
1944 | zhang4 zh ang4
1945 | zhang5 zh ang5
1946 | zhao1 zh ao1
1947 | zhao2 zh ao2
1948 | zhao3 zh ao3
1949 | zhao4 zh ao4
1950 | zhao5 zh ao5
1951 | zhe1 zh e1
1952 | zhe2 zh e2
1953 | zhe3 zh e3
1954 | zhe4 zh e4
1955 | zhe5 zh e5
1956 | zhei1 zh ei1
1957 | zhei2 zh ei2
1958 | zhei3 zh ei3
1959 | zhei4 zh ei4
1960 | zhei5 zh ei5
1961 | zhen1 zh en1
1962 | zhen2 zh en2
1963 | zhen3 zh en3
1964 | zhen4 zh en4
1965 | zhen5 zh en5
1966 | zheng1 zh eng1
1967 | zheng2 zh eng2
1968 | zheng3 zh eng3
1969 | zheng4 zh eng4
1970 | zheng5 zh eng5
1971 | zhi1 zh iii1
1972 | zhi2 zh iii2
1973 | zhi3 zh iii3
1974 | zhi4 zh iii4
1975 | zhi5 zh iii5
1976 | zhong1 zh ong1
1977 | zhong2 zh ong2
1978 | zhong3 zh ong3
1979 | zhong4 zh ong4
1980 | zhong5 zh ong5
1981 | zhou1 zh ou1
1982 | zhou2 zh ou2
1983 | zhou3 zh ou3
1984 | zhou4 zh ou4
1985 | zhou5 zh ou5
1986 | zhu1 zh u1
1987 | zhu2 zh u2
1988 | zhu3 zh u3
1989 | zhu4 zh u4
1990 | zhu5 zh u5
1991 | zhua1 zh ua1
1992 | zhua2 zh ua2
1993 | zhua3 zh ua3
1994 | zhua4 zh ua4
1995 | zhua5 zh ua5
1996 | zhuai1 zh uai1
1997 | zhuai2 zh uai2
1998 | zhuai3 zh uai3
1999 | zhuai4 zh uai4
2000 | zhuai5 zh uai5
2001 | zhuan1 zh uan1
2002 | zhuan2 zh uan2
2003 | zhuan3 zh uan3
2004 | zhuan4 zh uan4
2005 | zhuan5 zh uan5
2006 | zhuang1 zh uang1
2007 | zhuang2 zh uang2
2008 | zhuang3 zh uang3
2009 | zhuang4 zh uang4
2010 | zhuang5 zh uang5
2011 | zhui1 zh uei1
2012 | zhui2 zh uei2
2013 | zhui3 zh uei3
2014 | zhui4 zh uei4
2015 | zhui5 zh uei5
2016 | zhun1 zh uen1
2017 | zhun2 zh uen2
2018 | zhun3 zh uen3
2019 | zhun4 zh uen4
2020 | zhun5 zh uen5
2021 | zhuo1 zh uo1
2022 | zhuo2 zh uo2
2023 | zhuo3 zh uo3
2024 | zhuo4 zh uo4
2025 | zhuo5 zh uo5
2026 | zi1 z ii1
2027 | zi2 z ii2
2028 | zi3 z ii3
2029 | zi4 z ii4
2030 | zi5 z ii5
2031 | zong1 z ong1
2032 | zong2 z ong2
2033 | zong3 z ong3
2034 | zong4 z ong4
2035 | zong5 z ong5
2036 | zou1 z ou1
2037 | zou2 z ou2
2038 | zou3 z ou3
2039 | zou4 z ou4
2040 | zou5 z ou5
2041 | zu1 z u1
2042 | zu2 z u2
2043 | zu3 z u3
2044 | zu4 z u4
2045 | zu5 z u5
2046 | zuan1 z uan1
2047 | zuan2 z uan2
2048 | zuan3 z uan3
2049 | zuan4 z uan4
2050 | zuan5 z uan5
2051 | zui1 z uei1
2052 | zui2 z uei2
2053 | zui3 z uei3
2054 | zui4 z uei4
2055 | zui5 z uei5
2056 | zun1 z uen1
2057 | zun2 z uen2
2058 | zun3 z uen3
2059 | zun4 z uen4
2060 | zun5 z uen5
2061 | zuo1 z uo1
2062 | zuo2 z uo2
2063 | zuo3 z uo3
2064 | zuo4 z uo4
2065 | zuo5 z uo5
2066 | ar1 a1 rr
2067 | ar2 a2 rr
2068 | ar3 a3 rr
2069 | ar4 a4 rr
2070 | ar5 a5 rr
2071 | air1 ai1 rr
2072 | air2 ai2 rr
2073 | air3 ai3 rr
2074 | air4 ai4 rr
2075 | air5 ai5 rr
2076 | anr1 an1 rr
2077 | anr2 an2 rr
2078 | anr3 an3 rr
2079 | anr4 an4 rr
2080 | anr5 an5 rr
2081 | angr1 ang1 rr
2082 | angr2 ang2 rr
2083 | angr3 ang3 rr
2084 | angr4 ang4 rr
2085 | angr5 ang5 rr
2086 | aor1 ao1 rr
2087 | aor2 ao2 rr
2088 | aor3 ao3 rr
2089 | aor4 ao4 rr
2090 | aor5 ao5 rr
2091 | bar1 b a1 rr
2092 | bar2 b a2 rr
2093 | bar3 b a3 rr
2094 | bar4 b a4 rr
2095 | bar5 b a5 rr
2096 | bair1 b ai1 rr
2097 | bair2 b ai2 rr
2098 | bair3 b ai3 rr
2099 | bair4 b ai4 rr
2100 | bair5 b ai5 rr
2101 | banr1 b an1 rr
2102 | banr2 b an2 rr
2103 | banr3 b an3 rr
2104 | banr4 b an4 rr
2105 | banr5 b an5 rr
2106 | bangr1 b ang1 rr
2107 | bangr2 b ang2 rr
2108 | bangr3 b ang3 rr
2109 | bangr4 b ang4 rr
2110 | bangr5 b ang5 rr
2111 | baor1 b ao1 rr
2112 | baor2 b ao2 rr
2113 | baor3 b ao3 rr
2114 | baor4 b ao4 rr
2115 | baor5 b ao5 rr
2116 | beir1 b ei1 rr
2117 | beir2 b ei2 rr
2118 | beir3 b ei3 rr
2119 | beir4 b ei4 rr
2120 | beir5 b ei5 rr
2121 | benr1 b en1 rr
2122 | benr2 b en2 rr
2123 | benr3 b en3 rr
2124 | benr4 b en4 rr
2125 | benr5 b en5 rr
2126 | bengr1 b eng1 rr
2127 | bengr2 b eng2 rr
2128 | bengr3 b eng3 rr
2129 | bengr4 b eng4 rr
2130 | bengr5 b eng5 rr
2131 | bir1 b i1 rr
2132 | bir2 b i2 rr
2133 | bir3 b i3 rr
2134 | bir4 b i4 rr
2135 | bir5 b i5 rr
2136 | bianr1 b ian1 rr
2137 | bianr2 b ian2 rr
2138 | bianr3 b ian3 rr
2139 | bianr4 b ian4 rr
2140 | bianr5 b ian5 rr
2141 | biaor1 b iao1 rr
2142 | biaor2 b iao2 rr
2143 | biaor3 b iao3 rr
2144 | biaor4 b iao4 rr
2145 | biaor5 b iao5 rr
2146 | bier1 b ie1 rr
2147 | bier2 b ie2 rr
2148 | bier3 b ie3 rr
2149 | bier4 b ie4 rr
2150 | bier5 b ie5 rr
2151 | binr1 b in1 rr
2152 | binr2 b in2 rr
2153 | binr3 b in3 rr
2154 | binr4 b in4 rr
2155 | binr5 b in5 rr
2156 | bingr1 b ing1 rr
2157 | bingr2 b ing2 rr
2158 | bingr3 b ing3 rr
2159 | bingr4 b ing4 rr
2160 | bingr5 b ing5 rr
2161 | bor1 b o1 rr
2162 | bor2 b o2 rr
2163 | bor3 b o3 rr
2164 | bor4 b o4 rr
2165 | bor5 b o5 rr
2166 | bur1 b u1 rr
2167 | bur2 b u2 rr
2168 | bur3 b u3 rr
2169 | bur4 b u4 rr
2170 | bur5 b u5 rr
2171 | car1 c a1 rr
2172 | car2 c a2 rr
2173 | car3 c a3 rr
2174 | car4 c a4 rr
2175 | car5 c a5 rr
2176 | cair1 c ai1 rr
2177 | cair2 c ai2 rr
2178 | cair3 c ai3 rr
2179 | cair4 c ai4 rr
2180 | cair5 c ai5 rr
2181 | canr1 c an1 rr
2182 | canr2 c an2 rr
2183 | canr3 c an3 rr
2184 | canr4 c an4 rr
2185 | canr5 c an5 rr
2186 | cangr1 c ang1 rr
2187 | cangr2 c ang2 rr
2188 | cangr3 c ang3 rr
2189 | cangr4 c ang4 rr
2190 | cangr5 c ang5 rr
2191 | caor1 c ao1 rr
2192 | caor2 c ao2 rr
2193 | caor3 c ao3 rr
2194 | caor4 c ao4 rr
2195 | caor5 c ao5 rr
2196 | cer1 c e1 rr
2197 | cer2 c e2 rr
2198 | cer3 c e3 rr
2199 | cer4 c e4 rr
2200 | cer5 c e5 rr
2201 | cenr1 c en1 rr
2202 | cenr2 c en2 rr
2203 | cenr3 c en3 rr
2204 | cenr4 c en4 rr
2205 | cenr5 c en5 rr
2206 | cengr1 c eng1 rr
2207 | cengr2 c eng2 rr
2208 | cengr3 c eng3 rr
2209 | cengr4 c eng4 rr
2210 | cengr5 c eng5 rr
2211 | char1 ch a1 rr
2212 | char2 ch a2 rr
2213 | char3 ch a3 rr
2214 | char4 ch a4 rr
2215 | char5 ch a5 rr
2216 | chair1 ch ai1 rr
2217 | chair2 ch ai2 rr
2218 | chair3 ch ai3 rr
2219 | chair4 ch ai4 rr
2220 | chair5 ch ai5 rr
2221 | chanr1 ch an1 rr
2222 | chanr2 ch an2 rr
2223 | chanr3 ch an3 rr
2224 | chanr4 ch an4 rr
2225 | chanr5 ch an5 rr
2226 | changr1 ch ang1 rr
2227 | changr2 ch ang2 rr
2228 | changr3 ch ang3 rr
2229 | changr4 ch ang4 rr
2230 | changr5 ch ang5 rr
2231 | chaor1 ch ao1 rr
2232 | chaor2 ch ao2 rr
2233 | chaor3 ch ao3 rr
2234 | chaor4 ch ao4 rr
2235 | chaor5 ch ao5 rr
2236 | cher1 ch e1 rr
2237 | cher2 ch e2 rr
2238 | cher3 ch e3 rr
2239 | cher4 ch e4 rr
2240 | cher5 ch e5 rr
2241 | chenr1 ch en1 rr
2242 | chenr2 ch en2 rr
2243 | chenr3 ch en3 rr
2244 | chenr4 ch en4 rr
2245 | chenr5 ch en5 rr
2246 | chengr1 ch eng1 rr
2247 | chengr2 ch eng2 rr
2248 | chengr3 ch eng3 rr
2249 | chengr4 ch eng4 rr
2250 | chengr5 ch eng5 rr
2251 | chir1 ch iii1 rr
2252 | chir2 ch iii2 rr
2253 | chir3 ch iii3 rr
2254 | chir4 ch iii4 rr
2255 | chir5 ch iii5 rr
2256 | chongr1 ch ong1 rr
2257 | chongr2 ch ong2 rr
2258 | chongr3 ch ong3 rr
2259 | chongr4 ch ong4 rr
2260 | chongr5 ch ong5 rr
2261 | chour1 ch ou1 rr
2262 | chour2 ch ou2 rr
2263 | chour3 ch ou3 rr
2264 | chour4 ch ou4 rr
2265 | chour5 ch ou5 rr
2266 | chur1 ch u1 rr
2267 | chur2 ch u2 rr
2268 | chur3 ch u3 rr
2269 | chur4 ch u4 rr
2270 | chur5 ch u5 rr
2271 | chuair1 ch uai1 rr
2272 | chuair2 ch uai2 rr
2273 | chuair3 ch uai3 rr
2274 | chuair4 ch uai4 rr
2275 | chuair5 ch uai5 rr
2276 | chuanr1 ch uan1 rr
2277 | chuanr2 ch uan2 rr
2278 | chuanr3 ch uan3 rr
2279 | chuanr4 ch uan4 rr
2280 | chuanr5 ch uan5 rr
2281 | chuangr1 ch uang1 rr
2282 | chuangr2 ch uang2 rr
2283 | chuangr3 ch uang3 rr
2284 | chuangr4 ch uang4 rr
2285 | chuangr5 ch uang5 rr
2286 | chuir1 ch uei1 rr
2287 | chuir2 ch uei2 rr
2288 | chuir3 ch uei3 rr
2289 | chuir4 ch uei4 rr
2290 | chuir5 ch uei5 rr
2291 | chunr1 ch uen1 rr
2292 | chunr2 ch uen2 rr
2293 | chunr3 ch uen3 rr
2294 | chunr4 ch uen4 rr
2295 | chunr5 ch uen5 rr
2296 | chuor1 ch uo1 rr
2297 | chuor2 ch uo2 rr
2298 | chuor3 ch uo3 rr
2299 | chuor4 ch uo4 rr
2300 | chuor5 ch uo5 rr
2301 | cir1 c ii1 rr
2302 | cir2 c ii2 rr
2303 | cir3 c ii3 rr
2304 | cir4 c ii4 rr
2305 | cir5 c ii5 rr
2306 | congr1 c ong1 rr
2307 | congr2 c ong2 rr
2308 | congr3 c ong3 rr
2309 | congr4 c ong4 rr
2310 | congr5 c ong5 rr
2311 | cour1 c ou1 rr
2312 | cour2 c ou2 rr
2313 | cour3 c ou3 rr
2314 | cour4 c ou4 rr
2315 | cour5 c ou5 rr
2316 | cur1 c u1 rr
2317 | cur2 c u2 rr
2318 | cur3 c u3 rr
2319 | cur4 c u4 rr
2320 | cur5 c u5 rr
2321 | cuanr1 c uan1 rr
2322 | cuanr2 c uan2 rr
2323 | cuanr3 c uan3 rr
2324 | cuanr4 c uan4 rr
2325 | cuanr5 c uan5 rr
2326 | cuir1 c uei1 rr
2327 | cuir2 c uei2 rr
2328 | cuir3 c uei3 rr
2329 | cuir4 c uei4 rr
2330 | cuir5 c uei5 rr
2331 | cunr1 c uen1 rr
2332 | cunr2 c uen2 rr
2333 | cunr3 c uen3 rr
2334 | cunr4 c uen4 rr
2335 | cunr5 c uen5 rr
2336 | cuor1 c uo1 rr
2337 | cuor2 c uo2 rr
2338 | cuor3 c uo3 rr
2339 | cuor4 c uo4 rr
2340 | cuor5 c uo5 rr
2341 | dar1 d a1 rr
2342 | dar2 d a2 rr
2343 | dar3 d a3 rr
2344 | dar4 d a4 rr
2345 | dar5 d a5 rr
2346 | dair1 d ai1 rr
2347 | dair2 d ai2 rr
2348 | dair3 d ai3 rr
2349 | dair4 d ai4 rr
2350 | dair5 d ai5 rr
2351 | danr1 d an1 rr
2352 | danr2 d an2 rr
2353 | danr3 d an3 rr
2354 | danr4 d an4 rr
2355 | danr5 d an5 rr
2356 | dangr1 d ang1 rr
2357 | dangr2 d ang2 rr
2358 | dangr3 d ang3 rr
2359 | dangr4 d ang4 rr
2360 | dangr5 d ang5 rr
2361 | daor1 d ao1 rr
2362 | daor2 d ao2 rr
2363 | daor3 d ao3 rr
2364 | daor4 d ao4 rr
2365 | daor5 d ao5 rr
2366 | der1 d e1 rr
2367 | der2 d e2 rr
2368 | der3 d e3 rr
2369 | der4 d e4 rr
2370 | der5 d e5 rr
2371 | deir1 d ei1 rr
2372 | deir2 d ei2 rr
2373 | deir3 d ei3 rr
2374 | deir4 d ei4 rr
2375 | deir5 d ei5 rr
2376 | denr1 d en1 rr
2377 | denr2 d en2 rr
2378 | denr3 d en3 rr
2379 | denr4 d en4 rr
2380 | denr5 d en5 rr
2381 | dengr1 d eng1 rr
2382 | dengr2 d eng2 rr
2383 | dengr3 d eng3 rr
2384 | dengr4 d eng4 rr
2385 | dengr5 d eng5 rr
2386 | dir1 d i1 rr
2387 | dir2 d i2 rr
2388 | dir3 d i3 rr
2389 | dir4 d i4 rr
2390 | dir5 d i5 rr
2391 | diar1 d ia1 rr
2392 | diar2 d ia2 rr
2393 | diar3 d ia3 rr
2394 | diar4 d ia4 rr
2395 | diar5 d ia5 rr
2396 | dianr1 d ian1 rr
2397 | dianr2 d ian2 rr
2398 | dianr3 d ian3 rr
2399 | dianr4 d ian4 rr
2400 | dianr5 d ian5 rr
2401 | diaor1 d iao1 rr
2402 | diaor2 d iao2 rr
2403 | diaor3 d iao3 rr
2404 | diaor4 d iao4 rr
2405 | diaor5 d iao5 rr
2406 | dier1 d ie1 rr
2407 | dier2 d ie2 rr
2408 | dier3 d ie3 rr
2409 | dier4 d ie4 rr
2410 | dier5 d ie5 rr
2411 | dingr1 d ing1 rr
2412 | dingr2 d ing2 rr
2413 | dingr3 d ing3 rr
2414 | dingr4 d ing4 rr
2415 | dingr5 d ing5 rr
2416 | diur1 d iou1 rr
2417 | diur2 d iou2 rr
2418 | diur3 d iou3 rr
2419 | diur4 d iou4 rr
2420 | diur5 d iou5 rr
2421 | dongr1 d ong1 rr
2422 | dongr2 d ong2 rr
2423 | dongr3 d ong3 rr
2424 | dongr4 d ong4 rr
2425 | dongr5 d ong5 rr
2426 | dour1 d ou1 rr
2427 | dour2 d ou2 rr
2428 | dour3 d ou3 rr
2429 | dour4 d ou4 rr
2430 | dour5 d ou5 rr
2431 | dur1 d u1 rr
2432 | dur2 d u2 rr
2433 | dur3 d u3 rr
2434 | dur4 d u4 rr
2435 | dur5 d u5 rr
2436 | duanr1 d uan1 rr
2437 | duanr2 d uan2 rr
2438 | duanr3 d uan3 rr
2439 | duanr4 d uan4 rr
2440 | duanr5 d uan5 rr
2441 | duir1 d uei1 rr
2442 | duir2 d uei2 rr
2443 | duir3 d uei3 rr
2444 | duir4 d uei4 rr
2445 | duir5 d uei5 rr
2446 | dunr1 d uen1 rr
2447 | dunr2 d uen2 rr
2448 | dunr3 d uen3 rr
2449 | dunr4 d uen4 rr
2450 | dunr5 d uen5 rr
2451 | duor1 d uo1 rr
2452 | duor2 d uo2 rr
2453 | duor3 d uo3 rr
2454 | duor4 d uo4 rr
2455 | duor5 d uo5 rr
2456 | er1 e1 rr
2457 | er2 e2 rr
2458 | er3 e3 rr
2459 | er4 e4 rr
2460 | er5 e5 rr
2461 | eir1 ei1 rr
2462 | eir2 ei2 rr
2463 | eir3 ei3 rr
2464 | eir4 ei4 rr
2465 | eir5 ei5 rr
2466 | enr1 en1 rr
2467 | enr2 en2 rr
2468 | enr3 en3 rr
2469 | enr4 en4 rr
2470 | enr5 en5 rr
2471 | engr1 eng1 rr
2472 | engr2 eng2 rr
2473 | engr3 eng3 rr
2474 | engr4 eng4 rr
2475 | engr5 eng5 rr
2476 | far1 f a1 rr
2477 | far2 f a2 rr
2478 | far3 f a3 rr
2479 | far4 f a4 rr
2480 | far5 f a5 rr
2481 | fanr1 f an1 rr
2482 | fanr2 f an2 rr
2483 | fanr3 f an3 rr
2484 | fanr4 f an4 rr
2485 | fanr5 f an5 rr
2486 | fangr1 f ang1 rr
2487 | fangr2 f ang2 rr
2488 | fangr3 f ang3 rr
2489 | fangr4 f ang4 rr
2490 | fangr5 f ang5 rr
2491 | feir1 f ei1 rr
2492 | feir2 f ei2 rr
2493 | feir3 f ei3 rr
2494 | feir4 f ei4 rr
2495 | feir5 f ei5 rr
2496 | fenr1 f en1 rr
2497 | fenr2 f en2 rr
2498 | fenr3 f en3 rr
2499 | fenr4 f en4 rr
2500 | fenr5 f en5 rr
2501 | fengr1 f eng1 rr
2502 | fengr2 f eng2 rr
2503 | fengr3 f eng3 rr
2504 | fengr4 f eng4 rr
2505 | fengr5 f eng5 rr
2506 | for1 f o1 rr
2507 | for2 f o2 rr
2508 | for3 f o3 rr
2509 | for4 f o4 rr
2510 | for5 f o5 rr
2511 | four1 f ou1 rr
2512 | four2 f ou2 rr
2513 | four3 f ou3 rr
2514 | four4 f ou4 rr
2515 | four5 f ou5 rr
2516 | fur1 f u1 rr
2517 | fur2 f u2 rr
2518 | fur3 f u3 rr
2519 | fur4 f u4 rr
2520 | fur5 f u5 rr
2521 | gar1 g a1 rr
2522 | gar2 g a2 rr
2523 | gar3 g a3 rr
2524 | gar4 g a4 rr
2525 | gar5 g a5 rr
2526 | gair1 g ai1 rr
2527 | gair2 g ai2 rr
2528 | gair3 g ai3 rr
2529 | gair4 g ai4 rr
2530 | gair5 g ai5 rr
2531 | ganr1 g an1 rr
2532 | ganr2 g an2 rr
2533 | ganr3 g an3 rr
2534 | ganr4 g an4 rr
2535 | ganr5 g an5 rr
2536 | gangr1 g ang1 rr
2537 | gangr2 g ang2 rr
2538 | gangr3 g ang3 rr
2539 | gangr4 g ang4 rr
2540 | gangr5 g ang5 rr
2541 | gaor1 g ao1 rr
2542 | gaor2 g ao2 rr
2543 | gaor3 g ao3 rr
2544 | gaor4 g ao4 rr
2545 | gaor5 g ao5 rr
2546 | ger1 g e1 rr
2547 | ger2 g e2 rr
2548 | ger3 g e3 rr
2549 | ger4 g e4 rr
2550 | ger5 g e5 rr
2551 | geir1 g ei1 rr
2552 | geir2 g ei2 rr
2553 | geir3 g ei3 rr
2554 | geir4 g ei4 rr
2555 | geir5 g ei5 rr
2556 | genr1 g en1 rr
2557 | genr2 g en2 rr
2558 | genr3 g en3 rr
2559 | genr4 g en4 rr
2560 | genr5 g en5 rr
2561 | gengr1 g eng1 rr
2562 | gengr2 g eng2 rr
2563 | gengr3 g eng3 rr
2564 | gengr4 g eng4 rr
2565 | gengr5 g eng5 rr
2566 | gongr1 g ong1 rr
2567 | gongr2 g ong2 rr
2568 | gongr3 g ong3 rr
2569 | gongr4 g ong4 rr
2570 | gongr5 g ong5 rr
2571 | gour1 g ou1 rr
2572 | gour2 g ou2 rr
2573 | gour3 g ou3 rr
2574 | gour4 g ou4 rr
2575 | gour5 g ou5 rr
2576 | gur1 g u1 rr
2577 | gur2 g u2 rr
2578 | gur3 g u3 rr
2579 | gur4 g u4 rr
2580 | gur5 g u5 rr
2581 | guar1 g ua1 rr
2582 | guar2 g ua2 rr
2583 | guar3 g ua3 rr
2584 | guar4 g ua4 rr
2585 | guar5 g ua5 rr
2586 | guair1 g uai1 rr
2587 | guair2 g uai2 rr
2588 | guair3 g uai3 rr
2589 | guair4 g uai4 rr
2590 | guair5 g uai5 rr
2591 | guanr1 g uan1 rr
2592 | guanr2 g uan2 rr
2593 | guanr3 g uan3 rr
2594 | guanr4 g uan4 rr
2595 | guanr5 g uan5 rr
2596 | guangr1 g uang1 rr
2597 | guangr2 g uang2 rr
2598 | guangr3 g uang3 rr
2599 | guangr4 g uang4 rr
2600 | guangr5 g uang5 rr
2601 | guir1 g uei1 rr
2602 | guir2 g uei2 rr
2603 | guir3 g uei3 rr
2604 | guir4 g uei4 rr
2605 | guir5 g uei5 rr
2606 | gunr1 g uen1 rr
2607 | gunr2 g uen2 rr
2608 | gunr3 g uen3 rr
2609 | gunr4 g uen4 rr
2610 | gunr5 g uen5 rr
2611 | guor1 g uo1 rr
2612 | guor2 g uo2 rr
2613 | guor3 g uo3 rr
2614 | guor4 g uo4 rr
2615 | guor5 g uo5 rr
2616 | har1 h a1 rr
2617 | har2 h a2 rr
2618 | har3 h a3 rr
2619 | har4 h a4 rr
2620 | har5 h a5 rr
2621 | hair1 h ai1 rr
2622 | hair2 h ai2 rr
2623 | hair3 h ai3 rr
2624 | hair4 h ai4 rr
2625 | hair5 h ai5 rr
2626 | hanr1 h an1 rr
2627 | hanr2 h an2 rr
2628 | hanr3 h an3 rr
2629 | hanr4 h an4 rr
2630 | hanr5 h an5 rr
2631 | hangr1 h ang1 rr
2632 | hangr2 h ang2 rr
2633 | hangr3 h ang3 rr
2634 | hangr4 h ang4 rr
2635 | hangr5 h ang5 rr
2636 | haor1 h ao1 rr
2637 | haor2 h ao2 rr
2638 | haor3 h ao3 rr
2639 | haor4 h ao4 rr
2640 | haor5 h ao5 rr
2641 | her1 h e1 rr
2642 | her2 h e2 rr
2643 | her3 h e3 rr
2644 | her4 h e4 rr
2645 | her5 h e5 rr
2646 | heir1 h ei1 rr
2647 | heir2 h ei2 rr
2648 | heir3 h ei3 rr
2649 | heir4 h ei4 rr
2650 | heir5 h ei5 rr
2651 | henr1 h en1 rr
2652 | henr2 h en2 rr
2653 | henr3 h en3 rr
2654 | henr4 h en4 rr
2655 | henr5 h en5 rr
2656 | hengr1 h eng1 rr
2657 | hengr2 h eng2 rr
2658 | hengr3 h eng3 rr
2659 | hengr4 h eng4 rr
2660 | hengr5 h eng5 rr
2661 | hongr1 h ong1 rr
2662 | hongr2 h ong2 rr
2663 | hongr3 h ong3 rr
2664 | hongr4 h ong4 rr
2665 | hongr5 h ong5 rr
2666 | hour1 h ou1 rr
2667 | hour2 h ou2 rr
2668 | hour3 h ou3 rr
2669 | hour4 h ou4 rr
2670 | hour5 h ou5 rr
2671 | hur1 h u1 rr
2672 | hur2 h u2 rr
2673 | hur3 h u3 rr
2674 | hur4 h u4 rr
2675 | hur5 h u5 rr
2676 | huar1 h ua1 rr
2677 | huar2 h ua2 rr
2678 | huar3 h ua3 rr
2679 | huar4 h ua4 rr
2680 | huar5 h ua5 rr
2681 | huair1 h uai1 rr
2682 | huair2 h uai2 rr
2683 | huair3 h uai3 rr
2684 | huair4 h uai4 rr
2685 | huair5 h uai5 rr
2686 | huanr1 h uan1 rr
2687 | huanr2 h uan2 rr
2688 | huanr3 h uan3 rr
2689 | huanr4 h uan4 rr
2690 | huanr5 h uan5 rr
2691 | huangr1 h uang1 rr
2692 | huangr2 h uang2 rr
2693 | huangr3 h uang3 rr
2694 | huangr4 h uang4 rr
2695 | huangr5 h uang5 rr
2696 | huir1 h uei1 rr
2697 | huir2 h uei2 rr
2698 | huir3 h uei3 rr
2699 | huir4 h uei4 rr
2700 | huir5 h uei5 rr
2701 | hunr1 h uen1 rr
2702 | hunr2 h uen2 rr
2703 | hunr3 h uen3 rr
2704 | hunr4 h uen4 rr
2705 | hunr5 h uen5 rr
2706 | huor1 h uo1 rr
2707 | huor2 h uo2 rr
2708 | huor3 h uo3 rr
2709 | huor4 h uo4 rr
2710 | huor5 h uo5 rr
2711 | jir1 j i1 rr
2712 | jir2 j i2 rr
2713 | jir3 j i3 rr
2714 | jir4 j i4 rr
2715 | jir5 j i5 rr
2716 | jiar1 j ia1 rr
2717 | jiar2 j ia2 rr
2718 | jiar3 j ia3 rr
2719 | jiar4 j ia4 rr
2720 | jiar5 j ia5 rr
2721 | jianr1 j ian1 rr
2722 | jianr2 j ian2 rr
2723 | jianr3 j ian3 rr
2724 | jianr4 j ian4 rr
2725 | jianr5 j ian5 rr
2726 | jiangr1 j iang1 rr
2727 | jiangr2 j iang2 rr
2728 | jiangr3 j iang3 rr
2729 | jiangr4 j iang4 rr
2730 | jiangr5 j iang5 rr
2731 | jiaor1 j iao1 rr
2732 | jiaor2 j iao2 rr
2733 | jiaor3 j iao3 rr
2734 | jiaor4 j iao4 rr
2735 | jiaor5 j iao5 rr
2736 | jier1 j ie1 rr
2737 | jier2 j ie2 rr
2738 | jier3 j ie3 rr
2739 | jier4 j ie4 rr
2740 | jier5 j ie5 rr
2741 | jinr1 j in1 rr
2742 | jinr2 j in2 rr
2743 | jinr3 j in3 rr
2744 | jinr4 j in4 rr
2745 | jinr5 j in5 rr
2746 | jingr1 j ing1 rr
2747 | jingr2 j ing2 rr
2748 | jingr3 j ing3 rr
2749 | jingr4 j ing4 rr
2750 | jingr5 j ing5 rr
2751 | jiongr1 j iong1 rr
2752 | jiongr2 j iong2 rr
2753 | jiongr3 j iong3 rr
2754 | jiongr4 j iong4 rr
2755 | jiongr5 j iong5 rr
2756 | jiur1 j iou1 rr
2757 | jiur2 j iou2 rr
2758 | jiur3 j iou3 rr
2759 | jiur4 j iou4 rr
2760 | jiur5 j iou5 rr
2761 | jur1 j v1 rr
2762 | jur2 j v2 rr
2763 | jur3 j v3 rr
2764 | jur4 j v4 rr
2765 | jur5 j v5 rr
2766 | juanr1 j van1 rr
2767 | juanr2 j van2 rr
2768 | juanr3 j van3 rr
2769 | juanr4 j van4 rr
2770 | juanr5 j van5 rr
2771 | juer1 j ve1 rr
2772 | juer2 j ve2 rr
2773 | juer3 j ve3 rr
2774 | juer4 j ve4 rr
2775 | juer5 j ve5 rr
2776 | junr1 j vn1 rr
2777 | junr2 j vn2 rr
2778 | junr3 j vn3 rr
2779 | junr4 j vn4 rr
2780 | junr5 j vn5 rr
2781 | kar1 k a1 rr
2782 | kar2 k a2 rr
2783 | kar3 k a3 rr
2784 | kar4 k a4 rr
2785 | kar5 k a5 rr
2786 | kair1 k ai1 rr
2787 | kair2 k ai2 rr
2788 | kair3 k ai3 rr
2789 | kair4 k ai4 rr
2790 | kair5 k ai5 rr
2791 | kanr1 k an1 rr
2792 | kanr2 k an2 rr
2793 | kanr3 k an3 rr
2794 | kanr4 k an4 rr
2795 | kanr5 k an5 rr
2796 | kangr1 k ang1 rr
2797 | kangr2 k ang2 rr
2798 | kangr3 k ang3 rr
2799 | kangr4 k ang4 rr
2800 | kangr5 k ang5 rr
2801 | kaor1 k ao1 rr
2802 | kaor2 k ao2 rr
2803 | kaor3 k ao3 rr
2804 | kaor4 k ao4 rr
2805 | kaor5 k ao5 rr
2806 | ker1 k e1 rr
2807 | ker2 k e2 rr
2808 | ker3 k e3 rr
2809 | ker4 k e4 rr
2810 | ker5 k e5 rr
2811 | keir1 k ei1 rr
2812 | keir2 k ei2 rr
2813 | keir3 k ei3 rr
2814 | keir4 k ei4 rr
2815 | keir5 k ei5 rr
2816 | kenr1 k en1 rr
2817 | kenr2 k en2 rr
2818 | kenr3 k en3 rr
2819 | kenr4 k en4 rr
2820 | kenr5 k en5 rr
2821 | kengr1 k eng1 rr
2822 | kengr2 k eng2 rr
2823 | kengr3 k eng3 rr
2824 | kengr4 k eng4 rr
2825 | kengr5 k eng5 rr
2826 | kongr1 k ong1 rr
2827 | kongr2 k ong2 rr
2828 | kongr3 k ong3 rr
2829 | kongr4 k ong4 rr
2830 | kongr5 k ong5 rr
2831 | kour1 k ou1 rr
2832 | kour2 k ou2 rr
2833 | kour3 k ou3 rr
2834 | kour4 k ou4 rr
2835 | kour5 k ou5 rr
2836 | kur1 k u1 rr
2837 | kur2 k u2 rr
2838 | kur3 k u3 rr
2839 | kur4 k u4 rr
2840 | kur5 k u5 rr
2841 | kuar1 k ua1 rr
2842 | kuar2 k ua2 rr
2843 | kuar3 k ua3 rr
2844 | kuar4 k ua4 rr
2845 | kuar5 k ua5 rr
2846 | kuair1 k uai1 rr
2847 | kuair2 k uai2 rr
2848 | kuair3 k uai3 rr
2849 | kuair4 k uai4 rr
2850 | kuair5 k uai5 rr
2851 | kuanr1 k uan1 rr
2852 | kuanr2 k uan2 rr
2853 | kuanr3 k uan3 rr
2854 | kuanr4 k uan4 rr
2855 | kuanr5 k uan5 rr
2856 | kuangr1 k uang1 rr
2857 | kuangr2 k uang2 rr
2858 | kuangr3 k uang3 rr
2859 | kuangr4 k uang4 rr
2860 | kuangr5 k uang5 rr
2861 | kuir1 k uei1 rr
2862 | kuir2 k uei2 rr
2863 | kuir3 k uei3 rr
2864 | kuir4 k uei4 rr
2865 | kuir5 k uei5 rr
2866 | kunr1 k uen1 rr
2867 | kunr2 k uen2 rr
2868 | kunr3 k uen3 rr
2869 | kunr4 k uen4 rr
2870 | kunr5 k uen5 rr
2871 | kuor1 k uo1 rr
2872 | kuor2 k uo2 rr
2873 | kuor3 k uo3 rr
2874 | kuor4 k uo4 rr
2875 | kuor5 k uo5 rr
2876 | lar1 l a1 rr
2877 | lar2 l a2 rr
2878 | lar3 l a3 rr
2879 | lar4 l a4 rr
2880 | lar5 l a5 rr
2881 | lair1 l ai1 rr
2882 | lair2 l ai2 rr
2883 | lair3 l ai3 rr
2884 | lair4 l ai4 rr
2885 | lair5 l ai5 rr
2886 | lanr1 l an1 rr
2887 | lanr2 l an2 rr
2888 | lanr3 l an3 rr
2889 | lanr4 l an4 rr
2890 | lanr5 l an5 rr
2891 | langr1 l ang1 rr
2892 | langr2 l ang2 rr
2893 | langr3 l ang3 rr
2894 | langr4 l ang4 rr
2895 | langr5 l ang5 rr
2896 | laor1 l ao1 rr
2897 | laor2 l ao2 rr
2898 | laor3 l ao3 rr
2899 | laor4 l ao4 rr
2900 | laor5 l ao5 rr
2901 | ler1 l e1 rr
2902 | ler2 l e2 rr
2903 | ler3 l e3 rr
2904 | ler4 l e4 rr
2905 | ler5 l e5 rr
2906 | leir1 l ei1 rr
2907 | leir2 l ei2 rr
2908 | leir3 l ei3 rr
2909 | leir4 l ei4 rr
2910 | leir5 l ei5 rr
2911 | lengr1 l eng1 rr
2912 | lengr2 l eng2 rr
2913 | lengr3 l eng3 rr
2914 | lengr4 l eng4 rr
2915 | lengr5 l eng5 rr
2916 | lir1 l i1 rr
2917 | lir2 l i2 rr
2918 | lir3 l i3 rr
2919 | lir4 l i4 rr
2920 | lir5 l i5 rr
2921 | liar1 l ia1 rr
2922 | liar2 l ia2 rr
2923 | liar3 l ia3 rr
2924 | liar4 l ia4 rr
2925 | liar5 l ia5 rr
2926 | lianr1 l ian1 rr
2927 | lianr2 l ian2 rr
2928 | lianr3 l ian3 rr
2929 | lianr4 l ian4 rr
2930 | lianr5 l ian5 rr
2931 | liangr1 l iang1 rr
2932 | liangr2 l iang2 rr
2933 | liangr3 l iang3 rr
2934 | liangr4 l iang4 rr
2935 | liangr5 l iang5 rr
2936 | liaor1 l iao1 rr
2937 | liaor2 l iao2 rr
2938 | liaor3 l iao3 rr
2939 | liaor4 l iao4 rr
2940 | liaor5 l iao5 rr
2941 | lier1 l ie1 rr
2942 | lier2 l ie2 rr
2943 | lier3 l ie3 rr
2944 | lier4 l ie4 rr
2945 | lier5 l ie5 rr
2946 | linr1 l in1 rr
2947 | linr2 l in2 rr
2948 | linr3 l in3 rr
2949 | linr4 l in4 rr
2950 | linr5 l in5 rr
2951 | lingr1 l ing1 rr
2952 | lingr2 l ing2 rr
2953 | lingr3 l ing3 rr
2954 | lingr4 l ing4 rr
2955 | lingr5 l ing5 rr
2956 | liur1 l iou1 rr
2957 | liur2 l iou2 rr
2958 | liur3 l iou3 rr
2959 | liur4 l iou4 rr
2960 | liur5 l iou5 rr
2961 | lor1 l o1 rr
2962 | lor2 l o2 rr
2963 | lor3 l o3 rr
2964 | lor4 l o4 rr
2965 | lor5 l o5 rr
2966 | longr1 l ong1 rr
2967 | longr2 l ong2 rr
2968 | longr3 l ong3 rr
2969 | longr4 l ong4 rr
2970 | longr5 l ong5 rr
2971 | lour1 l ou1 rr
2972 | lour2 l ou2 rr
2973 | lour3 l ou3 rr
2974 | lour4 l ou4 rr
2975 | lour5 l ou5 rr
2976 | lur1 l u1 rr
2977 | lur2 l u2 rr
2978 | lur3 l u3 rr
2979 | lur4 l u4 rr
2980 | lur5 l u5 rr
2981 | luanr1 l uan1 rr
2982 | luanr2 l uan2 rr
2983 | luanr3 l uan3 rr
2984 | luanr4 l uan4 rr
2985 | luanr5 l uan5 rr
2986 | luer1 l ve1 rr
2987 | luer2 l ve2 rr
2988 | luer3 l ve3 rr
2989 | luer4 l ve4 rr
2990 | luer5 l ve5 rr
2991 | lver1 l ve1 rr
2992 | lver2 l ve2 rr
2993 | lver3 l ve3 rr
2994 | lver4 l ve4 rr
2995 | lver5 l ve5 rr
2996 | lunr1 l uen1 rr
2997 | lunr2 l uen2 rr
2998 | lunr3 l uen3 rr
2999 | lunr4 l uen4 rr
3000 | lunr5 l uen5 rr
3001 | luor1 l uo1 rr
3002 | luor2 l uo2 rr
3003 | luor3 l uo3 rr
3004 | luor4 l uo4 rr
3005 | luor5 l uo5 rr
3006 | lvr1 l v1 rr
3007 | lvr2 l v2 rr
3008 | lvr3 l v3 rr
3009 | lvr4 l v4 rr
3010 | lvr5 l v5 rr
3011 | mar1 m a1 rr
3012 | mar2 m a2 rr
3013 | mar3 m a3 rr
3014 | mar4 m a4 rr
3015 | mar5 m a5 rr
3016 | mair1 m ai1 rr
3017 | mair2 m ai2 rr
3018 | mair3 m ai3 rr
3019 | mair4 m ai4 rr
3020 | mair5 m ai5 rr
3021 | manr1 m an1 rr
3022 | manr2 m an2 rr
3023 | manr3 m an3 rr
3024 | manr4 m an4 rr
3025 | manr5 m an5 rr
3026 | mangr1 m ang1 rr
3027 | mangr2 m ang2 rr
3028 | mangr3 m ang3 rr
3029 | mangr4 m ang4 rr
3030 | mangr5 m ang5 rr
3031 | maor1 m ao1 rr
3032 | maor2 m ao2 rr
3033 | maor3 m ao3 rr
3034 | maor4 m ao4 rr
3035 | maor5 m ao5 rr
3036 | mer1 m e1 rr
3037 | mer2 m e2 rr
3038 | mer3 m e3 rr
3039 | mer4 m e4 rr
3040 | mer5 m e5 rr
3041 | meir1 m ei1 rr
3042 | meir2 m ei2 rr
3043 | meir3 m ei3 rr
3044 | meir4 m ei4 rr
3045 | meir5 m ei5 rr
3046 | menr1 m en1 rr
3047 | menr2 m en2 rr
3048 | menr3 m en3 rr
3049 | menr4 m en4 rr
3050 | menr5 m en5 rr
3051 | mengr1 m eng1 rr
3052 | mengr2 m eng2 rr
3053 | mengr3 m eng3 rr
3054 | mengr4 m eng4 rr
3055 | mengr5 m eng5 rr
3056 | mir1 m i1 rr
3057 | mir2 m i2 rr
3058 | mir3 m i3 rr
3059 | mir4 m i4 rr
3060 | mir5 m i5 rr
3061 | mianr1 m ian1 rr
3062 | mianr2 m ian2 rr
3063 | mianr3 m ian3 rr
3064 | mianr4 m ian4 rr
3065 | mianr5 m ian5 rr
3066 | miaor1 m iao1 rr
3067 | miaor2 m iao2 rr
3068 | miaor3 m iao3 rr
3069 | miaor4 m iao4 rr
3070 | miaor5 m iao5 rr
3071 | mier1 m ie1 rr
3072 | mier2 m ie2 rr
3073 | mier3 m ie3 rr
3074 | mier4 m ie4 rr
3075 | mier5 m ie5 rr
3076 | minr1 m in1 rr
3077 | minr2 m in2 rr
3078 | minr3 m in3 rr
3079 | minr4 m in4 rr
3080 | minr5 m in5 rr
3081 | mingr1 m ing1 rr
3082 | mingr2 m ing2 rr
3083 | mingr3 m ing3 rr
3084 | mingr4 m ing4 rr
3085 | mingr5 m ing5 rr
3086 | miur1 m iou1 rr
3087 | miur2 m iou2 rr
3088 | miur3 m iou3 rr
3089 | miur4 m iou4 rr
3090 | miur5 m iou5 rr
3091 | mor1 m o1 rr
3092 | mor2 m o2 rr
3093 | mor3 m o3 rr
3094 | mor4 m o4 rr
3095 | mor5 m o5 rr
3096 | mour1 m ou1 rr
3097 | mour2 m ou2 rr
3098 | mour3 m ou3 rr
3099 | mour4 m ou4 rr
3100 | mour5 m ou5 rr
3101 | mur1 m u1 rr
3102 | mur2 m u2 rr
3103 | mur3 m u3 rr
3104 | mur4 m u4 rr
3105 | mur5 m u5 rr
3106 | nar1 n a1 rr
3107 | nar2 n a2 rr
3108 | nar3 n a3 rr
3109 | nar4 n a4 rr
3110 | nar5 n a5 rr
3111 | nair1 n ai1 rr
3112 | nair2 n ai2 rr
3113 | nair3 n ai3 rr
3114 | nair4 n ai4 rr
3115 | nair5 n ai5 rr
3116 | nanr1 n an1 rr
3117 | nanr2 n an2 rr
3118 | nanr3 n an3 rr
3119 | nanr4 n an4 rr
3120 | nanr5 n an5 rr
3121 | nangr1 n ang1 rr
3122 | nangr2 n ang2 rr
3123 | nangr3 n ang3 rr
3124 | nangr4 n ang4 rr
3125 | nangr5 n ang5 rr
3126 | naor1 n ao1 rr
3127 | naor2 n ao2 rr
3128 | naor3 n ao3 rr
3129 | naor4 n ao4 rr
3130 | naor5 n ao5 rr
3131 | ner1 n e1 rr
3132 | ner2 n e2 rr
3133 | ner3 n e3 rr
3134 | ner4 n e4 rr
3135 | ner5 n e5 rr
3136 | neir1 n ei1 rr
3137 | neir2 n ei2 rr
3138 | neir3 n ei3 rr
3139 | neir4 n ei4 rr
3140 | neir5 n ei5 rr
3141 | nenr1 n en1 rr
3142 | nenr2 n en2 rr
3143 | nenr3 n en3 rr
3144 | nenr4 n en4 rr
3145 | nenr5 n en5 rr
3146 | nengr1 n eng1 rr
3147 | nengr2 n eng2 rr
3148 | nengr3 n eng3 rr
3149 | nengr4 n eng4 rr
3150 | nengr5 n eng5 rr
3151 | nir1 n i1 rr
3152 | nir2 n i2 rr
3153 | nir3 n i3 rr
3154 | nir4 n i4 rr
3155 | nir5 n i5 rr
3156 | nianr1 n ian1 rr
3157 | nianr2 n ian2 rr
3158 | nianr3 n ian3 rr
3159 | nianr4 n ian4 rr
3160 | nianr5 n ian5 rr
3161 | niangr1 n iang1 rr
3162 | niangr2 n iang2 rr
3163 | niangr3 n iang3 rr
3164 | niangr4 n iang4 rr
3165 | niangr5 n iang5 rr
3166 | niaor1 n iao1 rr
3167 | niaor2 n iao2 rr
3168 | niaor3 n iao3 rr
3169 | niaor4 n iao4 rr
3170 | niaor5 n iao5 rr
3171 | nier1 n ie1 rr
3172 | nier2 n ie2 rr
3173 | nier3 n ie3 rr
3174 | nier4 n ie4 rr
3175 | nier5 n ie5 rr
3176 | ninr1 n in1 rr
3177 | ninr2 n in2 rr
3178 | ninr3 n in3 rr
3179 | ninr4 n in4 rr
3180 | ninr5 n in5 rr
3181 | ningr1 n ing1 rr
3182 | ningr2 n ing2 rr
3183 | ningr3 n ing3 rr
3184 | ningr4 n ing4 rr
3185 | ningr5 n ing5 rr
3186 | niur1 n iou1 rr
3187 | niur2 n iou2 rr
3188 | niur3 n iou3 rr
3189 | niur4 n iou4 rr
3190 | niur5 n iou5 rr
3191 | nongr1 n ong1 rr
3192 | nongr2 n ong2 rr
3193 | nongr3 n ong3 rr
3194 | nongr4 n ong4 rr
3195 | nongr5 n ong5 rr
3196 | nour1 n ou1 rr
3197 | nour2 n ou2 rr
3198 | nour3 n ou3 rr
3199 | nour4 n ou4 rr
3200 | nour5 n ou5 rr
3201 | nur1 n u1 rr
3202 | nur2 n u2 rr
3203 | nur3 n u3 rr
3204 | nur4 n u4 rr
3205 | nur5 n u5 rr
3206 | nuanr1 n uan1 rr
3207 | nuanr2 n uan2 rr
3208 | nuanr3 n uan3 rr
3209 | nuanr4 n uan4 rr
3210 | nuanr5 n uan5 rr
3211 | nuer1 n ve1 rr
3212 | nuer2 n ve2 rr
3213 | nuer3 n ve3 rr
3214 | nuer4 n ve4 rr
3215 | nuer5 n ve5 rr
3216 | nver1 n ve1 rr
3217 | nver2 n ve2 rr
3218 | nver3 n ve3 rr
3219 | nver4 n ve4 rr
3220 | nver5 n ve5 rr
3221 | nuor1 n uo1 rr
3222 | nuor2 n uo2 rr
3223 | nuor3 n uo3 rr
3224 | nuor4 n uo4 rr
3225 | nuor5 n uo5 rr
3226 | nvr1 n v1 rr
3227 | nvr2 n v2 rr
3228 | nvr3 n v3 rr
3229 | nvr4 n v4 rr
3230 | nvr5 n v5 rr
3231 | or1 o1 rr
3232 | or2 o2 rr
3233 | or3 o3 rr
3234 | or4 o4 rr
3235 | or5 o5 rr
3236 | our1 ou1 rr
3237 | our2 ou2 rr
3238 | our3 ou3 rr
3239 | our4 ou4 rr
3240 | our5 ou5 rr
3241 | par1 p a1 rr
3242 | par2 p a2 rr
3243 | par3 p a3 rr
3244 | par4 p a4 rr
3245 | par5 p a5 rr
3246 | pair1 p ai1 rr
3247 | pair2 p ai2 rr
3248 | pair3 p ai3 rr
3249 | pair4 p ai4 rr
3250 | pair5 p ai5 rr
3251 | panr1 p an1 rr
3252 | panr2 p an2 rr
3253 | panr3 p an3 rr
3254 | panr4 p an4 rr
3255 | panr5 p an5 rr
3256 | pangr1 p ang1 rr
3257 | pangr2 p ang2 rr
3258 | pangr3 p ang3 rr
3259 | pangr4 p ang4 rr
3260 | pangr5 p ang5 rr
3261 | paor1 p ao1 rr
3262 | paor2 p ao2 rr
3263 | paor3 p ao3 rr
3264 | paor4 p ao4 rr
3265 | paor5 p ao5 rr
3266 | peir1 p ei1 rr
3267 | peir2 p ei2 rr
3268 | peir3 p ei3 rr
3269 | peir4 p ei4 rr
3270 | peir5 p ei5 rr
3271 | penr1 p en1 rr
3272 | penr2 p en2 rr
3273 | penr3 p en3 rr
3274 | penr4 p en4 rr
3275 | penr5 p en5 rr
3276 | pengr1 p eng1 rr
3277 | pengr2 p eng2 rr
3278 | pengr3 p eng3 rr
3279 | pengr4 p eng4 rr
3280 | pengr5 p eng5 rr
3281 | pir1 p i1 rr
3282 | pir2 p i2 rr
3283 | pir3 p i3 rr
3284 | pir4 p i4 rr
3285 | pir5 p i5 rr
3286 | pianr1 p ian1 rr
3287 | pianr2 p ian2 rr
3288 | pianr3 p ian3 rr
3289 | pianr4 p ian4 rr
3290 | pianr5 p ian5 rr
3291 | piaor1 p iao1 rr
3292 | piaor2 p iao2 rr
3293 | piaor3 p iao3 rr
3294 | piaor4 p iao4 rr
3295 | piaor5 p iao5 rr
3296 | pier1 p ie1 rr
3297 | pier2 p ie2 rr
3298 | pier3 p ie3 rr
3299 | pier4 p ie4 rr
3300 | pier5 p ie5 rr
3301 | pinr1 p in1 rr
3302 | pinr2 p in2 rr
3303 | pinr3 p in3 rr
3304 | pinr4 p in4 rr
3305 | pinr5 p in5 rr
3306 | pingr1 p ing1 rr
3307 | pingr2 p ing2 rr
3308 | pingr3 p ing3 rr
3309 | pingr4 p ing4 rr
3310 | pingr5 p ing5 rr
3311 | por1 p o1 rr
3312 | por2 p o2 rr
3313 | por3 p o3 rr
3314 | por4 p o4 rr
3315 | por5 p o5 rr
3316 | pour1 p ou1 rr
3317 | pour2 p ou2 rr
3318 | pour3 p ou3 rr
3319 | pour4 p ou4 rr
3320 | pour5 p ou5 rr
3321 | pur1 p u1 rr
3322 | pur2 p u2 rr
3323 | pur3 p u3 rr
3324 | pur4 p u4 rr
3325 | pur5 p u5 rr
3326 | qir1 q i1 rr
3327 | qir2 q i2 rr
3328 | qir3 q i3 rr
3329 | qir4 q i4 rr
3330 | qir5 q i5 rr
3331 | qiar1 q ia1 rr
3332 | qiar2 q ia2 rr
3333 | qiar3 q ia3 rr
3334 | qiar4 q ia4 rr
3335 | qiar5 q ia5 rr
3336 | qianr1 q ian1 rr
3337 | qianr2 q ian2 rr
3338 | qianr3 q ian3 rr
3339 | qianr4 q ian4 rr
3340 | qianr5 q ian5 rr
3341 | qiangr1 q iang1 rr
3342 | qiangr2 q iang2 rr
3343 | qiangr3 q iang3 rr
3344 | qiangr4 q iang4 rr
3345 | qiangr5 q iang5 rr
3346 | qiaor1 q iao1 rr
3347 | qiaor2 q iao2 rr
3348 | qiaor3 q iao3 rr
3349 | qiaor4 q iao4 rr
3350 | qiaor5 q iao5 rr
3351 | qier1 q ie1 rr
3352 | qier2 q ie2 rr
3353 | qier3 q ie3 rr
3354 | qier4 q ie4 rr
3355 | qier5 q ie5 rr
3356 | qinr1 q in1 rr
3357 | qinr2 q in2 rr
3358 | qinr3 q in3 rr
3359 | qinr4 q in4 rr
3360 | qinr5 q in5 rr
3361 | qingr1 q ing1 rr
3362 | qingr2 q ing2 rr
3363 | qingr3 q ing3 rr
3364 | qingr4 q ing4 rr
3365 | qingr5 q ing5 rr
3366 | qiongr1 q iong1 rr
3367 | qiongr2 q iong2 rr
3368 | qiongr3 q iong3 rr
3369 | qiongr4 q iong4 rr
3370 | qiongr5 q iong5 rr
3371 | qiur1 q iou1 rr
3372 | qiur2 q iou2 rr
3373 | qiur3 q iou3 rr
3374 | qiur4 q iou4 rr
3375 | qiur5 q iou5 rr
3376 | qur1 q v1 rr
3377 | qur2 q v2 rr
3378 | qur3 q v3 rr
3379 | qur4 q v4 rr
3380 | qur5 q v5 rr
3381 | quanr1 q van1 rr
3382 | quanr2 q van2 rr
3383 | quanr3 q van3 rr
3384 | quanr4 q van4 rr
3385 | quanr5 q van5 rr
3386 | quer1 q ve1 rr
3387 | quer2 q ve2 rr
3388 | quer3 q ve3 rr
3389 | quer4 q ve4 rr
3390 | quer5 q ve5 rr
3391 | qunr1 q vn1 rr
3392 | qunr2 q vn2 rr
3393 | qunr3 q vn3 rr
3394 | qunr4 q vn4 rr
3395 | qunr5 q vn5 rr
3396 | ranr1 r an1 rr
3397 | ranr2 r an2 rr
3398 | ranr3 r an3 rr
3399 | ranr4 r an4 rr
3400 | ranr5 r an5 rr
3401 | rangr1 r ang1 rr
3402 | rangr2 r ang2 rr
3403 | rangr3 r ang3 rr
3404 | rangr4 r ang4 rr
3405 | rangr5 r ang5 rr
3406 | raor1 r ao1 rr
3407 | raor2 r ao2 rr
3408 | raor3 r ao3 rr
3409 | raor4 r ao4 rr
3410 | raor5 r ao5 rr
3411 | rer1 r e1 rr
3412 | rer2 r e2 rr
3413 | rer3 r e3 rr
3414 | rer4 r e4 rr
3415 | rer5 r e5 rr
3416 | renr1 r en1 rr
3417 | renr2 r en2 rr
3418 | renr3 r en3 rr
3419 | renr4 r en4 rr
3420 | renr5 r en5 rr
3421 | rengr1 r eng1 rr
3422 | rengr2 r eng2 rr
3423 | rengr3 r eng3 rr
3424 | rengr4 r eng4 rr
3425 | rengr5 r eng5 rr
3426 | rir1 r iii1 rr
3427 | rir2 r iii2 rr
3428 | rir3 r iii3 rr
3429 | rir4 r iii4 rr
3430 | rir5 r iii5 rr
3431 | rongr1 r ong1 rr
3432 | rongr2 r ong2 rr
3433 | rongr3 r ong3 rr
3434 | rongr4 r ong4 rr
3435 | rongr5 r ong5 rr
3436 | rour1 r ou1 rr
3437 | rour2 r ou2 rr
3438 | rour3 r ou3 rr
3439 | rour4 r ou4 rr
3440 | rour5 r ou5 rr
3441 | rur1 r u1 rr
3442 | rur2 r u2 rr
3443 | rur3 r u3 rr
3444 | rur4 r u4 rr
3445 | rur5 r u5 rr
3446 | ruar1 r ua1 rr
3447 | ruar2 r ua2 rr
3448 | ruar3 r ua3 rr
3449 | ruar4 r ua4 rr
3450 | ruar5 r ua5 rr
3451 | ruanr1 r uan1 rr
3452 | ruanr2 r uan2 rr
3453 | ruanr3 r uan3 rr
3454 | ruanr4 r uan4 rr
3455 | ruanr5 r uan5 rr
3456 | ruir1 r uei1 rr
3457 | ruir2 r uei2 rr
3458 | ruir3 r uei3 rr
3459 | ruir4 r uei4 rr
3460 | ruir5 r uei5 rr
3461 | runr1 r uen1 rr
3462 | runr2 r uen2 rr
3463 | runr3 r uen3 rr
3464 | runr4 r uen4 rr
3465 | runr5 r uen5 rr
3466 | ruor1 r uo1 rr
3467 | ruor2 r uo2 rr
3468 | ruor3 r uo3 rr
3469 | ruor4 r uo4 rr
3470 | ruor5 r uo5 rr
3471 | sar1 s a1 rr
3472 | sar2 s a2 rr
3473 | sar3 s a3 rr
3474 | sar4 s a4 rr
3475 | sar5 s a5 rr
3476 | sair1 s ai1 rr
3477 | sair2 s ai2 rr
3478 | sair3 s ai3 rr
3479 | sair4 s ai4 rr
3480 | sair5 s ai5 rr
3481 | sanr1 s an1 rr
3482 | sanr2 s an2 rr
3483 | sanr3 s an3 rr
3484 | sanr4 s an4 rr
3485 | sanr5 s an5 rr
3486 | sangr1 s ang1 rr
3487 | sangr2 s ang2 rr
3488 | sangr3 s ang3 rr
3489 | sangr4 s ang4 rr
3490 | sangr5 s ang5 rr
3491 | saor1 s ao1 rr
3492 | saor2 s ao2 rr
3493 | saor3 s ao3 rr
3494 | saor4 s ao4 rr
3495 | saor5 s ao5 rr
3496 | ser1 s e1 rr
3497 | ser2 s e2 rr
3498 | ser3 s e3 rr
3499 | ser4 s e4 rr
3500 | ser5 s e5 rr
3501 | senr1 s en1 rr
3502 | senr2 s en2 rr
3503 | senr3 s en3 rr
3504 | senr4 s en4 rr
3505 | senr5 s en5 rr
3506 | sengr1 s eng1 rr
3507 | sengr2 s eng2 rr
3508 | sengr3 s eng3 rr
3509 | sengr4 s eng4 rr
3510 | sengr5 s eng5 rr
3511 | shar1 sh a1 rr
3512 | shar2 sh a2 rr
3513 | shar3 sh a3 rr
3514 | shar4 sh a4 rr
3515 | shar5 sh a5 rr
3516 | shair1 sh ai1 rr
3517 | shair2 sh ai2 rr
3518 | shair3 sh ai3 rr
3519 | shair4 sh ai4 rr
3520 | shair5 sh ai5 rr
3521 | shanr1 sh an1 rr
3522 | shanr2 sh an2 rr
3523 | shanr3 sh an3 rr
3524 | shanr4 sh an4 rr
3525 | shanr5 sh an5 rr
3526 | shangr1 sh ang1 rr
3527 | shangr2 sh ang2 rr
3528 | shangr3 sh ang3 rr
3529 | shangr4 sh ang4 rr
3530 | shangr5 sh ang5 rr
3531 | shaor1 sh ao1 rr
3532 | shaor2 sh ao2 rr
3533 | shaor3 sh ao3 rr
3534 | shaor4 sh ao4 rr
3535 | shaor5 sh ao5 rr
3536 | sher1 sh e1 rr
3537 | sher2 sh e2 rr
3538 | sher3 sh e3 rr
3539 | sher4 sh e4 rr
3540 | sher5 sh e5 rr
3541 | sheir1 sh ei1 rr
3542 | sheir2 sh ei2 rr
3543 | sheir3 sh ei3 rr
3544 | sheir4 sh ei4 rr
3545 | sheir5 sh ei5 rr
3546 | shenr1 sh en1 rr
3547 | shenr2 sh en2 rr
3548 | shenr3 sh en3 rr
3549 | shenr4 sh en4 rr
3550 | shenr5 sh en5 rr
3551 | shengr1 sh eng1 rr
3552 | shengr2 sh eng2 rr
3553 | shengr3 sh eng3 rr
3554 | shengr4 sh eng4 rr
3555 | shengr5 sh eng5 rr
3556 | shir1 sh iii1 rr
3557 | shir2 sh iii2 rr
3558 | shir3 sh iii3 rr
3559 | shir4 sh iii4 rr
3560 | shir5 sh iii5 rr
3561 | shour1 sh ou1 rr
3562 | shour2 sh ou2 rr
3563 | shour3 sh ou3 rr
3564 | shour4 sh ou4 rr
3565 | shour5 sh ou5 rr
3566 | shur1 sh u1 rr
3567 | shur2 sh u2 rr
3568 | shur3 sh u3 rr
3569 | shur4 sh u4 rr
3570 | shur5 sh u5 rr
3571 | shuar1 sh ua1 rr
3572 | shuar2 sh ua2 rr
3573 | shuar3 sh ua3 rr
3574 | shuar4 sh ua4 rr
3575 | shuar5 sh ua5 rr
3576 | shuair1 sh uai1 rr
3577 | shuair2 sh uai2 rr
3578 | shuair3 sh uai3 rr
3579 | shuair4 sh uai4 rr
3580 | shuair5 sh uai5 rr
3581 | shuanr1 sh uan1 rr
3582 | shuanr2 sh uan2 rr
3583 | shuanr3 sh uan3 rr
3584 | shuanr4 sh uan4 rr
3585 | shuanr5 sh uan5 rr
3586 | shuangr1 sh uang1 rr
3587 | shuangr2 sh uang2 rr
3588 | shuangr3 sh uang3 rr
3589 | shuangr4 sh uang4 rr
3590 | shuangr5 sh uang5 rr
3591 | shuir1 sh uei1 rr
3592 | shuir2 sh uei2 rr
3593 | shuir3 sh uei3 rr
3594 | shuir4 sh uei4 rr
3595 | shuir5 sh uei5 rr
3596 | shunr1 sh uen1 rr
3597 | shunr2 sh uen2 rr
3598 | shunr3 sh uen3 rr
3599 | shunr4 sh uen4 rr
3600 | shunr5 sh uen5 rr
3601 | shuor1 sh uo1 rr
3602 | shuor2 sh uo2 rr
3603 | shuor3 sh uo3 rr
3604 | shuor4 sh uo4 rr
3605 | shuor5 sh uo5 rr
3606 | sir1 s ii1 rr
3607 | sir2 s ii2 rr
3608 | sir3 s ii3 rr
3609 | sir4 s ii4 rr
3610 | sir5 s ii5 rr
3611 | songr1 s ong1 rr
3612 | songr2 s ong2 rr
3613 | songr3 s ong3 rr
3614 | songr4 s ong4 rr
3615 | songr5 s ong5 rr
3616 | sour1 s ou1 rr
3617 | sour2 s ou2 rr
3618 | sour3 s ou3 rr
3619 | sour4 s ou4 rr
3620 | sour5 s ou5 rr
3621 | sur1 s u1 rr
3622 | sur2 s u2 rr
3623 | sur3 s u3 rr
3624 | sur4 s u4 rr
3625 | sur5 s u5 rr
3626 | suanr1 s uan1 rr
3627 | suanr2 s uan2 rr
3628 | suanr3 s uan3 rr
3629 | suanr4 s uan4 rr
3630 | suanr5 s uan5 rr
3631 | suir1 s uei1 rr
3632 | suir2 s uei2 rr
3633 | suir3 s uei3 rr
3634 | suir4 s uei4 rr
3635 | suir5 s uei5 rr
3636 | sunr1 s uen1 rr
3637 | sunr2 s uen2 rr
3638 | sunr3 s uen3 rr
3639 | sunr4 s uen4 rr
3640 | sunr5 s uen5 rr
3641 | suor1 s uo1 rr
3642 | suor2 s uo2 rr
3643 | suor3 s uo3 rr
3644 | suor4 s uo4 rr
3645 | suor5 s uo5 rr
3646 | tar1 t a1 rr
3647 | tar2 t a2 rr
3648 | tar3 t a3 rr
3649 | tar4 t a4 rr
3650 | tar5 t a5 rr
3651 | tair1 t ai1 rr
3652 | tair2 t ai2 rr
3653 | tair3 t ai3 rr
3654 | tair4 t ai4 rr
3655 | tair5 t ai5 rr
3656 | tanr1 t an1 rr
3657 | tanr2 t an2 rr
3658 | tanr3 t an3 rr
3659 | tanr4 t an4 rr
3660 | tanr5 t an5 rr
3661 | tangr1 t ang1 rr
3662 | tangr2 t ang2 rr
3663 | tangr3 t ang3 rr
3664 | tangr4 t ang4 rr
3665 | tangr5 t ang5 rr
3666 | taor1 t ao1 rr
3667 | taor2 t ao2 rr
3668 | taor3 t ao3 rr
3669 | taor4 t ao4 rr
3670 | taor5 t ao5 rr
3671 | ter1 t e1 rr
3672 | ter2 t e2 rr
3673 | ter3 t e3 rr
3674 | ter4 t e4 rr
3675 | ter5 t e5 rr
3676 | teir1 t ei1 rr
3677 | teir2 t ei2 rr
3678 | teir3 t ei3 rr
3679 | teir4 t ei4 rr
3680 | teir5 t ei5 rr
3681 | tengr1 t eng1 rr
3682 | tengr2 t eng2 rr
3683 | tengr3 t eng3 rr
3684 | tengr4 t eng4 rr
3685 | tengr5 t eng5 rr
3686 | tir1 t i1 rr
3687 | tir2 t i2 rr
3688 | tir3 t i3 rr
3689 | tir4 t i4 rr
3690 | tir5 t i5 rr
3691 | tianr1 t ian1 rr
3692 | tianr2 t ian2 rr
3693 | tianr3 t ian3 rr
3694 | tianr4 t ian4 rr
3695 | tianr5 t ian5 rr
3696 | tiaor1 t iao1 rr
3697 | tiaor2 t iao2 rr
3698 | tiaor3 t iao3 rr
3699 | tiaor4 t iao4 rr
3700 | tiaor5 t iao5 rr
3701 | tier1 t ie1 rr
3702 | tier2 t ie2 rr
3703 | tier3 t ie3 rr
3704 | tier4 t ie4 rr
3705 | tier5 t ie5 rr
3706 | tingr1 t ing1 rr
3707 | tingr2 t ing2 rr
3708 | tingr3 t ing3 rr
3709 | tingr4 t ing4 rr
3710 | tingr5 t ing5 rr
3711 | tongr1 t ong1 rr
3712 | tongr2 t ong2 rr
3713 | tongr3 t ong3 rr
3714 | tongr4 t ong4 rr
3715 | tongr5 t ong5 rr
3716 | tour1 t ou1 rr
3717 | tour2 t ou2 rr
3718 | tour3 t ou3 rr
3719 | tour4 t ou4 rr
3720 | tour5 t ou5 rr
3721 | tur1 t u1 rr
3722 | tur2 t u2 rr
3723 | tur3 t u3 rr
3724 | tur4 t u4 rr
3725 | tur5 t u5 rr
3726 | tuanr1 t uan1 rr
3727 | tuanr2 t uan2 rr
3728 | tuanr3 t uan3 rr
3729 | tuanr4 t uan4 rr
3730 | tuanr5 t uan5 rr
3731 | tuir1 t uei1 rr
3732 | tuir2 t uei2 rr
3733 | tuir3 t uei3 rr
3734 | tuir4 t uei4 rr
3735 | tuir5 t uei5 rr
3736 | tunr1 t uen1 rr
3737 | tunr2 t uen2 rr
3738 | tunr3 t uen3 rr
3739 | tunr4 t uen4 rr
3740 | tunr5 t uen5 rr
3741 | tuor1 t uo1 rr
3742 | tuor2 t uo2 rr
3743 | tuor3 t uo3 rr
3744 | tuor4 t uo4 rr
3745 | tuor5 t uo5 rr
3746 | war1 w ua1 rr
3747 | war2 w ua2 rr
3748 | war3 w ua3 rr
3749 | war4 w ua4 rr
3750 | war5 w ua5 rr
3751 | wair1 w uai1 rr
3752 | wair2 w uai2 rr
3753 | wair3 w uai3 rr
3754 | wair4 w uai4 rr
3755 | wair5 w uai5 rr
3756 | wanr1 w uan1 rr
3757 | wanr2 w uan2 rr
3758 | wanr3 w uan3 rr
3759 | wanr4 w uan4 rr
3760 | wanr5 w uan5 rr
3761 | wangr1 w uang1 rr
3762 | wangr2 w uang2 rr
3763 | wangr3 w uang3 rr
3764 | wangr4 w uang4 rr
3765 | wangr5 w uang5 rr
3766 | weir1 w uei1 rr
3767 | weir2 w uei2 rr
3768 | weir3 w uei3 rr
3769 | weir4 w uei4 rr
3770 | weir5 w uei5 rr
3771 | wenr1 w uen1 rr
3772 | wenr2 w uen2 rr
3773 | wenr3 w uen3 rr
3774 | wenr4 w uen4 rr
3775 | wenr5 w uen5 rr
3776 | wengr1 w uen1 rr
3777 | wengr2 w uen2 rr
3778 | wengr3 w uen3 rr
3779 | wengr4 w uen4 rr
3780 | wengr5 w uen5 rr
3781 | wor1 w uo1 rr
3782 | wor2 w uo2 rr
3783 | wor3 w uo3 rr
3784 | wor4 w uo4 rr
3785 | wor5 w uo5 rr
3786 | wur1 w u1 rr
3787 | wur2 w u2 rr
3788 | wur3 w u3 rr
3789 | wur4 w u4 rr
3790 | wur5 w u5 rr
3791 | xir1 x i1 rr
3792 | xir2 x i2 rr
3793 | xir3 x i3 rr
3794 | xir4 x i4 rr
3795 | xir5 x i5 rr
3796 | xiar1 x ia1 rr
3797 | xiar2 x ia2 rr
3798 | xiar3 x ia3 rr
3799 | xiar4 x ia4 rr
3800 | xiar5 x ia5 rr
3801 | xianr1 x ian1 rr
3802 | xianr2 x ian2 rr
3803 | xianr3 x ian3 rr
3804 | xianr4 x ian4 rr
3805 | xianr5 x ian5 rr
3806 | xiangr1 x iang1 rr
3807 | xiangr2 x iang2 rr
3808 | xiangr3 x iang3 rr
3809 | xiangr4 x iang4 rr
3810 | xiangr5 x iang5 rr
3811 | xiaor1 x iao1 rr
3812 | xiaor2 x iao2 rr
3813 | xiaor3 x iao3 rr
3814 | xiaor4 x iao4 rr
3815 | xiaor5 x iao5 rr
3816 | xier1 x ie1 rr
3817 | xier2 x ie2 rr
3818 | xier3 x ie3 rr
3819 | xier4 x ie4 rr
3820 | xier5 x ie5 rr
3821 | xinr1 x in1 rr
3822 | xinr2 x in2 rr
3823 | xinr3 x in3 rr
3824 | xinr4 x in4 rr
3825 | xinr5 x in5 rr
3826 | xingr1 x ing1 rr
3827 | xingr2 x ing2 rr
3828 | xingr3 x ing3 rr
3829 | xingr4 x ing4 rr
3830 | xingr5 x ing5 rr
3831 | xiongr1 x iong1 rr
3832 | xiongr2 x iong2 rr
3833 | xiongr3 x iong3 rr
3834 | xiongr4 x iong4 rr
3835 | xiongr5 x iong5 rr
3836 | xiur1 x iou1 rr
3837 | xiur2 x iou2 rr
3838 | xiur3 x iou3 rr
3839 | xiur4 x iou4 rr
3840 | xiur5 x iou5 rr
3841 | xur1 x v1 rr
3842 | xur2 x v2 rr
3843 | xur3 x v3 rr
3844 | xur4 x v4 rr
3845 | xur5 x v5 rr
3846 | xuanr1 x van1 rr
3847 | xuanr2 x van2 rr
3848 | xuanr3 x van3 rr
3849 | xuanr4 x van4 rr
3850 | xuanr5 x van5 rr
3851 | xuer1 x ve1 rr
3852 | xuer2 x ve2 rr
3853 | xuer3 x ve3 rr
3854 | xuer4 x ve4 rr
3855 | xuer5 x ve5 rr
3856 | xunr1 x vn1 rr
3857 | xunr2 x vn2 rr
3858 | xunr3 x vn3 rr
3859 | xunr4 x vn4 rr
3860 | xunr5 x vn5 rr
3861 | yar1 y ia1 rr
3862 | yar2 y ia2 rr
3863 | yar3 y ia3 rr
3864 | yar4 y ia4 rr
3865 | yar5 y ia5 rr
3866 | yanr1 y ian1 rr
3867 | yanr2 y ian2 rr
3868 | yanr3 y ian3 rr
3869 | yanr4 y ian4 rr
3870 | yanr5 y ian5 rr
3871 | yangr1 y iang1 rr
3872 | yangr2 y iang2 rr
3873 | yangr3 y iang3 rr
3874 | yangr4 y iang4 rr
3875 | yangr5 y iang5 rr
3876 | yaor1 y iao1 rr
3877 | yaor2 y iao2 rr
3878 | yaor3 y iao3 rr
3879 | yaor4 y iao4 rr
3880 | yaor5 y iao5 rr
3881 | yer1 y ie1 rr
3882 | yer2 y ie2 rr
3883 | yer3 y ie3 rr
3884 | yer4 y ie4 rr
3885 | yer5 y ie5 rr
3886 | yir1 y i1 rr
3887 | yir2 y i2 rr
3888 | yir3 y i3 rr
3889 | yir4 y i4 rr
3890 | yir5 y i5 rr
3891 | yinr1 y in1 rr
3892 | yinr2 y in2 rr
3893 | yinr3 y in3 rr
3894 | yinr4 y in4 rr
3895 | yinr5 y in5 rr
3896 | yingr1 y ing1 rr
3897 | yingr2 y ing2 rr
3898 | yingr3 y ing3 rr
3899 | yingr4 y ing4 rr
3900 | yingr5 y ing5 rr
3901 | yor1 y iou1 rr
3902 | yor2 y iou2 rr
3903 | yor3 y iou3 rr
3904 | yor4 y iou4 rr
3905 | yor5 y iou5 rr
3906 | yongr1 y iong1 rr
3907 | yongr2 y iong2 rr
3908 | yongr3 y iong3 rr
3909 | yongr4 y iong4 rr
3910 | yongr5 y iong5 rr
3911 | your1 y iou1 rr
3912 | your2 y iou2 rr
3913 | your3 y iou3 rr
3914 | your4 y iou4 rr
3915 | your5 y iou5 rr
3916 | yur1 y v1 rr
3917 | yur2 y v2 rr
3918 | yur3 y v3 rr
3919 | yur4 y v4 rr
3920 | yur5 y v5 rr
3921 | yuanr1 y van1 rr
3922 | yuanr2 y van2 rr
3923 | yuanr3 y van3 rr
3924 | yuanr4 y van4 rr
3925 | yuanr5 y van5 rr
3926 | yuer1 y ve1 rr
3927 | yuer2 y ve2 rr
3928 | yuer3 y ve3 rr
3929 | yuer4 y ve4 rr
3930 | yuer5 y ve5 rr
3931 | yunr1 y vn1 rr
3932 | yunr2 y vn2 rr
3933 | yunr3 y vn3 rr
3934 | yunr4 y vn4 rr
3935 | yunr5 y vn5 rr
3936 | zar1 z a1 rr
3937 | zar2 z a2 rr
3938 | zar3 z a3 rr
3939 | zar4 z a4 rr
3940 | zar5 z a5 rr
3941 | zair1 z ai1 rr
3942 | zair2 z ai2 rr
3943 | zair3 z ai3 rr
3944 | zair4 z ai4 rr
3945 | zair5 z ai5 rr
3946 | zanr1 z an1 rr
3947 | zanr2 z an2 rr
3948 | zanr3 z an3 rr
3949 | zanr4 z an4 rr
3950 | zanr5 z an5 rr
3951 | zangr1 z ang1 rr
3952 | zangr2 z ang2 rr
3953 | zangr3 z ang3 rr
3954 | zangr4 z ang4 rr
3955 | zangr5 z ang5 rr
3956 | zaor1 z ao1 rr
3957 | zaor2 z ao2 rr
3958 | zaor3 z ao3 rr
3959 | zaor4 z ao4 rr
3960 | zaor5 z ao5 rr
3961 | zer1 z e1 rr
3962 | zer2 z e2 rr
3963 | zer3 z e3 rr
3964 | zer4 z e4 rr
3965 | zer5 z e5 rr
3966 | zeir1 z ei1 rr
3967 | zeir2 z ei2 rr
3968 | zeir3 z ei3 rr
3969 | zeir4 z ei4 rr
3970 | zeir5 z ei5 rr
3971 | zenr1 z en1 rr
3972 | zenr2 z en2 rr
3973 | zenr3 z en3 rr
3974 | zenr4 z en4 rr
3975 | zenr5 z en5 rr
3976 | zengr1 z eng1 rr
3977 | zengr2 z eng2 rr
3978 | zengr3 z eng3 rr
3979 | zengr4 z eng4 rr
3980 | zengr5 z eng5 rr
3981 | zhar1 zh a1 rr
3982 | zhar2 zh a2 rr
3983 | zhar3 zh a3 rr
3984 | zhar4 zh a4 rr
3985 | zhar5 zh a5 rr
3986 | zhair1 zh ai1 rr
3987 | zhair2 zh ai2 rr
3988 | zhair3 zh ai3 rr
3989 | zhair4 zh ai4 rr
3990 | zhair5 zh ai5 rr
3991 | zhanr1 zh an1 rr
3992 | zhanr2 zh an2 rr
3993 | zhanr3 zh an3 rr
3994 | zhanr4 zh an4 rr
3995 | zhanr5 zh an5 rr
3996 | zhangr1 zh ang1 rr
3997 | zhangr2 zh ang2 rr
3998 | zhangr3 zh ang3 rr
3999 | zhangr4 zh ang4 rr
4000 | zhangr5 zh ang5 rr
4001 | zhaor1 zh ao1 rr
4002 | zhaor2 zh ao2 rr
4003 | zhaor3 zh ao3 rr
4004 | zhaor4 zh ao4 rr
4005 | zhaor5 zh ao5 rr
4006 | zher1 zh e1 rr
4007 | zher2 zh e2 rr
4008 | zher3 zh e3 rr
4009 | zher4 zh e4 rr
4010 | zher5 zh e5 rr
4011 | zheir1 zh ei1 rr
4012 | zheir2 zh ei2 rr
4013 | zheir3 zh ei3 rr
4014 | zheir4 zh ei4 rr
4015 | zheir5 zh ei5 rr
4016 | zhenr1 zh en1 rr
4017 | zhenr2 zh en2 rr
4018 | zhenr3 zh en3 rr
4019 | zhenr4 zh en4 rr
4020 | zhenr5 zh en5 rr
4021 | zhengr1 zh eng1 rr
4022 | zhengr2 zh eng2 rr
4023 | zhengr3 zh eng3 rr
4024 | zhengr4 zh eng4 rr
4025 | zhengr5 zh eng5 rr
4026 | zhir1 zh iii1 rr
4027 | zhir2 zh iii2 rr
4028 | zhir3 zh iii3 rr
4029 | zhir4 zh iii4 rr
4030 | zhir5 zh iii5 rr
4031 | zhongr1 zh ong1 rr
4032 | zhongr2 zh ong2 rr
4033 | zhongr3 zh ong3 rr
4034 | zhongr4 zh ong4 rr
4035 | zhongr5 zh ong5 rr
4036 | zhour1 zh ou1 rr
4037 | zhour2 zh ou2 rr
4038 | zhour3 zh ou3 rr
4039 | zhour4 zh ou4 rr
4040 | zhour5 zh ou5 rr
4041 | zhur1 zh u1 rr
4042 | zhur2 zh u2 rr
4043 | zhur3 zh u3 rr
4044 | zhur4 zh u4 rr
4045 | zhur5 zh u5 rr
4046 | zhuar1 zh ua1 rr
4047 | zhuar2 zh ua2 rr
4048 | zhuar3 zh ua3 rr
4049 | zhuar4 zh ua4 rr
4050 | zhuar5 zh ua5 rr
4051 | zhuair1 zh uai1 rr
4052 | zhuair2 zh uai2 rr
4053 | zhuair3 zh uai3 rr
4054 | zhuair4 zh uai4 rr
4055 | zhuair5 zh uai5 rr
4056 | zhuanr1 zh uan1 rr
4057 | zhuanr2 zh uan2 rr
4058 | zhuanr3 zh uan3 rr
4059 | zhuanr4 zh uan4 rr
4060 | zhuanr5 zh uan5 rr
4061 | zhuangr1 zh uang1 rr
4062 | zhuangr2 zh uang2 rr
4063 | zhuangr3 zh uang3 rr
4064 | zhuangr4 zh uang4 rr
4065 | zhuangr5 zh uang5 rr
4066 | zhuir1 zh uei1 rr
4067 | zhuir2 zh uei2 rr
4068 | zhuir3 zh uei3 rr
4069 | zhuir4 zh uei4 rr
4070 | zhuir5 zh uei5 rr
4071 | zhunr1 zh uen1 rr
4072 | zhunr2 zh uen2 rr
4073 | zhunr3 zh uen3 rr
4074 | zhunr4 zh uen4 rr
4075 | zhunr5 zh uen5 rr
4076 | zhuor1 zh uo1 rr
4077 | zhuor2 zh uo2 rr
4078 | zhuor3 zh uo3 rr
4079 | zhuor4 zh uo4 rr
4080 | zhuor5 zh uo5 rr
4081 | zir1 z ii1 rr
4082 | zir2 z ii2 rr
4083 | zir3 z ii3 rr
4084 | zir4 z ii4 rr
4085 | zir5 z ii5 rr
4086 | zongr1 z ong1 rr
4087 | zongr2 z ong2 rr
4088 | zongr3 z ong3 rr
4089 | zongr4 z ong4 rr
4090 | zongr5 z ong5 rr
4091 | zour1 z ou1 rr
4092 | zour2 z ou2 rr
4093 | zour3 z ou3 rr
4094 | zour4 z ou4 rr
4095 | zour5 z ou5 rr
4096 | zur1 z u1 rr
4097 | zur2 z u2 rr
4098 | zur3 z u3 rr
4099 | zur4 z u4 rr
4100 | zur5 z u5 rr
4101 | zuanr1 z uan1 rr
4102 | zuanr2 z uan2 rr
4103 | zuanr3 z uan3 rr
4104 | zuanr4 z uan4 rr
4105 | zuanr5 z uan5 rr
4106 | zuir1 z uei1 rr
4107 | zuir2 z uei2 rr
4108 | zuir3 z uei3 rr
4109 | zuir4 z uei4 rr
4110 | zuir5 z uei5 rr
4111 | zunr1 z uen1 rr
4112 | zunr2 z uen2 rr
4113 | zunr3 z uen3 rr
4114 | zunr4 z uen4 rr
4115 | zunr5 z uen5 rr
4116 | zuor1 z uo1 rr
4117 | zuor2 z uo2 rr
4118 | zuor3 z uo3 rr
4119 | zuor4 z uo4 rr
4120 | zuor5 z uo5 rr
4121 |
--------------------------------------------------------------------------------
/model/FastPitchFormant.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 | from .modules import TextEncoder, Decoder, VarianceAdaptor, Generator
9 | from utils.tools import get_mask_from_lengths
10 |
11 |
12 | class FastPitchFormant(nn.Module):
13 | """ FastPitchFormant """
14 |
15 | def __init__(self, preprocess_config, model_config):
16 | super(FastPitchFormant, self).__init__()
17 | self.model_config = model_config
18 |
19 | self.encoder = TextEncoder(model_config)
20 | self.variance_adaptor = VarianceAdaptor(preprocess_config, model_config)
21 | self.formant_generator = Generator(model_config)
22 | self.excitation_generator = Generator(model_config, query_projection=True)
23 | self.decoder = Decoder(preprocess_config, model_config)
24 |
25 | self.speaker_emb = None
26 | if model_config["multi_speaker"]:
27 | with open(
28 | os.path.join(
29 | preprocess_config["path"]["preprocessed_path"], "speakers.json"
30 | ),
31 | "r",
32 | ) as f:
33 | n_speaker = len(json.load(f))
34 | self.speaker_emb = nn.Embedding(
35 | n_speaker,
36 | model_config["transformer"]["encoder_hidden"],
37 | )
38 |
39 | def forward(
40 | self,
41 | speakers,
42 | texts,
43 | src_lens,
44 | max_src_len,
45 | mels=None,
46 | mel_lens=None,
47 | max_mel_len=None,
48 | p_targets=None,
49 | d_targets=None,
50 | p_control=1.0,
51 | d_control=1.0,
52 | ):
53 | src_masks = get_mask_from_lengths(src_lens, max_src_len)
54 | mel_masks = (
55 | get_mask_from_lengths(mel_lens, max_mel_len)
56 | if mel_lens is not None
57 | else None
58 | )
59 |
60 | output = self.encoder(texts, src_masks)
61 |
62 | speaker_embedding = None
63 | if self.speaker_emb is not None:
64 | speaker_embedding = self.speaker_emb(speakers).unsqueeze(1).expand(
65 | -1, max_src_len, -1
66 | )
67 | output = output + speaker_embedding
68 |
69 | (
70 | h,
71 | p,
72 | p_predictions,
73 | log_d_predictions,
74 | d_rounded,
75 | mel_lens,
76 | mel_masks,
77 | ) = self.variance_adaptor(
78 | output,
79 | speaker_embedding,
80 | src_masks,
81 | mel_masks,
82 | max_mel_len,
83 | p_targets,
84 | d_targets,
85 | p_control,
86 | d_control,
87 | )
88 |
89 | formant_hidden = self.formant_generator(h, mel_masks)
90 | excitation_hidden = self.excitation_generator(p, mel_masks, hidden_query=h)
91 |
92 | mel_iters, mel_masks = self.decoder(formant_hidden, excitation_hidden, mel_masks)
93 |
94 | return (
95 | mel_iters,
96 | p_predictions,
97 | log_d_predictions,
98 | d_rounded,
99 | src_masks,
100 | mel_masks,
101 | src_lens,
102 | mel_lens,
103 | )
--------------------------------------------------------------------------------
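FastPitchFormant.forward builds boolean padding masks from sequence lengths via utils.tools.get_mask_from_lengths, which is defined outside this section. A minimal sketch of the conventional behavior the model relies on (an assumption here, not the repo's exact source): True marks padded positions, so they can be masked_fill-ed downstream.

    import torch

    def get_mask_from_lengths(lengths, max_len=None):
        # lengths: (batch,) LongTensor of valid sequence lengths
        if max_len is None:
            max_len = int(lengths.max().item())
        ids = torch.arange(max_len, device=lengths.device).unsqueeze(0)
        return ids >= lengths.unsqueeze(1)  # (batch, max_len), True = padding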
/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .FastPitchFormant import FastPitchFormant
2 | from .loss import FastPitchFormantLoss
3 | from .optimizer import ScheduledOptim
--------------------------------------------------------------------------------
/model/blocks.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from torch.nn import functional as F
5 |
6 |
7 | class LinearNorm(nn.Module):
8 | """ LinearNorm Projection """
9 |
10 | def __init__(self, in_features, out_features, bias=False):
11 | super(LinearNorm, self).__init__()
12 | self.linear = nn.Linear(in_features, out_features, bias)
13 |
14 | nn.init.xavier_uniform_(self.linear.weight)
15 | if bias:
16 | nn.init.constant_(self.linear.bias, 0.0)
17 |
18 | def forward(self, x):
19 | x = self.linear(x)
20 | return x
21 |
22 |
23 | class Conv1DBlock(nn.Module):
24 | """ 1D Convolutional Block """
25 |
26 | def __init__(self, in_channels, out_channels, kernel_size, activation=None, dropout=None):
27 | super(Conv1DBlock, self).__init__()
28 |
29 | self.conv_layer = nn.Sequential()
30 | self.conv_layer.add_module(
31 | "conv_layer",
32 | ConvNorm(
33 | in_channels,
34 | out_channels,
35 | kernel_size=kernel_size,
36 | stride=1,
37 | padding=int((kernel_size - 1) / 2),
38 | dilation=1,
39 | w_init_gain="tanh",
40 | ),
41 | )
42 | if activation is not None:
43 | self.conv_layer.add_module("activ", activation)
44 | self.dropout = dropout
45 |
46 | def forward(self, x, mask=None):
47 | x = x.contiguous().transpose(1, 2)
48 | x = self.conv_layer(x)
49 |
50 | if self.dropout is not None:
51 | x = F.dropout(x, self.dropout, self.training)
52 |
53 | x = x.contiguous().transpose(1, 2)
54 | if mask is not None:
55 | x = x.masked_fill(mask.unsqueeze(-1), 0)
56 |
57 | return x
58 |
59 |
60 | class ConvNorm(nn.Module):
61 | """ 1D Convolution """
62 |
63 | def __init__(
64 | self,
65 | in_channels,
66 | out_channels,
67 | kernel_size=1,
68 | stride=1,
69 | padding=None,
70 | dilation=1,
71 | bias=True,
72 | w_init_gain="linear",
73 | ):
74 | super(ConvNorm, self).__init__()
75 |
76 | if padding is None:
77 | assert kernel_size % 2 == 1
78 | padding = int(dilation * (kernel_size - 1) / 2)
79 |
80 | self.conv = nn.Conv1d(
81 | in_channels,
82 | out_channels,
83 | kernel_size=kernel_size,
84 | stride=stride,
85 | padding=padding,
86 | dilation=dilation,
87 | bias=bias,
88 | )
89 |
90 | def forward(self, signal):
91 | conv_signal = self.conv(signal)
92 |
93 | return conv_signal
94 |
95 |
96 | class FFTBlock(nn.Module):
97 | """ FFT Block """
98 |
99 | def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1, query_projection=False):
100 | super(FFTBlock, self).__init__()
101 | self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
102 | self.pos_ffn = PositionwiseFeedForward(
103 | d_model, d_inner, kernel_size, dropout=dropout
104 | )
105 | if query_projection:
106 | self.query_linear = LinearNorm(d_model, d_model, bias=True)
107 |
108 | def forward(self, enc_input, mask=None, slf_attn_mask=None, hidden_query=None):
109 | enc_output, enc_slf_attn = self.slf_attn(
110 |             self.query_linear(enc_input + hidden_query) if hidden_query is not None else enc_input,
111 |             enc_input, enc_input, mask=slf_attn_mask
112 | )
113 | if mask is not None:
114 | enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
115 |
116 | enc_output = self.pos_ffn(enc_output)
117 | if mask is not None:
118 | enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
119 |
120 | return enc_output, enc_slf_attn
121 |
122 |
123 | class MultiHeadAttention(nn.Module):
124 | """ Multi-Head Attention """
125 |
126 | def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
127 | super(MultiHeadAttention, self).__init__()
128 |
129 | self.n_head = n_head
130 | self.d_k = d_k
131 | self.d_v = d_v
132 |
133 | self.w_qs = LinearNorm(d_model, n_head * d_k)
134 | self.w_ks = LinearNorm(d_model, n_head * d_k)
135 | self.w_vs = LinearNorm(d_model, n_head * d_v)
136 |
137 | self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
138 | self.layer_norm = nn.LayerNorm(d_model)
139 |
140 | self.fc = LinearNorm(n_head * d_v, d_model)
141 |
142 | self.dropout = nn.Dropout(dropout)
143 |
144 | def forward(self, q, k, v, mask=None):
145 |
146 | d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
147 |
148 | sz_b, len_q, _ = q.size()
149 | sz_b, len_k, _ = k.size()
150 | sz_b, len_v, _ = v.size()
151 |
152 | residual = q
153 |
154 | q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
155 | k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
156 | v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
157 | q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
158 | k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
159 | v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
160 |
161 |         mask = mask.repeat(n_head, 1, 1) if mask is not None else None  # (n*b) x .. x ..
162 | output, attn = self.attention(q, k, v, mask=mask)
163 |
164 | output = output.view(n_head, sz_b, len_q, d_v)
165 | output = (
166 | output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
167 | ) # b x lq x (n*dv)
168 |
169 | output = self.dropout(self.fc(output))
170 | output = self.layer_norm(output + residual)
171 |
172 | return output, attn
173 |
174 |
175 | class ScaledDotProductAttention(nn.Module):
176 | """ Scaled Dot-Product Attention """
177 |
178 | def __init__(self, temperature):
179 | super(ScaledDotProductAttention, self).__init__()
180 | self.temperature = temperature
181 | self.softmax = nn.Softmax(dim=2)
182 |
183 | def forward(self, q, k, v, mask=None):
184 |
185 | attn = torch.bmm(q, k.transpose(1, 2))
186 | attn = attn / self.temperature
187 |
188 | if mask is not None:
189 | attn = attn.masked_fill(mask, -np.inf)
190 |
191 | attn = self.softmax(attn)
192 | output = torch.bmm(attn, v)
193 |
194 | return output, attn
195 |
196 |
197 | class PositionwiseFeedForward(nn.Module):
198 |     """ Two-layer position-wise feed-forward network """
199 |
200 | def __init__(self, d_in, d_hid, kernel_size, dropout=0.1):
201 | super(PositionwiseFeedForward, self).__init__()
202 |
203 | # Use Conv1D
204 | # position-wise
205 | self.w_1 = nn.Conv1d(
206 | d_in,
207 | d_hid,
208 | kernel_size=kernel_size[0],
209 | padding=(kernel_size[0] - 1) // 2,
210 | )
211 | # position-wise
212 | self.w_2 = nn.Conv1d(
213 | d_hid,
214 | d_in,
215 | kernel_size=kernel_size[1],
216 | padding=(kernel_size[1] - 1) // 2,
217 | )
218 |
219 | self.layer_norm = nn.LayerNorm(d_in)
220 | self.dropout = nn.Dropout(dropout)
221 |
222 | def forward(self, x):
223 | residual = x
224 | output = x.transpose(1, 2)
225 | output = self.w_2(F.relu(self.w_1(output)))
226 | output = output.transpose(1, 2)
227 | output = self.dropout(output)
228 | output = self.layer_norm(output + residual)
229 |
230 | return output
231 |
--------------------------------------------------------------------------------
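A quick shape check of the attention stack above (illustrative only; assumes the repo root is on PYTHONPATH):

    import torch
    from model.blocks import MultiHeadAttention

    attn = MultiHeadAttention(n_head=2, d_model=256, d_k=128, d_v=128)
    x = torch.randn(4, 17, 256)                      # (batch, len, d_model)
    mask = torch.zeros(4, 17, 17, dtype=torch.bool)  # all-False: attend everywhere
    out, weights = attn(x, x, x, mask=mask)
    assert out.shape == (4, 17, 256)                 # residual + layer norm preserve d_model
    assert weights.shape == (2 * 4, 17, 17)          # heads are folded into the batch dim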
/model/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class FastPitchFormantLoss(nn.Module):
6 | """ FastPitchFormant Loss """
7 |
8 | def __init__(self, preprocess_config, model_config):
9 | super(FastPitchFormantLoss, self).__init__()
10 | self.pitch_feature_level = preprocess_config["preprocessing"]["pitch"][
11 | "feature"
12 | ]
13 | self.n_mel_channels = preprocess_config["preprocessing"]["mel"]["n_mel_channels"]
14 | self.mse_loss_sum = nn.MSELoss(reduction='sum')
15 | self.mse_loss = nn.MSELoss()
16 | # self.mae_loss = nn.L1Loss()
17 |
18 | def forward(self, inputs, predictions):
19 | (
20 | mel_targets,
21 | mel_lens_targets,
22 | _,
23 | pitch_targets,
24 | duration_targets,
25 | ) = inputs[6:]
26 | (
27 | mel_iters,
28 | pitch_predictions,
29 | log_duration_predictions,
30 | _,
31 | src_masks,
32 | mel_masks,
33 | _,
34 | _,
35 | ) = predictions
36 | src_masks = ~src_masks
37 | mel_masks = ~mel_masks
38 | log_duration_targets = torch.log(duration_targets.float() + 1)
39 | mel_targets = mel_targets[:, : mel_masks.shape[1], :]
40 | mel_masks = mel_masks[:, :mel_masks.shape[1]]
41 |
42 | log_duration_targets.requires_grad = False
43 | pitch_targets.requires_grad = False
44 | mel_targets.requires_grad = False
45 | mel_lens_targets.requires_grad = False
46 |
47 | pitch_predictions = pitch_predictions.masked_select(src_masks)
48 | pitch_targets = pitch_targets.masked_select(src_masks)
49 |
50 | log_duration_predictions = log_duration_predictions.masked_select(src_masks)
51 | log_duration_targets = log_duration_targets.masked_select(src_masks)
52 |
53 | mel_targets = mel_targets.masked_select(mel_masks.unsqueeze(-1))
54 |
55 | mel_loss = 0
56 | for mel_iter in mel_iters:
57 | mel_predictions = mel_iter.masked_select(mel_masks.unsqueeze(-1))
58 | mel_loss += self.mse_loss_sum(mel_predictions, mel_targets)
59 | mel_loss = (mel_loss / (self.n_mel_channels * mel_lens_targets)).mean()
60 |
61 | pitch_loss = self.mse_loss(pitch_predictions, pitch_targets)
62 | duration_loss = self.mse_loss(log_duration_predictions, log_duration_targets)
63 |
64 | total_loss = (
65 | mel_loss + duration_loss + pitch_loss
66 | )
67 |
68 | return (
69 | total_loss,
70 | mel_loss,
71 | pitch_loss,
72 | duration_loss,
73 | )
74 |
--------------------------------------------------------------------------------
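Note on the reduction above: mse_loss_sum accumulates squared error over every decoder iteration and the whole batch before the per-utterance scaling is applied, so the computed quantity is

    L_mel = ( sum_k sum_{b,t,c} ( M_hat[k][b,t,c] - M[b,t,c] )^2 ) * (1/B) sum_b 1 / (n_mel * T_b)

i.e. the total squared error scaled by the batch mean of 1/(n_mel * T_b), rather than a per-utterance normalized error averaged over the batch. The pitch and duration terms are plain element-wise MSE over non-padded positions.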
/model/modules.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import copy
4 | import math
5 | from collections import OrderedDict
6 |
7 | import torch
8 | import torch.nn as nn
9 | import numpy as np
10 | import torch.nn.functional as F
11 |
12 | from utils.tools import get_mask_from_lengths, pad
13 |
14 | from .blocks import (
15 | LinearNorm,
16 | Conv1DBlock,
17 | FFTBlock,
18 | )
19 | from text.symbols import symbols
20 |
21 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
22 |
23 |
24 | def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
25 | """ Sinusoid position encoding table """
26 |
27 | def cal_angle(position, hid_idx):
28 | return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
29 |
30 | def get_posi_angle_vec(position):
31 | return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
32 |
33 | sinusoid_table = np.array(
34 | [get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
35 | )
36 |
37 | sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
38 | sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
39 |
40 | if padding_idx is not None:
41 | # zero vector for padding dimension
42 | sinusoid_table[padding_idx] = 0.0
43 |
44 | return torch.FloatTensor(sinusoid_table)
45 |
46 |
47 | class TextEncoder(nn.Module):
48 | """ Text Encoder """
49 |
50 | def __init__(self, config):
51 | super(TextEncoder, self).__init__()
52 |
53 | n_position = config["max_seq_len"] + 1
54 | n_src_vocab = len(symbols) + 1
55 | d_word_vec = config["transformer"]["encoder_hidden"]
56 | n_layers = config["transformer"]["encoder_layer"]
57 | n_head = config["transformer"]["encoder_head"]
58 | d_k = d_v = (
59 | config["transformer"]["encoder_hidden"]
60 | // config["transformer"]["encoder_head"]
61 | )
62 | d_model = config["transformer"]["encoder_hidden"]
63 | d_inner = config["transformer"]["conv_filter_size"]
64 | kernel_size = config["transformer"]["conv_kernel_size"]
65 | dropout = config["transformer"]["encoder_dropout"]
66 |
67 | self.max_seq_len = config["max_seq_len"]
68 | self.d_model = d_model
69 |
70 | self.src_word_emb = nn.Embedding(
71 | n_src_vocab, d_word_vec, padding_idx=0
72 | )
73 | self.position_enc = nn.Parameter(
74 | get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
75 | requires_grad=False,
76 | )
77 |
78 | self.layer_stack = nn.ModuleList(
79 | [
80 | FFTBlock(
81 | d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=dropout
82 | )
83 | for _ in range(n_layers)
84 | ]
85 | )
86 |
87 | def forward(self, src_seq, mask, return_attns=False):
88 |
89 | enc_slf_attn_list = []
90 | batch_size, max_len = src_seq.shape[0], src_seq.shape[1]
91 |
92 | # -- Prepare masks
93 | slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
94 |
95 | # -- Forward
96 | if not self.training and src_seq.shape[1] > self.max_seq_len:
97 | enc_output = self.src_word_emb(src_seq) + get_sinusoid_encoding_table(
98 | src_seq.shape[1], self.d_model
99 | )[: src_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
100 | src_seq.device
101 | )
102 | else:
103 | enc_output = self.src_word_emb(src_seq) + self.position_enc[
104 | :, :max_len, :
105 | ].expand(batch_size, -1, -1)
106 |
107 | for enc_layer in self.layer_stack:
108 | enc_output, enc_slf_attn = enc_layer(
109 | enc_output, mask=mask, slf_attn_mask=slf_attn_mask
110 | )
111 | if return_attns:
112 | enc_slf_attn_list += [enc_slf_attn]
113 |
114 | return enc_output
115 |
116 |
117 | class Decoder(nn.Module):
118 | """ Spectrogram Decoder With Iterative Mel Prediction """
119 |
120 | def __init__(self, preprocess_config, model_config):
121 | super(Decoder, self).__init__()
122 |
123 | n_mel_channels = preprocess_config["preprocessing"]["mel"]["n_mel_channels"]
124 | n_layers = model_config["transformer"]["decoder_layer"]
125 | n_head = model_config["transformer"]["decoder_head"]
126 | d_k = d_v = (
127 | model_config["transformer"]["decoder_hidden"]
128 | // model_config["transformer"]["decoder_head"]
129 | )
130 | d_model = model_config["transformer"]["decoder_hidden"]
131 | d_inner = model_config["transformer"]["conv_filter_size"]
132 | kernel_size = model_config["transformer"]["conv_kernel_size"]
133 | dropout = model_config["transformer"]["decoder_dropout"]
134 |
135 | self.n_mel_channels = n_mel_channels
136 | self.max_seq_len = model_config["max_seq_len"]
137 | self.d_model = d_model
138 |
139 | self.n_layers = n_layers
140 | self.layer_stack = nn.ModuleList(
141 | [
142 | FFTBlock(
143 | d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=dropout
144 | )
145 | for _ in range(n_layers)
146 | ]
147 | )
148 |
149 | self.fc_layer_1 = LinearNorm(
150 | d_model*2, n_mel_channels*2
151 | )
152 | self.fc_layers = nn.ModuleList(
153 | [
154 | LinearNorm(
155 | d_model, n_mel_channels
156 | )
157 | for _ in range(n_layers)
158 | ]
159 | )
160 |
161 | def forward(self, formant_hidden, excitation_hidden, mask):
162 |
163 | mel_iters = list()
164 | max_len = formant_hidden.shape[1]
165 |
166 | # -- Prepare masks
167 | slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
168 |
169 | # -- FC 1
170 |         f_mel, e_mel = torch.split(self.fc_layer_1(torch.cat([formant_hidden, excitation_hidden], dim=-1)),
171 |                                    self.n_mel_channels, dim=-1)
172 | mel_iters.append(f_mel + e_mel)
173 |
174 | # -- FC 2, 3
175 | dec_output = formant_hidden + excitation_hidden
176 | for i, (dec_layer, linear) in enumerate(zip(self.layer_stack, self.fc_layers)):
177 | dec_output, dec_slf_attn = dec_layer(
178 | dec_output, mask=mask, slf_attn_mask=slf_attn_mask
179 | )
180 | mel_iters.append(
181 | linear(dec_output).masked_fill(mask.unsqueeze(-1), 0)
182 | )
183 |
184 | return mel_iters, mask
185 |
186 |
187 | class VarianceAdaptor(nn.Module):
188 | """ Variance Adaptor """
189 |
190 | def __init__(self, preprocess_config, model_config):
191 | super(VarianceAdaptor, self).__init__()
192 | self.duration_predictor = VariancePredictor(model_config)
193 | self.length_regulator = LengthRegulator()
194 | self.pitch_predictor = VariancePredictor(model_config)
195 |
196 | self.pitch_feature_level = preprocess_config["preprocessing"]["pitch"][
197 | "feature"
198 | ]
199 | assert self.pitch_feature_level == "phoneme_level" # should be phoneme level
200 |
201 | d_model = model_config["transformer"]["encoder_hidden"]
202 | kernel_size = model_config["variance_embedding"]["kernel_size"]
203 | self.pitch_embedding = Conv1DBlock(
204 | 1, d_model, kernel_size
205 | )
206 |
207 | def get_pitch_embedding(self, x, target, mask, control):
208 | prediction = self.pitch_predictor(x, mask)
209 | if target is not None:
210 | embedding = self.pitch_embedding(target.unsqueeze(-1))
211 | else:
212 | prediction = prediction * control
213 | embedding = self.pitch_embedding(prediction.unsqueeze(-1))
214 | return prediction, embedding
215 |
216 | def upsample(self, x, mel_mask, max_len, log_duration_prediction=None, duration_target=None, d_control=1.0):
217 | if duration_target is not None:
218 | x, mel_len = self.length_regulator(x, duration_target, max_len)
219 | duration_rounded = duration_target
220 | else:
221 | duration_rounded = torch.clamp(
222 | (torch.round(torch.exp(log_duration_prediction) - 1) * d_control),
223 | min=0,
224 | )
225 | x, mel_len = self.length_regulator(x, duration_rounded, max_len)
226 | mel_mask = get_mask_from_lengths(mel_len)
227 | return x, duration_rounded, mel_len, mel_mask
228 |
229 | def forward(
230 | self,
231 | x,
232 | speaker_embedding,
233 | src_mask,
234 | mel_mask=None,
235 | max_len=None,
236 | pitch_target=None,
237 | duration_target=None,
238 | p_control=1.0,
239 | d_control=1.0,
240 | ):
241 |
242 | log_duration_prediction = self.duration_predictor(x, src_mask)
243 | pitch_prediction, pitch_embedding = self.get_pitch_embedding(
244 | x, pitch_target, src_mask, p_control
245 | )
246 |
247 | if speaker_embedding is not None:
248 | pitch_embedding = pitch_embedding + speaker_embedding
249 |
250 | x, duration_rounded, mel_len, mel_mask = self.upsample(
251 |             torch.cat([x, pitch_embedding], dim=-1), mel_mask, max_len,
252 |             log_duration_prediction=log_duration_prediction, duration_target=duration_target, d_control=d_control
253 | )
254 |
255 | text_hidden, pitch_hidden = torch.split(x, x.shape[-1]//2, dim=-1)
256 |
257 | return (
258 | text_hidden,
259 | pitch_hidden,
260 | pitch_prediction,
261 | log_duration_prediction,
262 | duration_rounded,
263 | mel_len,
264 | mel_mask,
265 | )
266 |
267 |
268 | class Generator(nn.Module):
269 | """ Formant, Excitation Generator """
270 |
271 | def __init__(self, config, query_projection=False):
272 | super(Generator, self).__init__()
273 |
274 | n_position = config["max_seq_len"] + 1
275 | d_word_vec = config["transformer"]["encoder_hidden"]
276 | n_layers = config["transformer"]["generator_layer"]
277 | n_head = config["transformer"]["encoder_head"]
278 | d_k = d_v = (
279 | config["transformer"]["encoder_hidden"]
280 | // config["transformer"]["encoder_head"]
281 | )
282 | d_model = config["transformer"]["encoder_hidden"]
283 | d_inner = config["transformer"]["conv_filter_size"]
284 | kernel_size = config["transformer"]["conv_kernel_size"]
285 | dropout = config["transformer"]["encoder_dropout"]
286 |
287 | self.query_projection = query_projection
288 | self.max_seq_len = config["max_seq_len"]
289 | self.d_model = d_model
290 |
291 | self.position_enc = nn.Parameter(
292 | get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
293 | requires_grad=False,
294 | )
295 |
296 | self.layer_stack = nn.ModuleList(
297 | [
298 | FFTBlock(
299 | d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=dropout, query_projection=query_projection
300 | )
301 | for _ in range(n_layers)
302 | ]
303 | )
304 |
305 | def forward(self, hidden, mask, hidden_query=None):
306 |
307 | if self.query_projection:
308 | assert hidden_query is not None, "Query should be given for the Excitation Generator."
309 |
310 | batch_size, max_len = hidden.shape[0], hidden.shape[1]
311 |
312 | # -- Prepare masks
313 | slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
314 |
315 | # -- Forward
316 | if not self.training and hidden.shape[1] > self.max_seq_len:
317 | output = hidden + get_sinusoid_encoding_table(
318 | hidden.shape[1], self.d_model
319 | )[: hidden.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
320 | hidden.device
321 | )
322 | else:
323 | output = hidden + self.position_enc[
324 | :, :max_len, :
325 | ].expand(batch_size, -1, -1)
326 |
327 | for i, enc_layer in enumerate(self.layer_stack):
328 | output, enc_slf_attn = enc_layer(
329 | output, mask=mask, slf_attn_mask=slf_attn_mask, hidden_query=hidden_query if i==0 else None
330 | )
331 |
332 | return output
333 |
334 |
335 | class LengthRegulator(nn.Module):
336 | """ Length Regulator """
337 |
338 | def __init__(self):
339 | super(LengthRegulator, self).__init__()
340 |
341 | def LR(self, x, duration, max_len):
342 | output = list()
343 | mel_len = list()
344 | for batch, expand_target in zip(x, duration):
345 | expanded = self.expand(batch, expand_target)
346 | output.append(expanded)
347 | mel_len.append(expanded.shape[0])
348 |
349 | if max_len is not None:
350 | output = pad(output, max_len)
351 | else:
352 | output = pad(output)
353 |
354 | return output, torch.LongTensor(mel_len).to(device)
355 |
356 | def expand(self, batch, predicted):
357 | out = list()
358 |
359 | for i, vec in enumerate(batch):
360 | expand_size = predicted[i].item()
361 | out.append(vec.expand(max(int(expand_size), 0), -1))
362 | out = torch.cat(out, 0)
363 |
364 | return out
365 |
366 | def forward(self, x, duration, max_len):
367 | output, mel_len = self.LR(x, duration, max_len)
368 | return output, mel_len
369 |
370 |
371 | class VariancePredictor(nn.Module):
372 | """ Duration, Pitch Predictor """
373 |
374 | def __init__(self, model_config):
375 | super(VariancePredictor, self).__init__()
376 |
377 | self.input_size = model_config["transformer"]["encoder_hidden"]
378 | self.filter_size = model_config["variance_predictor"]["filter_size"]
379 | self.kernel = model_config["variance_predictor"]["kernel_size"]
380 | self.conv_output_size = model_config["variance_predictor"]["filter_size"]
381 | self.dropout = model_config["variance_predictor"]["dropout"]
382 |
383 | self.conv_layer = nn.Sequential(
384 | OrderedDict(
385 | [
386 | (
387 | "conv1d_1",
388 | Conv(
389 | self.input_size,
390 | self.filter_size,
391 | kernel_size=self.kernel,
392 | padding=(self.kernel - 1) // 2,
393 | ),
394 | ),
395 | ("relu_1", nn.ReLU()),
396 | ("layer_norm_1", nn.LayerNorm(self.filter_size)),
397 | ("dropout_1", nn.Dropout(self.dropout)),
398 | (
399 | "conv1d_2",
400 | Conv(
401 | self.filter_size,
402 | self.filter_size,
403 | kernel_size=self.kernel,
404 |                             padding=(self.kernel - 1) // 2,
405 | ),
406 | ),
407 | ("relu_2", nn.ReLU()),
408 | ("layer_norm_2", nn.LayerNorm(self.filter_size)),
409 | ("dropout_2", nn.Dropout(self.dropout)),
410 | ]
411 | )
412 | )
413 |
414 | self.linear_layer = nn.Linear(self.conv_output_size, 1)
415 |
416 | def forward(self, encoder_output, mask):
417 | out = self.conv_layer(encoder_output)
418 | out = self.linear_layer(out)
419 | out = out.squeeze(-1)
420 |
421 | if mask is not None:
422 | out = out.masked_fill(mask, 0.0)
423 |
424 | return out
425 |
426 |
427 | class Conv(nn.Module):
428 | """
429 | Convolution Module
430 | """
431 |
432 | def __init__(
433 | self,
434 | in_channels,
435 | out_channels,
436 | kernel_size=1,
437 | stride=1,
438 | padding=0,
439 | dilation=1,
440 | bias=True,
441 | w_init="linear",
442 | ):
443 | """
444 | :param in_channels: dimension of input
445 | :param out_channels: dimension of output
446 | :param kernel_size: size of kernel
447 | :param stride: size of stride
448 | :param padding: size of padding
449 | :param dilation: dilation rate
450 | :param bias: boolean. if True, bias is included.
451 | :param w_init: str. weight inits with xavier initialization.
452 | """
453 | super(Conv, self).__init__()
454 |
455 | self.conv = nn.Conv1d(
456 | in_channels,
457 | out_channels,
458 | kernel_size=kernel_size,
459 | stride=stride,
460 | padding=padding,
461 | dilation=dilation,
462 | bias=bias,
463 | )
464 |
465 | def forward(self, x):
466 | x = x.contiguous().transpose(1, 2)
467 | x = self.conv(x)
468 | x = x.contiguous().transpose(1, 2)
469 |
470 | return x
471 |
--------------------------------------------------------------------------------
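A tiny worked example of the expansion LengthRegulator.expand performs: each phoneme-level vector is repeated duration times along the time axis (durations come from the target alignment at training time, or the rounded predictor output at inference):

    import torch

    x = torch.tensor([[1.0], [2.0], [3.0]])  # 3 phonemes, 1-dim features
    durations = torch.tensor([2, 0, 3])      # frames per phoneme
    expanded = torch.cat(
        [v.expand(max(int(d), 0), -1) for v, d in zip(x, durations)], dim=0
    )
    print(expanded.squeeze(-1))              # tensor([1., 1., 3., 3., 3.])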
/model/optimizer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | class ScheduledOptim:
6 | """ A simple wrapper class for learning rate scheduling """
7 |
8 | def __init__(self, model, train_config, model_config, current_step):
9 |
10 | self._optimizer = torch.optim.Adam(
11 | model.parameters(),
12 | betas=train_config["optimizer"]["betas"],
13 | eps=train_config["optimizer"]["eps"],
14 | weight_decay=train_config["optimizer"]["weight_decay"],
15 | )
16 | self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
17 | self.anneal_steps = train_config["optimizer"]["anneal_steps"]
18 | self.anneal_rate = train_config["optimizer"]["anneal_rate"]
19 | self.current_step = current_step
20 | self.init_lr = train_config["optimizer"]["init_lr"]
21 |
22 | def step_and_update_lr(self):
23 | self._update_learning_rate()
24 | self._optimizer.step()
25 |
26 | def zero_grad(self):
27 | # print(self.init_lr)
28 | self._optimizer.zero_grad()
29 |
30 |     def load_state_dict(self, state_dict):
31 |         self._optimizer.load_state_dict(state_dict)
32 |
33 | def _get_lr_scale(self):
34 | lr = np.min(
35 | [
36 | np.power(self.current_step, -0.5),
37 | np.power(self.n_warmup_steps, -1.5) * self.current_step,
38 | ]
39 | )
40 | for s in self.anneal_steps:
41 | if self.current_step > s:
42 | lr = lr * self.anneal_rate
43 | return lr
44 |
45 | def _update_learning_rate(self):
46 | """ Learning rate scheduling per step """
47 | self.current_step += 1
48 | lr = self.init_lr * self._get_lr_scale()
49 |
50 | for param_group in self._optimizer.param_groups:
51 | param_group["lr"] = lr
52 |
--------------------------------------------------------------------------------
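_get_lr_scale is the Transformer warm-up ("Noam") schedule with optional step annealing. A standalone sketch of the same rule, handy for plotting the curve before training (parameter values are placeholders):

    def lr_at(step, init_lr, warmup, anneal_steps=(), anneal_rate=1.0):
        scale = min(step ** -0.5, step * warmup ** -1.5)
        for s in anneal_steps:
            if step > s:
                scale *= anneal_rate
        return init_lr * scale

    # lr grows roughly linearly up to `warmup`, then decays ~ step**-0.5
    print(lr_at(4000, init_lr=1.0, warmup=4000))  # peak of the schedule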
/prepare_align.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import yaml
4 |
5 | from preprocessor import ljspeech
6 |
7 |
8 | def main(config):
9 | if "LJSpeech" in config["dataset"]:
10 | ljspeech.prepare_align(config)
11 |
12 |
13 | if __name__ == "__main__":
14 | parser = argparse.ArgumentParser()
15 | parser.add_argument("config", type=str, help="path to preprocess.yaml")
16 | args = parser.parse_args()
17 |
18 | config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
19 | main(config)
20 |
--------------------------------------------------------------------------------
/preprocess.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import yaml
4 |
5 | from preprocessor.preprocessor import Preprocessor
6 |
7 |
8 | if __name__ == "__main__":
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument("config", type=str, help="path to preprocess.yaml")
11 | args = parser.parse_args()
12 |
13 | config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
14 | preprocessor = Preprocessor(config)
15 | preprocessor.build_from_path()
16 |
--------------------------------------------------------------------------------
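Both entry points take the same YAML, per their argument parsers (the path below assumes the bundled LJSpeech config layout):

    python prepare_align.py config/LJSpeech/preprocess.yaml
    python preprocess.py config/LJSpeech/preprocess.yaml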
/preprocessed_data/LJSpeech/speakers.json:
--------------------------------------------------------------------------------
1 | {"LJSpeech": 0}
--------------------------------------------------------------------------------
/preprocessed_data/LJSpeech/stats.json:
--------------------------------------------------------------------------------
1 | {"pitch": [-2.9170793047299424, 11.391254536985798, 207.63098600265945, 46.77559025098991], "energy": [-1.431044578552246, 8.184337615966797, 37.32621679053811, 26.04418078283586]}
--------------------------------------------------------------------------------
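Format note: each stats array is [min, max, mean, std] of the normalized feature, matching the order written by Preprocessor.build_from_path below; of the two entries, only "pitch" is produced by this repo's preprocessor.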
/preprocessor/ljspeech.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import librosa
4 | import numpy as np
5 | from scipy.io import wavfile
6 | from tqdm import tqdm
7 |
8 | from text import _clean_text
9 |
10 |
11 | def prepare_align(config):
12 | in_dir = config["path"]["corpus_path"]
13 | out_dir = config["path"]["raw_path"]
14 | sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
15 | max_wav_value = config["preprocessing"]["audio"]["max_wav_value"]
16 | cleaners = config["preprocessing"]["text"]["text_cleaners"]
17 | speaker = "LJSpeech"
18 | with open(os.path.join(in_dir, "metadata.csv"), encoding="utf-8") as f:
19 | for line in tqdm(f):
20 | parts = line.strip().split("|")
21 | base_name = parts[0]
22 | text = parts[2]
23 | text = _clean_text(text, cleaners)
24 |
25 | wav_path = os.path.join(in_dir, "wavs", "{}.wav".format(base_name))
26 | if os.path.exists(wav_path):
27 | os.makedirs(os.path.join(out_dir, speaker), exist_ok=True)
28 |                 wav, _ = librosa.load(wav_path, sr=sampling_rate)
29 | wav = wav / max(abs(wav)) * max_wav_value
30 | wavfile.write(
31 | os.path.join(out_dir, speaker, "{}.wav".format(base_name)),
32 | sampling_rate,
33 | wav.astype(np.int16),
34 | )
35 | with open(
36 | os.path.join(out_dir, speaker, "{}.lab".format(base_name)),
37 | "w",
38 | ) as f1:
39 | f1.write(text)
--------------------------------------------------------------------------------
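prepare_align assumes the standard pipe-delimited LJSpeech metadata.csv layout, reading the file id from the first column and the normalized transcription from the third (parts[2]); an illustrative line:

    # LJ001-0001|<raw transcription>|<normalized transcription>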
/preprocessor/preprocessor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import json
4 |
5 | import tgt
6 | import librosa
7 | import numpy as np
8 | import pyworld as pw
9 | from scipy.interpolate import interp1d
10 | from sklearn.preprocessing import StandardScaler
11 | from tqdm import tqdm
12 |
13 | import audio as Audio
14 |
15 |
16 | class Preprocessor:
17 | def __init__(self, config):
18 | self.config = config
19 | self.in_dir = config["path"]["raw_path"]
20 | self.out_dir = config["path"]["preprocessed_path"]
21 | self.val_size = config["preprocessing"]["val_size"]
22 | self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
23 | self.hop_length = config["preprocessing"]["stft"]["hop_length"]
24 |
25 | assert config["preprocessing"]["pitch"]["feature"] == "phoneme_level" # should be phoneme level
26 |
27 | self.pitch_phoneme_averaging = (
28 | config["preprocessing"]["pitch"]["feature"] == "phoneme_level"
29 | )
30 |
31 | self.pitch_normalization = config["preprocessing"]["pitch"]["normalization"]
32 |
33 | self.STFT = Audio.stft.TacotronSTFT(
34 | config["preprocessing"]["stft"]["filter_length"],
35 | config["preprocessing"]["stft"]["hop_length"],
36 | config["preprocessing"]["stft"]["win_length"],
37 | config["preprocessing"]["mel"]["n_mel_channels"],
38 | config["preprocessing"]["audio"]["sampling_rate"],
39 | config["preprocessing"]["mel"]["mel_fmin"],
40 | config["preprocessing"]["mel"]["mel_fmax"],
41 | )
42 |
43 | def build_from_path(self):
44 | os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)
45 | os.makedirs((os.path.join(self.out_dir, "pitch")), exist_ok=True)
46 | os.makedirs((os.path.join(self.out_dir, "duration")), exist_ok=True)
47 |
48 | print("Processing Data ...")
49 | out = list()
50 | n_frames = 0
51 | pitch_scaler = StandardScaler()
52 |
53 | # Compute pitch, duration, and mel-spectrogram
54 | speakers = {}
55 | for i, speaker in enumerate(tqdm(os.listdir(self.in_dir))):
56 | speakers[speaker] = i
57 | for wav_name in tqdm(os.listdir(os.path.join(self.in_dir, speaker))):
58 | if ".wav" not in wav_name:
59 | continue
60 |
61 | basename = wav_name.split(".")[0]
62 | tg_path = os.path.join(
63 | self.out_dir, "TextGrid", speaker, "{}.TextGrid".format(basename)
64 | )
65 | if os.path.exists(tg_path):
66 | ret = self.process_utterance(speaker, basename)
67 | if ret is None:
68 | continue
69 | else:
70 | info, pitch, n = ret
71 | out.append(info)
72 |
73 | if len(pitch) > 0:
74 | pitch_scaler.partial_fit(pitch.reshape((-1, 1)))
75 |
76 | n_frames += n
77 |
78 |         print("Computing statistics ...")
79 | # Perform normalization if necessary
80 | if self.pitch_normalization:
81 | pitch_mean = pitch_scaler.mean_[0]
82 | pitch_std = pitch_scaler.scale_[0]
83 | else:
84 | # A numerical trick to avoid normalization...
85 | pitch_mean = 0
86 | pitch_std = 1
87 |
88 | pitch_min, pitch_max = self.normalize(
89 | os.path.join(self.out_dir, "pitch"), pitch_mean, pitch_std
90 | )
91 |
92 | # Save files
93 | with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
94 | f.write(json.dumps(speakers))
95 |
96 | with open(os.path.join(self.out_dir, "stats.json"), "w") as f:
97 | stats = {
98 | "pitch": [
99 | float(pitch_min),
100 | float(pitch_max),
101 | float(pitch_mean),
102 | float(pitch_std),
103 | ],
104 | }
105 | f.write(json.dumps(stats))
106 |
107 | print(
108 | "Total time: {} hours".format(
109 | n_frames * self.hop_length / self.sampling_rate / 3600
110 | )
111 | )
112 |
113 | random.shuffle(out)
114 | out = [r for r in out if r is not None]
115 |
116 | # Write metadata
117 | with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
118 | for m in out[self.val_size :]:
119 | f.write(m + "\n")
120 | with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
121 | for m in out[: self.val_size]:
122 | f.write(m + "\n")
123 |
124 | return out
125 |
126 | def process_utterance(self, speaker, basename):
127 | wav_path = os.path.join(self.in_dir, speaker, "{}.wav".format(basename))
128 | text_path = os.path.join(self.in_dir, speaker, "{}.lab".format(basename))
129 | tg_path = os.path.join(
130 | self.out_dir, "TextGrid", speaker, "{}.TextGrid".format(basename)
131 | )
132 |
133 | # Get alignments
134 | textgrid = tgt.io.read_textgrid(tg_path)
135 | phone, duration, start, end = self.get_alignment(
136 | textgrid.get_tier_by_name("phones")
137 | )
138 | text = "{" + " ".join(phone) + "}"
139 | if start >= end:
140 | return None
141 |
142 | # Read and trim wav files
143 |         wav, _ = librosa.load(wav_path, sr=self.sampling_rate)  # resample to the configured rate
144 | wav = wav[
145 | int(self.sampling_rate * start) : int(self.sampling_rate * end)
146 | ].astype(np.float32)
147 |
148 | # Read raw text
149 | with open(text_path, "r") as f:
150 | raw_text = f.readline().strip("\n")
151 |
152 | # Compute fundamental frequency
153 | pitch, t = pw.dio(
154 | wav.astype(np.float64),
155 | self.sampling_rate,
156 | frame_period=self.hop_length / self.sampling_rate * 1000,
157 | )
158 | pitch = pw.stonemask(wav.astype(np.float64), pitch, t, self.sampling_rate)
159 |
160 | pitch = pitch[: sum(duration)]
161 | if np.sum(pitch != 0) <= 1:
162 | return None
163 |
164 | # Compute mel-scale spectrogram and energy
165 | mel_spectrogram, _ = Audio.tools.get_mel_from_wav(wav, self.STFT)
166 | mel_spectrogram = mel_spectrogram[:, : sum(duration)]
167 |
168 | if self.pitch_phoneme_averaging:
169 | # perform linear interpolation
170 | nonzero_ids = np.where(pitch != 0)[0]
171 | interp_fn = interp1d(
172 | nonzero_ids,
173 | pitch[nonzero_ids],
174 | fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
175 | bounds_error=False,
176 | )
177 | pitch = interp_fn(np.arange(0, len(pitch)))
178 |
179 | # Phoneme-level average
180 | pos = 0
181 | for i, d in enumerate(duration):
182 | if d > 0:
183 | pitch[i] = np.mean(pitch[pos : pos + d])
184 | else:
185 | pitch[i] = 0
186 | pos += d
187 | pitch = pitch[: len(duration)]
188 |
189 | # Save files
190 | dur_filename = "{}-duration-{}.npy".format(speaker, basename)
191 | np.save(os.path.join(self.out_dir, "duration", dur_filename), duration)
192 |
193 | pitch_filename = "{}-pitch-{}.npy".format(speaker, basename)
194 | np.save(os.path.join(self.out_dir, "pitch", pitch_filename), pitch)
195 |
196 | mel_filename = "{}-mel-{}.npy".format(speaker, basename)
197 | np.save(
198 | os.path.join(self.out_dir, "mel", mel_filename),
199 | mel_spectrogram.T,
200 | )
201 |
202 | return (
203 | "|".join([basename, speaker, text, raw_text]),
204 | self.remove_outlier(pitch),
205 | mel_spectrogram.shape[1],
206 | )
207 |
208 | def get_alignment(self, tier):
209 | sil_phones = ["sil", "sp", "spn"]
210 |
211 | phones = []
212 | durations = []
213 | start_time = 0
214 | end_time = 0
215 | end_idx = 0
216 | for t in tier._objects:
217 | s, e, p = t.start_time, t.end_time, t.text
218 |
219 | # Trim leading silences
220 | if phones == []:
221 | if p in sil_phones:
222 | continue
223 | else:
224 | start_time = s
225 |
226 | if p not in sil_phones:
227 | # For ordinary phones
228 | phones.append(p)
229 | end_time = e
230 | end_idx = len(phones)
231 | else:
232 | # For silent phones
233 | phones.append(p)
234 |
235 | durations.append(
236 | int(
237 | np.round(e * self.sampling_rate / self.hop_length)
238 | - np.round(s * self.sampling_rate / self.hop_length)
239 | )
240 | )
241 |
242 | # Trim tailing silences
243 | phones = phones[:end_idx]
244 | durations = durations[:end_idx]
245 |
246 | return phones, durations, start_time, end_time
247 |
248 | def remove_outlier(self, values):
249 | values = np.array(values)
250 | p25 = np.percentile(values, 25)
251 | p75 = np.percentile(values, 75)
252 | lower = p25 - 1.5 * (p75 - p25)
253 | upper = p75 + 1.5 * (p75 - p25)
254 | normal_indices = np.logical_and(values > lower, values < upper)
255 |
256 | return values[normal_indices]
257 |
258 | def normalize(self, in_dir, mean, std):
259 | max_value = np.finfo(np.float64).min
260 | min_value = np.finfo(np.float64).max
261 | for filename in os.listdir(in_dir):
262 | filename = os.path.join(in_dir, filename)
263 | values = (np.load(filename) - mean) / std
264 | np.save(filename, values)
265 |
266 | max_value = max(max_value, max(values))
267 | min_value = min(min_value, min(values))
268 |
269 | return min_value, max_value
270 |
--------------------------------------------------------------------------------
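A tiny example of the unvoiced-frame interpolation used in process_utterance: zero (unvoiced) F0 frames are replaced by linear interpolation between neighboring voiced values, with edge frames held at the nearest voiced value:

    import numpy as np
    from scipy.interpolate import interp1d

    pitch = np.array([0.0, 110.0, 0.0, 0.0, 220.0, 0.0])
    nz = np.where(pitch != 0)[0]
    fn = interp1d(nz, pitch[nz], fill_value=(pitch[nz[0]], pitch[nz[-1]]),
                  bounds_error=False)
    print(fn(np.arange(len(pitch))))  # [110. 110. ~146.67 ~183.33 220. 220.]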
/requirements.txt:
--------------------------------------------------------------------------------
1 | g2p-en==2.1.0
2 | inflect==4.1.0
3 | librosa==0.7.2
4 | matplotlib==3.4.2
5 | # numba==0.48
6 | # numpy==1.19.0
7 | pypinyin==0.39.0
8 | pyworld==0.3.0
9 | PyYAML==5.4.1
10 | scikit-learn==0.23.2
11 | scipy==1.6.3
12 | soundfile==0.10.3.post1
13 | tensorboard==2.2.2
14 | tgt==1.4.4
15 | torch==1.8.1
16 | tqdm==4.46.1
17 | unidecode==1.1.1
--------------------------------------------------------------------------------
/synthesize.py:
--------------------------------------------------------------------------------
1 | import re
2 | import argparse
3 | from string import punctuation
4 |
5 | import torch
6 | import yaml
7 | import numpy as np
8 | from torch.utils.data import DataLoader
9 | from g2p_en import G2p
10 | from pypinyin import pinyin, Style
11 |
12 | from utils.model import get_model, get_vocoder
13 | from utils.tools import to_device, synth_samples
14 | from dataset import TextDataset
15 | from text import text_to_sequence
16 |
17 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
18 |
19 |
20 | def read_lexicon(lex_path):
21 | lexicon = {}
22 | with open(lex_path) as f:
23 | for line in f:
24 | temp = re.split(r"\s+", line.strip("\n"))
25 | word = temp[0]
26 | phones = temp[1:]
27 | if word.lower() not in lexicon:
28 | lexicon[word.lower()] = phones
29 | return lexicon
30 |
31 |
32 | def preprocess_english(text, preprocess_config):
33 | text = text.rstrip(punctuation)
34 | lexicon = read_lexicon(preprocess_config["path"]["lexicon_path"])
35 |
36 | g2p = G2p()
37 | phones = []
38 | words = re.split(r"([,;.\-\?\!\s+])", text)
39 | for w in words:
40 | if w.lower() in lexicon:
41 | phones += lexicon[w.lower()]
42 | else:
43 | phones += list(filter(lambda p: p != " ", g2p(w)))
44 | phones = "{" + "}{".join(phones) + "}"
45 | phones = re.sub(r"\{[^\w\s]?\}", "{sp}", phones)
46 | phones = phones.replace("}{", " ")
47 |
48 | print("Raw Text Sequence: {}".format(text))
49 | print("Phoneme Sequence: {}".format(phones))
50 | sequence = np.array(
51 | text_to_sequence(
52 | phones, preprocess_config["preprocessing"]["text"]["text_cleaners"]
53 | )
54 | )
55 |
56 |     return sequence
57 |
58 |
59 | def preprocess_mandarin(text, preprocess_config):
60 | lexicon = read_lexicon(preprocess_config["path"]["lexicon_path"])
61 |
62 | phones = []
63 | pinyins = [
64 | p[0]
65 | for p in pinyin(
66 | text, style=Style.TONE3, strict=False, neutral_tone_with_five=True
67 | )
68 | ]
69 | for p in pinyins:
70 | if p in lexicon:
71 | phones += lexicon[p]
72 | else:
73 | phones.append("sp")
74 |
75 | phones = "{" + " ".join(phones) + "}"
76 | print("Raw Text Sequence: {}".format(text))
77 | print("Phoneme Sequence: {}".format(phones))
78 | sequence = np.array(
79 | text_to_sequence(
80 | phones, preprocess_config["preprocessing"]["text"]["text_cleaners"]
81 | )
82 | )
83 |
84 |     return sequence
85 |
86 |
87 | def synthesize(model, step, configs, vocoder, batchs, control_values):
88 | preprocess_config, model_config, train_config = configs
89 | pitch_control, duration_control = control_values
90 |
91 | for batch in batchs:
92 | batch = to_device(batch, device)
93 | with torch.no_grad():
94 | # Forward
95 | output = model(
96 | *(batch[2:]),
97 | p_control=pitch_control,
98 | d_control=duration_control
99 | )
100 | synth_samples(
101 | batch,
102 | output,
103 | vocoder,
104 | model_config,
105 | preprocess_config,
106 | train_config["path"]["result_path"],
107 | )
108 |
109 |
110 | if __name__ == "__main__":
111 |
112 | parser = argparse.ArgumentParser()
113 | parser.add_argument("--restore_step", type=int, required=True)
114 | parser.add_argument(
115 | "--mode",
116 | type=str,
117 | choices=["batch", "single"],
118 | required=True,
119 | help="Synthesize a whole dataset or a single sentence",
120 | )
121 | parser.add_argument(
122 | "--source",
123 | type=str,
124 | default=None,
125 | help="path to a source file with format like train.txt and val.txt, for batch mode only",
126 | )
127 | parser.add_argument(
128 | "--text",
129 | type=str,
130 | default=None,
131 | help="raw text to synthesize, for single-sentence mode only",
132 | )
133 | parser.add_argument(
134 | "--speaker_id",
135 | type=int,
136 | default=0,
137 | help="speaker ID for multi-speaker synthesis, for single-sentence mode only",
138 | )
139 | parser.add_argument(
140 | "-p",
141 | "--preprocess_config",
142 | type=str,
143 | required=True,
144 | help="path to preprocess.yaml",
145 | )
146 | parser.add_argument(
147 | "-m", "--model_config", type=str, required=True, help="path to model.yaml"
148 | )
149 | parser.add_argument(
150 | "-t", "--train_config", type=str, required=True, help="path to train.yaml"
151 | )
152 | parser.add_argument(
153 | "--pitch_control",
154 | type=float,
155 | default=1.0,
156 | help="control the pitch of the whole utterance, larger value for higher pitch",
157 | )
158 | parser.add_argument(
159 | "--duration_control",
160 | type=float,
161 | default=1.0,
162 | help="control the speed of the whole utterance, larger value for slower speaking rate",
163 | )
164 | args = parser.parse_args()
165 |
166 | # Check source texts
167 | if args.mode == "batch":
168 | assert args.source is not None and args.text is None
169 | if args.mode == "single":
170 | assert args.source is None and args.text is not None
171 |
172 | # Read Config
173 | preprocess_config = yaml.load(
174 | open(args.preprocess_config, "r"), Loader=yaml.FullLoader
175 | )
176 | model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
177 | train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
178 | configs = (preprocess_config, model_config, train_config)
179 |
180 | # Get model
181 | model = get_model(args, configs, device, train=False)
182 |
183 | # Load vocoder
184 | vocoder = get_vocoder(model_config, device)
185 |
186 | # Preprocess texts
187 | if args.mode == "batch":
188 | # Get dataset
189 | dataset = TextDataset(args.source, preprocess_config)
190 | batchs = DataLoader(
191 | dataset,
192 | batch_size=8,
193 | collate_fn=dataset.collate_fn,
194 | )
195 | if args.mode == "single":
196 | ids = raw_texts = [args.text[:100]]
197 | speakers = np.array([args.speaker_id])
198 | if preprocess_config["preprocessing"]["text"]["language"] == "en":
199 | texts = np.array([preprocess_english(args.text, preprocess_config)])
200 | elif preprocess_config["preprocessing"]["text"]["language"] == "zh":
201 | texts = np.array([preprocess_mandarin(args.text, preprocess_config)])
202 | text_lens = np.array([len(texts[0])])
203 | batchs = [(ids, raw_texts, speakers, texts, text_lens, max(text_lens))]
204 |
205 | control_values = args.pitch_control, args.duration_control
206 |
207 | synthesize(model, args.restore_step, configs, vocoder, batchs, control_values)
208 |
--------------------------------------------------------------------------------
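The `single` mode above assembles a one-sentence batch by hand instead of going through `TextDataset`. A minimal sketch of that path, assuming this checkout's `config/LJSpeech/preprocess.yaml` and a valid `lexicon_path` inside it:

    import yaml
    import numpy as np

    from synthesize import preprocess_english

    preprocess_config = yaml.load(
        open("config/LJSpeech/preprocess.yaml", "r"), Loader=yaml.FullLoader
    )
    text = "Turn left on Houston Street."
    ids = raw_texts = [text[:100]]
    speakers = np.array([0])  # single-speaker LJSpeech
    texts = np.array([preprocess_english(text, preprocess_config)])
    text_lens = np.array([len(texts[0])])
    batch = (ids, raw_texts, speakers, texts, text_lens, max(text_lens))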
/text/__init__.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 | import re
3 | from text import cleaners
4 | from text.symbols import symbols
5 |
6 |
7 | # Mappings from symbol to numeric ID and vice versa:
8 | _symbol_to_id = {s: i for i, s in enumerate(symbols)}
9 | _id_to_symbol = {i: s for i, s in enumerate(symbols)}
10 |
11 | # Regular expression matching text enclosed in curly braces:
12 | _curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
13 |
14 |
15 | def text_to_sequence(text, cleaner_names):
16 | """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
17 |
18 | The text can optionally have ARPAbet sequences enclosed in curly braces embedded
19 | in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
20 |
21 | Args:
22 | text: string to convert to a sequence
23 | cleaner_names: names of the cleaner functions to run the text through
24 |
25 | Returns:
26 | List of integers corresponding to the symbols in the text
27 | """
28 | sequence = []
29 |
30 | # Check for curly braces and treat their contents as ARPAbet:
31 | while len(text):
32 | m = _curly_re.match(text)
33 |
34 | if not m:
35 | sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
36 | break
37 | sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
38 | sequence += _arpabet_to_sequence(m.group(2))
39 | text = m.group(3)
40 |
41 | return sequence
42 |
43 |
44 | def sequence_to_text(sequence):
45 | """Converts a sequence of IDs back to a string"""
46 | result = ""
47 | for symbol_id in sequence:
48 | if symbol_id in _id_to_symbol:
49 | s = _id_to_symbol[symbol_id]
50 | # Enclose ARPAbet back in curly braces:
51 | if len(s) > 1 and s[0] == "@":
52 | s = "{%s}" % s[1:]
53 | result += s
54 | return result.replace("}{", " ")
55 |
56 |
57 | def _clean_text(text, cleaner_names):
58 | for name in cleaner_names:
59 | cleaner = getattr(cleaners, name)
60 | if not cleaner:
61 | raise Exception("Unknown cleaner: %s" % name)
62 | text = cleaner(text)
63 | return text
64 |
65 |
66 | def _symbols_to_sequence(symbols):
67 | return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
68 |
69 |
70 | def _arpabet_to_sequence(text):
71 | return _symbols_to_sequence(["@" + s for s in text.split()])
72 |
73 |
74 | def _should_keep_symbol(s):
75 | return s in _symbol_to_id and s != "_" and s != "~"
76 |
--------------------------------------------------------------------------------
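The curly-brace convention described in the `text_to_sequence` docstring can be exercised directly; a small sketch, assuming `english_cleaners` and its dependencies are installed:

    from text import sequence_to_text, text_to_sequence

    # Phones inside braces bypass the cleaners and go through _arpabet_to_sequence.
    seq = text_to_sequence("Turn left on {HH AW1 S T AH0 N} Street.", ["english_cleaners"])
    print(seq)                    # list of integer symbol IDs
    print(sequence_to_text(seq))  # ARPAbet comes back wrapped in braces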
/text/cleaners.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 |
3 | '''
4 | Cleaners are transformations that run over the input text at both training and eval time.
5 |
6 | Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7 | hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8 | 1. "english_cleaners" for English text
9 | 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10 | the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11 | 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12 | the symbols in symbols.py to match your data).
13 | '''
14 |
15 |
16 | import re
17 | from unidecode import unidecode
18 | from .numbers import normalize_numbers
19 | # Regular expression matching whitespace:
20 | _whitespace_re = re.compile(r'\s+')
21 |
22 | # List of (regular expression, replacement) pairs for abbreviations:
23 | _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
24 | ('mrs', 'misess'),
25 | ('mr', 'mister'),
26 | ('dr', 'doctor'),
27 | ('st', 'saint'),
28 | ('co', 'company'),
29 | ('jr', 'junior'),
30 | ('maj', 'major'),
31 | ('gen', 'general'),
32 | ('drs', 'doctors'),
33 | ('rev', 'reverend'),
34 | ('lt', 'lieutenant'),
35 | ('hon', 'honorable'),
36 | ('sgt', 'sergeant'),
37 | ('capt', 'captain'),
38 | ('esq', 'esquire'),
39 | ('ltd', 'limited'),
40 | ('col', 'colonel'),
41 | ('ft', 'fort'),
42 | ]]
43 |
44 |
45 | def expand_abbreviations(text):
46 | for regex, replacement in _abbreviations:
47 | text = re.sub(regex, replacement, text)
48 | return text
49 |
50 |
51 | def expand_numbers(text):
52 | return normalize_numbers(text)
53 |
54 |
55 | def lowercase(text):
56 | return text.lower()
57 |
58 |
59 | def collapse_whitespace(text):
60 | return re.sub(_whitespace_re, ' ', text)
61 |
62 |
63 | def convert_to_ascii(text):
64 | return unidecode(text)
65 |
66 |
67 | def basic_cleaners(text):
68 | '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
69 | text = lowercase(text)
70 | text = collapse_whitespace(text)
71 | return text
72 |
73 |
74 | def transliteration_cleaners(text):
75 | '''Pipeline for non-English text that transliterates to ASCII.'''
76 | text = convert_to_ascii(text)
77 | text = lowercase(text)
78 | text = collapse_whitespace(text)
79 | return text
80 |
81 |
82 | def english_cleaners(text):
83 | '''Pipeline for English text, including number and abbreviation expansion.'''
84 | text = convert_to_ascii(text)
85 | text = lowercase(text)
86 | text = expand_numbers(text)
87 | text = expand_abbreviations(text)
88 | text = collapse_whitespace(text)
89 | return text
90 |
--------------------------------------------------------------------------------
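To see what `english_cleaners` does end to end (ASCII transliteration, lowercasing, number expansion, abbreviation expansion, whitespace collapsing), a quick sketch:

    from text.cleaners import english_cleaners

    # Prints roughly: "doctor smith paid three dollars, fifty cents on the twenty-fifth."
    print(english_cleaners("Dr. Smith paid $3.50 on the 25th."))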
/text/cmudict.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 |
3 | import re
4 |
5 |
6 | valid_symbols = [
7 | "AA",
8 | "AA0",
9 | "AA1",
10 | "AA2",
11 | "AE",
12 | "AE0",
13 | "AE1",
14 | "AE2",
15 | "AH",
16 | "AH0",
17 | "AH1",
18 | "AH2",
19 | "AO",
20 | "AO0",
21 | "AO1",
22 | "AO2",
23 | "AW",
24 | "AW0",
25 | "AW1",
26 | "AW2",
27 | "AY",
28 | "AY0",
29 | "AY1",
30 | "AY2",
31 | "B",
32 | "CH",
33 | "D",
34 | "DH",
35 | "EH",
36 | "EH0",
37 | "EH1",
38 | "EH2",
39 | "ER",
40 | "ER0",
41 | "ER1",
42 | "ER2",
43 | "EY",
44 | "EY0",
45 | "EY1",
46 | "EY2",
47 | "F",
48 | "G",
49 | "HH",
50 | "IH",
51 | "IH0",
52 | "IH1",
53 | "IH2",
54 | "IY",
55 | "IY0",
56 | "IY1",
57 | "IY2",
58 | "JH",
59 | "K",
60 | "L",
61 | "M",
62 | "N",
63 | "NG",
64 | "OW",
65 | "OW0",
66 | "OW1",
67 | "OW2",
68 | "OY",
69 | "OY0",
70 | "OY1",
71 | "OY2",
72 | "P",
73 | "R",
74 | "S",
75 | "SH",
76 | "T",
77 | "TH",
78 | "UH",
79 | "UH0",
80 | "UH1",
81 | "UH2",
82 | "UW",
83 | "UW0",
84 | "UW1",
85 | "UW2",
86 | "V",
87 | "W",
88 | "Y",
89 | "Z",
90 | "ZH",
91 | ]
92 |
93 | _valid_symbol_set = set(valid_symbols)
94 |
95 |
96 | class CMUDict:
97 | """Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict"""
98 |
99 | def __init__(self, file_or_path, keep_ambiguous=True):
100 | if isinstance(file_or_path, str):
101 | with open(file_or_path, encoding="latin-1") as f:
102 | entries = _parse_cmudict(f)
103 | else:
104 | entries = _parse_cmudict(file_or_path)
105 | if not keep_ambiguous:
106 | entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
107 | self._entries = entries
108 |
109 | def __len__(self):
110 | return len(self._entries)
111 |
112 | def lookup(self, word):
113 | """Returns list of ARPAbet pronunciations of the given word."""
114 | return self._entries.get(word.upper())
115 |
116 |
117 | _alt_re = re.compile(r"\([0-9]+\)")
118 |
119 |
120 | def _parse_cmudict(file):
121 | cmudict = {}
122 | for line in file:
123 | if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"):
124 |             parts = line.split("  ")  # word and pronunciations are separated by two spaces
125 | word = re.sub(_alt_re, "", parts[0])
126 | pronunciation = _get_pronunciation(parts[1])
127 | if pronunciation:
128 | if word in cmudict:
129 | cmudict[word].append(pronunciation)
130 | else:
131 | cmudict[word] = [pronunciation]
132 | return cmudict
133 |
134 |
135 | def _get_pronunciation(s):
136 | parts = s.strip().split(" ")
137 | for part in parts:
138 | if part not in _valid_symbol_set:
139 | return None
140 | return " ".join(parts)
141 |
--------------------------------------------------------------------------------
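`CMUDict` is a plain lookup table over the ARPAbet inventory above; a minimal sketch, assuming a local copy of the CMU dictionary file (the `cmudict-0.7b` path is an assumption; the file is not shipped with this repo):

    from text.cmudict import CMUDict

    cmu = CMUDict("cmudict-0.7b", keep_ambiguous=False)  # keep single-pronunciation words only
    print(len(cmu))              # number of entries kept
    print(cmu.lookup("street"))  # e.g. ['S T R IY1 T']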
/text/numbers.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 |
3 | import inflect
4 | import re
5 |
6 |
7 | _inflect = inflect.engine()
8 | _comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
9 | _decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
10 | _pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
11 | _dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)")
12 | _ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
13 | _number_re = re.compile(r"[0-9]+")
14 |
15 |
16 | def _remove_commas(m):
17 | return m.group(1).replace(",", "")
18 |
19 |
20 | def _expand_decimal_point(m):
21 | return m.group(1).replace(".", " point ")
22 |
23 |
24 | def _expand_dollars(m):
25 | match = m.group(1)
26 | parts = match.split(".")
27 | if len(parts) > 2:
28 | return match + " dollars" # Unexpected format
29 | dollars = int(parts[0]) if parts[0] else 0
30 | cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
31 | if dollars and cents:
32 | dollar_unit = "dollar" if dollars == 1 else "dollars"
33 | cent_unit = "cent" if cents == 1 else "cents"
34 | return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
35 | elif dollars:
36 | dollar_unit = "dollar" if dollars == 1 else "dollars"
37 | return "%s %s" % (dollars, dollar_unit)
38 | elif cents:
39 | cent_unit = "cent" if cents == 1 else "cents"
40 | return "%s %s" % (cents, cent_unit)
41 | else:
42 | return "zero dollars"
43 |
44 |
45 | def _expand_ordinal(m):
46 | return _inflect.number_to_words(m.group(0))
47 |
48 |
49 | def _expand_number(m):
50 | num = int(m.group(0))
51 | if num > 1000 and num < 3000:
52 | if num == 2000:
53 | return "two thousand"
54 | elif num > 2000 and num < 2010:
55 | return "two thousand " + _inflect.number_to_words(num % 100)
56 | elif num % 100 == 0:
57 | return _inflect.number_to_words(num // 100) + " hundred"
58 | else:
59 | return _inflect.number_to_words(
60 | num, andword="", zero="oh", group=2
61 | ).replace(", ", " ")
62 | else:
63 | return _inflect.number_to_words(num, andword="")
64 |
65 |
66 | def normalize_numbers(text):
67 | text = re.sub(_comma_number_re, _remove_commas, text)
68 | text = re.sub(_pounds_re, r"\1 pounds", text)
69 | text = re.sub(_dollars_re, _expand_dollars, text)
70 | text = re.sub(_decimal_number_re, _expand_decimal_point, text)
71 | text = re.sub(_ordinal_re, _expand_ordinal, text)
72 | text = re.sub(_number_re, _expand_number, text)
73 | return text
74 |
--------------------------------------------------------------------------------
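`normalize_numbers` applies its regexes in a fixed order (commas, pounds, dollars, decimals, ordinals, plain numbers); the order matters because `_expand_dollars` leaves digits behind for `_expand_number` to verbalize. A quick sketch:

    from text.numbers import normalize_numbers

    # Prints roughly: "one thousand items at three dollars, fifty cents each, due on the second"
    print(normalize_numbers("1,000 items at $3.50 each, due on the 2nd"))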
/text/pinyin.py:
--------------------------------------------------------------------------------
1 | initials = [
2 | "b",
3 | "c",
4 | "ch",
5 | "d",
6 | "f",
7 | "g",
8 | "h",
9 | "j",
10 | "k",
11 | "l",
12 | "m",
13 | "n",
14 | "p",
15 | "q",
16 | "r",
17 | "s",
18 | "sh",
19 | "t",
20 | "w",
21 | "x",
22 | "y",
23 | "z",
24 | "zh",
25 | ]
26 | finals = [
27 | "a1",
28 | "a2",
29 | "a3",
30 | "a4",
31 | "a5",
32 | "ai1",
33 | "ai2",
34 | "ai3",
35 | "ai4",
36 | "ai5",
37 | "an1",
38 | "an2",
39 | "an3",
40 | "an4",
41 | "an5",
42 | "ang1",
43 | "ang2",
44 | "ang3",
45 | "ang4",
46 | "ang5",
47 | "ao1",
48 | "ao2",
49 | "ao3",
50 | "ao4",
51 | "ao5",
52 | "e1",
53 | "e2",
54 | "e3",
55 | "e4",
56 | "e5",
57 | "ei1",
58 | "ei2",
59 | "ei3",
60 | "ei4",
61 | "ei5",
62 | "en1",
63 | "en2",
64 | "en3",
65 | "en4",
66 | "en5",
67 | "eng1",
68 | "eng2",
69 | "eng3",
70 | "eng4",
71 | "eng5",
72 | "er1",
73 | "er2",
74 | "er3",
75 | "er4",
76 | "er5",
77 | "i1",
78 | "i2",
79 | "i3",
80 | "i4",
81 | "i5",
82 | "ia1",
83 | "ia2",
84 | "ia3",
85 | "ia4",
86 | "ia5",
87 | "ian1",
88 | "ian2",
89 | "ian3",
90 | "ian4",
91 | "ian5",
92 | "iang1",
93 | "iang2",
94 | "iang3",
95 | "iang4",
96 | "iang5",
97 | "iao1",
98 | "iao2",
99 | "iao3",
100 | "iao4",
101 | "iao5",
102 | "ie1",
103 | "ie2",
104 | "ie3",
105 | "ie4",
106 | "ie5",
107 | "ii1",
108 | "ii2",
109 | "ii3",
110 | "ii4",
111 | "ii5",
112 | "iii1",
113 | "iii2",
114 | "iii3",
115 | "iii4",
116 | "iii5",
117 | "in1",
118 | "in2",
119 | "in3",
120 | "in4",
121 | "in5",
122 | "ing1",
123 | "ing2",
124 | "ing3",
125 | "ing4",
126 | "ing5",
127 | "iong1",
128 | "iong2",
129 | "iong3",
130 | "iong4",
131 | "iong5",
132 | "iou1",
133 | "iou2",
134 | "iou3",
135 | "iou4",
136 | "iou5",
137 | "o1",
138 | "o2",
139 | "o3",
140 | "o4",
141 | "o5",
142 | "ong1",
143 | "ong2",
144 | "ong3",
145 | "ong4",
146 | "ong5",
147 | "ou1",
148 | "ou2",
149 | "ou3",
150 | "ou4",
151 | "ou5",
152 | "u1",
153 | "u2",
154 | "u3",
155 | "u4",
156 | "u5",
157 | "ua1",
158 | "ua2",
159 | "ua3",
160 | "ua4",
161 | "ua5",
162 | "uai1",
163 | "uai2",
164 | "uai3",
165 | "uai4",
166 | "uai5",
167 | "uan1",
168 | "uan2",
169 | "uan3",
170 | "uan4",
171 | "uan5",
172 | "uang1",
173 | "uang2",
174 | "uang3",
175 | "uang4",
176 | "uang5",
177 | "uei1",
178 | "uei2",
179 | "uei3",
180 | "uei4",
181 | "uei5",
182 | "uen1",
183 | "uen2",
184 | "uen3",
185 | "uen4",
186 | "uen5",
187 | "uo1",
188 | "uo2",
189 | "uo3",
190 | "uo4",
191 | "uo5",
192 | "v1",
193 | "v2",
194 | "v3",
195 | "v4",
196 | "v5",
197 | "van1",
198 | "van2",
199 | "van3",
200 | "van4",
201 | "van5",
202 | "ve1",
203 | "ve2",
204 | "ve3",
205 | "ve4",
206 | "ve5",
207 | "vn1",
208 | "vn2",
209 | "vn3",
210 | "vn4",
211 | "vn5",
212 | ]
213 | valid_symbols = initials + finals + ["rr"]
--------------------------------------------------------------------------------
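These initials and finals are keyed by the TONE3 syllables that `pypinyin` emits; the call below mirrors `preprocess_mandarin` in `synthesize.py`:

    from pypinyin import Style, pinyin

    # Neutral tones are written with a trailing 5, matching the finals above.
    syllables = [
        p[0]
        for p in pinyin("你好", style=Style.TONE3, strict=False, neutral_tone_with_five=True)
    ]
    print(syllables)  # ['ni3', 'hao3']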
/text/symbols.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 |
3 | """
4 | Defines the set of symbols used in text input to the model.
5 |
6 | The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify the _letters set below to match your inputs. """
7 |
8 | from text import cmudict, pinyin
9 |
10 | _pad = "_"
11 | _punctuation = "!'(),.:;? "
12 | _special = "-"
13 | _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
14 | _silences = ["@sp", "@spn", "@sil"]
15 |
16 | # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
17 | _arpabet = ["@" + s for s in cmudict.valid_symbols]
18 | _pinyin = ["@" + s for s in pinyin.valid_symbols]
19 |
20 | # Export all symbols:
21 | symbols = (
22 | [_pad]
23 | + list(_special)
24 | + list(_punctuation)
25 | + list(_letters)
26 | + _arpabet
27 | + _pinyin
28 | + _silences
29 | )
30 |
--------------------------------------------------------------------------------
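Symbol IDs are simply positions in this list, so the mapping stays stable as long as the inventory above is unchanged; a quick sanity check:

    from text.symbols import symbols

    print(len(symbols))  # total inventory size
    # Plain letters and @-prefixed phones coexist without collisions.
    print(symbols.index("a"), symbols.index("@AH0"), symbols.index("@sp"))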
/train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import torch
5 | import yaml
6 | import torch.nn as nn
7 | from torch.utils.data import DataLoader
8 | from torch.utils.tensorboard import SummaryWriter
9 | from tqdm import tqdm
10 |
11 | from utils.model import get_model, get_vocoder, get_param_num
12 | from utils.tools import to_device, log, synth_one_sample
13 | from model import FastPitchFormantLoss
14 | from dataset import Dataset
15 |
16 | from evaluate import evaluate
17 |
18 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
19 |
20 |
21 | def main(args, configs):
22 | print("Prepare training ...")
23 |
24 | preprocess_config, model_config, train_config = configs
25 |
26 | # Get dataset
27 | dataset = Dataset(
28 | "train.txt", preprocess_config, train_config, sort=True, drop_last=True
29 | )
30 | batch_size = train_config["optimizer"]["batch_size"]
31 | group_size = 4 # Set this larger than 1 to enable sorting in Dataset
32 | assert batch_size * group_size < len(dataset)
33 | loader = DataLoader(
34 | dataset,
35 | batch_size=batch_size * group_size,
36 | shuffle=True,
37 | collate_fn=dataset.collate_fn,
38 | )
39 |
40 | # Prepare model
41 | model, optimizer = get_model(args, configs, device, train=True)
42 | model = nn.DataParallel(model)
43 | num_param = get_param_num(model)
44 | Loss = FastPitchFormantLoss(preprocess_config, model_config).to(device)
45 | print("Number of FastPitchFormant Parameters:", num_param)
46 |
47 | # Load vocoder
48 | vocoder = get_vocoder(model_config, device)
49 |
50 | # Init logger
51 | for p in train_config["path"].values():
52 | os.makedirs(p, exist_ok=True)
53 | train_log_path = os.path.join(train_config["path"]["log_path"], "train")
54 | val_log_path = os.path.join(train_config["path"]["log_path"], "val")
55 | os.makedirs(train_log_path, exist_ok=True)
56 | os.makedirs(val_log_path, exist_ok=True)
57 | train_logger = SummaryWriter(train_log_path)
58 | val_logger = SummaryWriter(val_log_path)
59 |
60 | # Training
61 | step = args.restore_step + 1
62 | epoch = 1
63 | grad_acc_step = train_config["optimizer"]["grad_acc_step"]
64 | grad_clip_thresh = train_config["optimizer"]["grad_clip_thresh"]
65 | total_step = train_config["step"]["total_step"]
66 | log_step = train_config["step"]["log_step"]
67 | save_step = train_config["step"]["save_step"]
68 | synth_step = train_config["step"]["synth_step"]
69 | val_step = train_config["step"]["val_step"]
70 |
71 | outer_bar = tqdm(total=total_step, desc="Training", position=0)
72 | outer_bar.n = args.restore_step
73 | outer_bar.update()
74 |
75 | while True:
76 | inner_bar = tqdm(total=len(loader), desc="Epoch {}".format(epoch), position=1)
77 | for batchs in loader:
78 | for batch in batchs:
79 | batch = to_device(batch, device)
80 |
81 | # Forward
82 | output = model(*(batch[2:]))
83 |
84 | # Cal Loss
85 | losses = Loss(batch, output)
86 | total_loss = losses[0]
87 |
88 | # Backward
89 | total_loss = total_loss / grad_acc_step
90 | total_loss.backward()
91 | if step % grad_acc_step == 0:
92 | # Clipping gradients to avoid gradient explosion
93 | nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
94 |
95 | # Update weights
96 | optimizer.step_and_update_lr()
97 | optimizer.zero_grad()
98 |
99 | if step % log_step == 0:
100 | losses = [l.item() for l in losses]
101 | message1 = "Step {}/{}, ".format(step, total_step)
102 | message2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Pitch Loss: {:.4f}, Duration Loss: {:.4f}".format(
103 | *losses
104 | )
105 |
106 | with open(os.path.join(train_log_path, "log.txt"), "a") as f:
107 | f.write(message1 + message2 + "\n")
108 |
109 | outer_bar.write(message1 + message2)
110 |
111 | log(train_logger, step, losses=losses)
112 |
113 | if step % synth_step == 0:
114 | fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
115 | batch,
116 | output,
117 | vocoder,
118 | model_config,
119 | preprocess_config,
120 | )
121 | log(
122 | train_logger,
123 | fig=fig,
124 | tag="Training/step_{}_{}".format(step, tag),
125 | )
126 | sampling_rate = preprocess_config["preprocessing"]["audio"][
127 | "sampling_rate"
128 | ]
129 | log(
130 | train_logger,
131 | audio=wav_reconstruction,
132 | sampling_rate=sampling_rate,
133 | tag="Training/step_{}_{}_reconstructed".format(step, tag),
134 | )
135 | log(
136 | train_logger,
137 | audio=wav_prediction,
138 | sampling_rate=sampling_rate,
139 | tag="Training/step_{}_{}_synthesized".format(step, tag),
140 | )
141 |
142 | if step % val_step == 0:
143 | model.eval()
144 | message = evaluate(model, step, configs, val_logger, vocoder, len(losses))
145 | with open(os.path.join(val_log_path, "log.txt"), "a") as f:
146 | f.write(message + "\n")
147 | outer_bar.write(message)
148 |
149 | model.train()
150 |
151 | if step % save_step == 0:
152 | torch.save(
153 | {
154 | "model": model.module.state_dict(),
155 | "optimizer": optimizer._optimizer.state_dict(),
156 | },
157 | os.path.join(
158 | train_config["path"]["ckpt_path"],
159 | "{}.pth.tar".format(step),
160 | ),
161 | )
162 |
163 | if step == total_step:
164 | quit()
165 | step += 1
166 | outer_bar.update(1)
167 |
168 | inner_bar.update(1)
169 | epoch += 1
170 |
171 |
172 | if __name__ == "__main__":
173 | parser = argparse.ArgumentParser()
174 | parser.add_argument("--restore_step", type=int, default=0)
175 | parser.add_argument(
176 | "-p",
177 | "--preprocess_config",
178 | type=str,
179 | required=True,
180 | help="path to preprocess.yaml",
181 | )
182 | parser.add_argument(
183 | "-m", "--model_config", type=str, required=True, help="path to model.yaml"
184 | )
185 | parser.add_argument(
186 | "-t", "--train_config", type=str, required=True, help="path to train.yaml"
187 | )
188 | args = parser.parse_args()
189 |
190 | # Read Config
191 | preprocess_config = yaml.load(
192 | open(args.preprocess_config, "r"), Loader=yaml.FullLoader
193 | )
194 | model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
195 | train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
196 | configs = (preprocess_config, model_config, train_config)
197 |
198 | main(args, configs)
199 |
--------------------------------------------------------------------------------
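The update schedule above (divide the loss by `grad_acc_step`, clip, then step every `grad_acc_step` batches) is standard gradient accumulation. A toy sketch of just that pattern, with an illustrative linear model and a plain Adam optimizer standing in for the repo's `ScheduledOptim`:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 1)  # stand-in for FastPitchFormant
    optimizer = torch.optim.Adam(model.parameters())
    grad_acc_step, grad_clip_thresh = 2, 1.0

    for step in range(1, 9):
        x, y = torch.randn(8, 4), torch.randn(8, 1)
        loss = nn.functional.mse_loss(model(x), y) / grad_acc_step
        loss.backward()  # gradients accumulate in .grad across iterations
        if step % grad_acc_step == 0:
            nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
            optimizer.step()
            optimizer.zero_grad()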
/utils/model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import torch
5 | import numpy as np
6 |
7 | import hifigan
8 | from model import FastPitchFormant, ScheduledOptim
9 |
10 |
11 | def get_model(args, configs, device, train=False):
12 | (preprocess_config, model_config, train_config) = configs
13 |
14 | model = FastPitchFormant(preprocess_config, model_config).to(device)
15 | if args.restore_step:
16 | ckpt_path = os.path.join(
17 | train_config["path"]["ckpt_path"],
18 | "{}.pth.tar".format(args.restore_step),
19 | )
20 |         ckpt = torch.load(ckpt_path, map_location=device)
21 | model.load_state_dict(ckpt["model"])
22 |
23 | if train:
24 | scheduled_optim = ScheduledOptim(
25 | model, train_config, model_config, args.restore_step
26 | )
27 | if args.restore_step:
28 | scheduled_optim.load_state_dict(ckpt["optimizer"])
29 | model.train()
30 | return model, scheduled_optim
31 |
32 | model.eval()
33 |     model.requires_grad_(False)  # disable gradients for inference
34 | return model
35 |
36 |
37 | def get_param_num(model):
38 | num_param = sum(param.numel() for param in model.parameters())
39 | return num_param
40 |
41 |
42 | def get_vocoder(config, device):
43 | name = config["vocoder"]["model"]
44 | speaker = config["vocoder"]["speaker"]
45 |
46 | if name == "MelGAN":
47 | if speaker == "LJSpeech":
48 | vocoder = torch.hub.load(
49 | "descriptinc/melgan-neurips", "load_melgan", "linda_johnson"
50 | )
51 | elif speaker == "universal":
52 | vocoder = torch.hub.load(
53 | "descriptinc/melgan-neurips", "load_melgan", "multi_speaker"
54 | )
55 | vocoder.mel2wav.eval()
56 | vocoder.mel2wav.to(device)
57 | elif name == "HiFi-GAN":
58 | with open("hifigan/config.json", "r") as f:
59 | config = json.load(f)
60 | config = hifigan.AttrDict(config)
61 | vocoder = hifigan.Generator(config)
62 | if speaker == "LJSpeech":
63 |             ckpt = torch.load("hifigan/generator_LJSpeech.pth.tar", map_location=device)
64 |         elif speaker == "universal":
65 |             ckpt = torch.load("hifigan/generator_universal.pth.tar", map_location=device)
66 | vocoder.load_state_dict(ckpt["generator"])
67 | vocoder.eval()
68 | vocoder.remove_weight_norm()
69 | vocoder.to(device)
70 |
71 | return vocoder
72 |
73 |
74 | def vocoder_infer(mels, vocoder, model_config, preprocess_config, lengths=None):
75 | name = model_config["vocoder"]["model"]
76 | with torch.no_grad():
77 | if name == "MelGAN":
78 | wavs = vocoder.inverse(mels / np.log(10))
79 | elif name == "HiFi-GAN":
80 | wavs = vocoder(mels).squeeze(1)
81 |
82 | wavs = (
83 | wavs.cpu().numpy()
84 | * preprocess_config["preprocessing"]["audio"]["max_wav_value"]
85 | ).astype("int16")
86 | wavs = [wav for wav in wavs]
87 |
88 | for i in range(len(mels)):
89 | if lengths is not None:
90 | wavs[i] = wavs[i][: lengths[i]]
91 |
92 | return wavs
93 |
--------------------------------------------------------------------------------
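`get_model` doubles as the inference-time loader. A minimal sketch, assuming the three LJSpeech configs from this repo and a checkpoint already saved for the given step (the step value 900000 is an assumption):

    from types import SimpleNamespace

    import torch
    import yaml

    from utils.model import get_model

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    configs = tuple(
        yaml.load(open(p, "r"), Loader=yaml.FullLoader)
        for p in (
            "config/LJSpeech/preprocess.yaml",
            "config/LJSpeech/model.yaml",
            "config/LJSpeech/train.yaml",
        )
    )
    args = SimpleNamespace(restore_step=900000)            # assumed checkpoint step
    model = get_model(args, configs, device, train=False)  # eval mode, no optimizer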
/utils/tools.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import torch
5 | import torch.nn.functional as F
6 | import numpy as np
7 | import matplotlib
8 | from scipy.io import wavfile
9 |
10 | matplotlib.use("Agg")  # select a non-interactive backend before pyplot is imported
11 | from matplotlib import pyplot as plt
12 |
13 |
14 |
15 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16 |
17 |
18 | def to_device(data, device):
19 | if len(data) == 11:
20 | (
21 | ids,
22 | raw_texts,
23 | speakers,
24 | texts,
25 | src_lens,
26 | max_src_len,
27 | mels,
28 | mel_lens,
29 | max_mel_len,
30 | pitches,
31 | durations,
32 | ) = data
33 |
34 | speakers = torch.from_numpy(speakers).long().to(device)
35 | texts = torch.from_numpy(texts).long().to(device)
36 | src_lens = torch.from_numpy(src_lens).to(device)
37 | mels = torch.from_numpy(mels).float().to(device)
38 | mel_lens = torch.from_numpy(mel_lens).to(device)
39 | pitches = torch.from_numpy(pitches).float().to(device)
40 | durations = torch.from_numpy(durations).long().to(device)
41 |
42 | return (
43 | ids,
44 | raw_texts,
45 | speakers,
46 | texts,
47 | src_lens,
48 | max_src_len,
49 | mels,
50 | mel_lens,
51 | max_mel_len,
52 | pitches,
53 | durations,
54 | )
55 |
56 | if len(data) == 6:
57 | (ids, raw_texts, speakers, texts, src_lens, max_src_len) = data
58 |
59 | speakers = torch.from_numpy(speakers).long().to(device)
60 | texts = torch.from_numpy(texts).long().to(device)
61 | src_lens = torch.from_numpy(src_lens).to(device)
62 |
63 | return (ids, raw_texts, speakers, texts, src_lens, max_src_len)
64 |
65 |
66 | def log(
67 | logger, step=None, losses=None, fig=None, audio=None, sampling_rate=22050, tag=""
68 | ):
69 | if losses is not None:
70 | logger.add_scalar("Loss/total_loss", losses[0], step)
71 | logger.add_scalar("Loss/mel_loss", losses[1], step)
72 | logger.add_scalar("Loss/pitch_loss", losses[2], step)
73 | logger.add_scalar("Loss/duration_loss", losses[3], step)
74 |
75 | if fig is not None:
76 | logger.add_figure(tag, fig)
77 |
78 | if audio is not None:
79 | logger.add_audio(
80 | tag,
81 | audio / max(abs(audio)),
82 | sample_rate=sampling_rate,
83 | )
84 |
85 |
86 | def get_mask_from_lengths(lengths, max_len=None):
87 | batch_size = lengths.shape[0]
88 | if max_len is None:
89 | max_len = torch.max(lengths).item()
90 |
91 | ids = torch.arange(0, max_len).unsqueeze(0).expand(batch_size, -1).to(device)
92 | mask = ids >= lengths.unsqueeze(1).expand(-1, max_len)
93 |
94 | return mask
95 |
96 |
97 | def expand(values, durations):
98 | out = list()
99 | for value, d in zip(values, durations):
100 | out += [value] * max(0, int(d))
101 | return np.array(out)
102 |
103 |
104 | def synth_one_sample(targets, predictions, vocoder, model_config, preprocess_config):
105 |
106 | basename = targets[0][0]
107 | src_len = predictions[6][0].item()
108 | mel_len = predictions[7][0].item()
109 | mel_target = targets[6][0, :mel_len].detach().transpose(0, 1)
110 | mel_prediction = predictions[0][-1][0, :mel_len].detach().transpose(0, 1) # pick the last iteration
111 | duration = targets[10][0, :src_len].detach().cpu().numpy()
112 | pitch = targets[9][0, :src_len].detach().cpu().numpy()
113 | pitch = expand(pitch, duration)
114 |
115 | with open(
116 | os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
117 | ) as f:
118 | stats = json.load(f)
119 | stats = stats["pitch"]
120 |
121 | fig = plot_mel(
122 | [
123 | (mel_prediction.cpu().numpy(), pitch),
124 | (mel_target.cpu().numpy(), pitch),
125 | ],
126 | stats,
127 |         ["Synthesized Spectrogram", "Ground-Truth Spectrogram"],
128 | )
129 |
130 | if vocoder is not None:
131 | from .model import vocoder_infer
132 |
133 | wav_reconstruction = vocoder_infer(
134 | mel_target.unsqueeze(0),
135 | vocoder,
136 | model_config,
137 | preprocess_config,
138 | )[0]
139 | wav_prediction = vocoder_infer(
140 | mel_prediction.unsqueeze(0),
141 | vocoder,
142 | model_config,
143 | preprocess_config,
144 | )[0]
145 | else:
146 | wav_reconstruction = wav_prediction = None
147 |
148 | return fig, wav_reconstruction, wav_prediction, basename
149 |
150 |
151 | def synth_samples(targets, predictions, vocoder, model_config, preprocess_config, path):
152 |
153 | basenames = targets[0]
154 | for i in range(len(targets[0])):
155 | basename = basenames[i]
156 | src_len = predictions[6][i].item()
157 | mel_len = predictions[7][i].item()
158 | mel_prediction = predictions[0][-1][i, :mel_len].detach().transpose(0, 1) # pick the last iteration
159 | duration = predictions[3][i, :src_len].detach().cpu().numpy()
160 | pitch = predictions[1][i, :src_len].detach().cpu().numpy()
161 | pitch = expand(pitch, duration)
162 |
163 | with open(
164 | os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
165 | ) as f:
166 | stats = json.load(f)
167 | stats = stats["pitch"]
168 |
169 | fig = plot_mel(
170 | [
171 | (mel_prediction.cpu().numpy(), pitch),
172 | ],
173 | stats,
174 |             ["Synthesized Spectrogram"],
175 | )
176 | plt.savefig(os.path.join(path, "{}.png".format(basename)))
177 | plt.close()
178 |
179 | from .model import vocoder_infer
180 |
181 | mel_predictions = predictions[0][-1].transpose(1, 2)
182 | lengths = predictions[7] * preprocess_config["preprocessing"]["stft"]["hop_length"]
183 | wav_predictions = vocoder_infer(
184 | mel_predictions, vocoder, model_config, preprocess_config, lengths=lengths
185 | )
186 |
187 | sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
188 | for wav, basename in zip(wav_predictions, basenames):
189 | wavfile.write(os.path.join(path, "{}.wav".format(basename)), sampling_rate, wav)
190 |
191 |
192 | def plot_mel(data, stats, titles):
193 | fig, axes = plt.subplots(len(data), 1, squeeze=False)
194 | if titles is None:
195 | titles = [None for i in range(len(data))]
196 | pitch_min, pitch_max, pitch_mean, pitch_std = stats
197 | pitch_min = pitch_min * pitch_std + pitch_mean
198 | pitch_max = pitch_max * pitch_std + pitch_mean
199 |
200 | def add_axis(fig, old_ax):
201 | ax = fig.add_axes(old_ax.get_position(), anchor="W")
202 | ax.set_facecolor("None")
203 | return ax
204 |
205 | for i in range(len(data)):
206 | mel, pitch = data[i]
207 | pitch = pitch * pitch_std + pitch_mean
208 | axes[i][0].imshow(mel, origin="lower")
209 | axes[i][0].set_aspect(2.5, adjustable="box")
210 | axes[i][0].set_ylim(0, mel.shape[0])
211 | axes[i][0].set_title(titles[i], fontsize="medium")
212 | axes[i][0].tick_params(labelsize="x-small", left=False, labelleft=False)
213 | axes[i][0].set_anchor("W")
214 |
215 | ax1 = add_axis(fig, axes[i][0])
216 | ax1.plot(pitch, color="tomato", linewidth=.7)
217 | ax1.set_xlim(0, mel.shape[1])
218 | ax1.set_ylim(0, pitch_max)
219 | ax1.set_ylabel("F0", color="tomato")
220 | ax1.tick_params(
221 | labelsize="x-small", colors="tomato", bottom=False, labelbottom=False
222 | )
223 |
224 | return fig
225 |
226 |
227 | def pad_1D(inputs, PAD=0):
228 | def pad_data(x, length, PAD):
229 | x_padded = np.pad(
230 | x, (0, length - x.shape[0]), mode="constant", constant_values=PAD
231 | )
232 | return x_padded
233 |
234 | max_len = max((len(x) for x in inputs))
235 | padded = np.stack([pad_data(x, max_len, PAD) for x in inputs])
236 |
237 | return padded
238 |
239 |
240 | def pad_2D(inputs, maxlen=None):
241 | def pad(x, max_len):
242 | PAD = 0
243 | if np.shape(x)[0] > max_len:
244 |             raise ValueError("input is longer than max_len")
245 |
246 | s = np.shape(x)[1]
247 | x_padded = np.pad(
248 | x, (0, max_len - np.shape(x)[0]), mode="constant", constant_values=PAD
249 | )
250 | return x_padded[:, :s]
251 |
252 | if maxlen:
253 | output = np.stack([pad(x, maxlen) for x in inputs])
254 | else:
255 | max_len = max(np.shape(x)[0] for x in inputs)
256 | output = np.stack([pad(x, max_len) for x in inputs])
257 |
258 | return output
259 |
260 |
261 | def pad(input_ele, mel_max_length=None):
262 | if mel_max_length:
263 | max_len = mel_max_length
264 | else:
265 | max_len = max([input_ele[i].size(0) for i in range(len(input_ele))])
266 |
267 | out_list = list()
268 | for i, batch in enumerate(input_ele):
269 | if len(batch.shape) == 1:
270 | one_batch_padded = F.pad(
271 | batch, (0, max_len - batch.size(0)), "constant", 0.0
272 | )
273 | elif len(batch.shape) == 2:
274 | one_batch_padded = F.pad(
275 | batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
276 | )
277 | out_list.append(one_batch_padded)
278 | out_padded = torch.stack(out_list)
279 | return out_padded
280 |
--------------------------------------------------------------------------------
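The masking and padding helpers above are easy to sanity-check in isolation; a quick sketch:

    import numpy as np
    import torch

    from utils.tools import device, get_mask_from_lengths, pad_1D

    # True marks padded positions past each sequence's length.
    print(get_mask_from_lengths(torch.tensor([3, 5]).to(device)))

    # Variable-length 1-D arrays are right-padded with zeros and stacked.
    print(pad_1D([np.array([1, 2, 3]), np.array([4])]))
    # [[1 2 3]
    #  [4 0 0]]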