├── .cardboardlint.yml ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yaml │ ├── config.yml │ └── feature_request.md ├── PR_TEMPLATE.md ├── stale.yml └── workflows │ ├── aux_tests.yml │ ├── data_tests.yml │ ├── docker.yaml │ ├── inference_tests.yml │ ├── pypi-release.yml │ ├── style_check.yml │ ├── text_tests.yml │ ├── tts_tests.yml │ ├── tts_tests2.yml │ ├── vocoder_tests.yml │ ├── xtts_tests.yml │ ├── zoo_tests0.yml │ ├── zoo_tests1.yml │ └── zoo_tests2.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── .readthedocs.yml ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CODE_OWNERS.rst ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.md ├── TTS ├── .models.json ├── VERSION ├── __init__.py ├── api.py ├── bin │ ├── __init__.py │ ├── collect_env_info.py │ ├── compute_attention_masks.py │ ├── compute_embeddings.py │ ├── compute_statistics.py │ ├── eval_encoder.py │ ├── extract_tts_spectrograms.py │ ├── find_unique_chars.py │ ├── find_unique_phonemes.py │ ├── remove_silence_using_vad.py │ ├── resample.py │ ├── synthesize.py │ ├── train_encoder.py │ ├── train_tts.py │ ├── train_vocoder.py │ └── tune_wavegrad.py ├── config │ ├── __init__.py │ └── shared_configs.py ├── demos │ └── xtts_ft_demo │ │ ├── requirements.txt │ │ ├── utils │ │ ├── formatter.py │ │ └── gpt_train.py │ │ └── xtts_demo.py ├── encoder │ ├── README.md │ ├── __init__.py │ ├── configs │ │ ├── base_encoder_config.py │ │ ├── emotion_encoder_config.py │ │ └── speaker_encoder_config.py │ ├── dataset.py │ ├── losses.py │ ├── models │ │ ├── base_encoder.py │ │ ├── lstm.py │ │ └── resnet.py │ ├── requirements.txt │ └── utils │ │ ├── __init__.py │ │ ├── generic_utils.py │ │ ├── prepare_voxceleb.py │ │ ├── training.py │ │ └── visual.py ├── model.py ├── server │ ├── README.md │ ├── __init__.py │ ├── conf.json │ ├── server.py │ ├── static │ │ └── coqui-log-green-TTS.png │ └── templates │ │ ├── details.html │ │ └── index.html ├── tts │ ├── __init__.py │ ├── configs │ │ ├── __init__.py │ │ ├── align_tts_config.py │ │ ├── bark_config.py │ │ ├── delightful_tts_config.py │ │ ├── fast_pitch_config.py │ │ ├── fast_speech_config.py │ │ ├── fastspeech2_config.py │ │ ├── glow_tts_config.py │ │ ├── neuralhmm_tts_config.py │ │ ├── overflow_config.py │ │ ├── shared_configs.py │ │ ├── speedy_speech_config.py │ │ ├── tacotron2_config.py │ │ ├── tacotron_config.py │ │ ├── tortoise_config.py │ │ ├── vits_config.py │ │ └── xtts_config.py │ ├── datasets │ │ ├── __init__.py │ │ ├── dataset.py │ │ └── formatters.py │ ├── layers │ │ ├── __init__.py │ │ ├── align_tts │ │ │ ├── __init__.py │ │ │ ├── duration_predictor.py │ │ │ └── mdn.py │ │ ├── bark │ │ │ ├── __init__.py │ │ │ ├── hubert │ │ │ │ ├── __init__.py │ │ │ │ ├── hubert_manager.py │ │ │ │ ├── kmeans_hubert.py │ │ │ │ └── tokenizer.py │ │ │ ├── inference_funcs.py │ │ │ ├── load_model.py │ │ │ ├── model.py │ │ │ └── model_fine.py │ │ ├── delightful_tts │ │ │ ├── __init__.py │ │ │ ├── acoustic_model.py │ │ │ ├── conformer.py │ │ │ ├── conv_layers.py │ │ │ ├── encoders.py │ │ │ ├── energy_adaptor.py │ │ │ ├── kernel_predictor.py │ │ │ ├── networks.py │ │ │ ├── phoneme_prosody_predictor.py │ │ │ ├── pitch_adaptor.py │ │ │ └── variance_predictor.py │ │ ├── feed_forward │ │ │ ├── __init__.py │ │ │ ├── decoder.py │ │ │ ├── duration_predictor.py │ │ │ └── encoder.py │ │ ├── generic │ │ │ ├── __init__.py │ │ │ ├── aligner.py │ │ │ ├── gated_conv.py │ │ │ ├── normalization.py │ │ │ ├── pos_encoding.py │ │ │ ├── res_conv_bn.py │ │ │ ├── 
time_depth_sep_conv.py │ │ │ ├── transformer.py │ │ │ └── wavenet.py │ │ ├── glow_tts │ │ │ ├── __init__.py │ │ │ ├── decoder.py │ │ │ ├── duration_predictor.py │ │ │ ├── encoder.py │ │ │ ├── glow.py │ │ │ └── transformer.py │ │ ├── losses.py │ │ ├── overflow │ │ │ ├── __init__.py │ │ │ ├── common_layers.py │ │ │ ├── decoder.py │ │ │ ├── neural_hmm.py │ │ │ └── plotting_utils.py │ │ ├── tacotron │ │ │ ├── __init__.py │ │ │ ├── attentions.py │ │ │ ├── capacitron_layers.py │ │ │ ├── common_layers.py │ │ │ ├── gst_layers.py │ │ │ ├── tacotron.py │ │ │ └── tacotron2.py │ │ ├── tortoise │ │ │ ├── arch_utils.py │ │ │ ├── audio_utils.py │ │ │ ├── autoregressive.py │ │ │ ├── classifier.py │ │ │ ├── clvp.py │ │ │ ├── diffusion.py │ │ │ ├── diffusion_decoder.py │ │ │ ├── dpm_solver.py │ │ │ ├── random_latent_generator.py │ │ │ ├── tokenizer.py │ │ │ ├── transformer.py │ │ │ ├── utils.py │ │ │ ├── vocoder.py │ │ │ ├── wav2vec_alignment.py │ │ │ └── xtransformers.py │ │ ├── vits │ │ │ ├── discriminator.py │ │ │ ├── networks.py │ │ │ ├── stochastic_duration_predictor.py │ │ │ └── transforms.py │ │ └── xtts │ │ │ ├── dvae.py │ │ │ ├── gpt.py │ │ │ ├── gpt_inference.py │ │ │ ├── hifigan_decoder.py │ │ │ ├── latent_encoder.py │ │ │ ├── perceiver_encoder.py │ │ │ ├── stream_generator.py │ │ │ ├── tokenizer.py │ │ │ ├── trainer │ │ │ ├── dataset.py │ │ │ └── gpt_trainer.py │ │ │ ├── xtts_manager.py │ │ │ └── zh_num2words.py │ ├── models │ │ ├── __init__.py │ │ ├── align_tts.py │ │ ├── bark.py │ │ ├── base_tacotron.py │ │ ├── base_tts.py │ │ ├── delightful_tts.py │ │ ├── forward_tts.py │ │ ├── glow_tts.py │ │ ├── neuralhmm_tts.py │ │ ├── overflow.py │ │ ├── tacotron.py │ │ ├── tacotron2.py │ │ ├── tortoise.py │ │ ├── vits.py │ │ └── xtts.py │ └── utils │ │ ├── __init__.py │ │ ├── assets │ │ └── tortoise │ │ │ └── tokenizer.json │ │ ├── data.py │ │ ├── fairseq.py │ │ ├── helpers.py │ │ ├── languages.py │ │ ├── managers.py │ │ ├── measures.py │ │ ├── monotonic_align │ │ ├── __init__.py │ │ ├── core.pyx │ │ └── setup.py │ │ ├── speakers.py │ │ ├── ssim.py │ │ ├── synthesis.py │ │ ├── text │ │ ├── __init__.py │ │ ├── bangla │ │ │ ├── __init__.py │ │ │ └── phonemizer.py │ │ ├── belarusian │ │ │ ├── __init__.py │ │ │ └── phonemizer.py │ │ ├── characters.py │ │ ├── chinese_mandarin │ │ │ ├── __init__.py │ │ │ ├── numbers.py │ │ │ ├── phonemizer.py │ │ │ └── pinyinToPhonemes.py │ │ ├── cleaners.py │ │ ├── cmudict.py │ │ ├── english │ │ │ ├── __init__.py │ │ │ ├── abbreviations.py │ │ │ ├── number_norm.py │ │ │ └── time_norm.py │ │ ├── french │ │ │ ├── __init__.py │ │ │ └── abbreviations.py │ │ ├── japanese │ │ │ ├── __init__.py │ │ │ └── phonemizer.py │ │ ├── korean │ │ │ ├── __init__.py │ │ │ ├── ko_dictionary.py │ │ │ ├── korean.py │ │ │ └── phonemizer.py │ │ ├── phonemizers │ │ │ ├── __init__.py │ │ │ ├── bangla_phonemizer.py │ │ │ ├── base.py │ │ │ ├── belarusian_phonemizer.py │ │ │ ├── espeak_wrapper.py │ │ │ ├── gruut_wrapper.py │ │ │ ├── ja_jp_phonemizer.py │ │ │ ├── ko_kr_phonemizer.py │ │ │ ├── multi_phonemizer.py │ │ │ └── zh_cn_phonemizer.py │ │ ├── punctuation.py │ │ └── tokenizer.py │ │ └── visual.py ├── utils │ ├── __init__.py │ ├── audio │ │ ├── __init__.py │ │ ├── numpy_transforms.py │ │ ├── processor.py │ │ └── torch_transforms.py │ ├── callbacks.py │ ├── capacitron_optimizer.py │ ├── distribute.py │ ├── download.py │ ├── downloaders.py │ ├── generic_utils.py │ ├── io.py │ ├── manage.py │ ├── radam.py │ ├── samplers.py │ ├── synthesizer.py │ ├── training.py │ └── vad.py ├── vc │ ├── configs │ │ ├── 
__init__.py │ │ ├── freevc_config.py │ │ └── shared_configs.py │ ├── models │ │ ├── __init__.py │ │ ├── base_vc.py │ │ └── freevc.py │ └── modules │ │ ├── __init__.py │ │ └── freevc │ │ ├── __init__.py │ │ ├── commons.py │ │ ├── mel_processing.py │ │ ├── modules.py │ │ ├── speaker_encoder │ │ ├── __init__.py │ │ ├── audio.py │ │ ├── hparams.py │ │ └── speaker_encoder.py │ │ └── wavlm │ │ ├── __init__.py │ │ ├── config.json │ │ ├── modules.py │ │ └── wavlm.py └── vocoder │ ├── README.md │ ├── __init__.py │ ├── configs │ ├── __init__.py │ ├── fullband_melgan_config.py │ ├── hifigan_config.py │ ├── melgan_config.py │ ├── multiband_melgan_config.py │ ├── parallel_wavegan_config.py │ ├── shared_configs.py │ ├── univnet_config.py │ ├── wavegrad_config.py │ └── wavernn_config.py │ ├── datasets │ ├── __init__.py │ ├── gan_dataset.py │ ├── preprocess.py │ ├── wavegrad_dataset.py │ └── wavernn_dataset.py │ ├── layers │ ├── __init__.py │ ├── hifigan.py │ ├── losses.py │ ├── lvc_block.py │ ├── melgan.py │ ├── parallel_wavegan.py │ ├── pqmf.py │ ├── qmf.dat │ ├── upsample.py │ └── wavegrad.py │ ├── models │ ├── __init__.py │ ├── base_vocoder.py │ ├── fullband_melgan_generator.py │ ├── gan.py │ ├── hifigan_discriminator.py │ ├── hifigan_generator.py │ ├── melgan_discriminator.py │ ├── melgan_generator.py │ ├── melgan_multiscale_discriminator.py │ ├── multiband_melgan_generator.py │ ├── parallel_wavegan_discriminator.py │ ├── parallel_wavegan_generator.py │ ├── random_window_discriminator.py │ ├── univnet_discriminator.py │ ├── univnet_generator.py │ ├── wavegrad.py │ └── wavernn.py │ ├── pqmf_output.wav │ └── utils │ ├── __init__.py │ ├── distribution.py │ └── generic_utils.py ├── dockerfiles └── Dockerfile.dev ├── docs ├── Makefile ├── README.md ├── requirements.txt └── source │ ├── _static │ └── logo.png │ ├── _templates │ └── page.html │ ├── conf.py │ ├── configuration.md │ ├── contributing.md │ ├── docker_images.md │ ├── faq.md │ ├── finetuning.md │ ├── formatting_your_dataset.md │ ├── implementing_a_new_language_frontend.md │ ├── implementing_a_new_model.md │ ├── index.md │ ├── inference.md │ ├── installation.md │ ├── main_classes │ ├── audio_processor.md │ ├── dataset.md │ ├── gan.md │ ├── model_api.md │ ├── speaker_manager.md │ └── trainer_api.md │ ├── make.bat │ ├── marytts.md │ ├── models │ ├── bark.md │ ├── forward_tts.md │ ├── glow_tts.md │ ├── overflow.md │ ├── tacotron1-2.md │ ├── tortoise.md │ ├── vits.md │ └── xtts.md │ ├── training_a_model.md │ ├── tts_datasets.md │ ├── tutorial_for_nervous_beginners.md │ └── what_makes_a_good_dataset.md ├── hubconf.py ├── images ├── TTS-performance.png ├── coqui-log-green-TTS.png ├── demo_server.gif ├── example_model_output.png ├── model.png ├── tts_cli.gif └── tts_performance.png ├── notebooks ├── ExtractTTSpectrogram.ipynb ├── PlotUmapLibriTTS.ipynb ├── TestAttention.ipynb ├── Tortoise.ipynb ├── Tutorial_1_use-pretrained-TTS.ipynb ├── Tutorial_2_train_your_first_TTS_model.ipynb └── dataset_analysis │ ├── AnalyzeDataset.ipynb │ ├── CheckDatasetSNR.ipynb │ ├── CheckPitch.ipynb │ ├── CheckSpectrograms.ipynb │ ├── PhonemeCoverage.ipynb │ ├── README.md │ └── analyze.py ├── pyproject.toml ├── recipes ├── README.md ├── bel-alex73 │ ├── .gitignore │ ├── README.md │ ├── choose_speaker.ipynb │ ├── docker-prepare-start.sh │ ├── docker-prepare │ │ ├── Dockerfile │ │ └── runtime.sh │ ├── dump_config.py │ ├── train_glowtts.py │ └── train_hifigan.py ├── blizzard2013 │ ├── README.md │ ├── tacotron1-Capacitron │ │ └── train_capacitron_t1.py │ └── tacotron2-Capacitron 
│ │ └── train_capacitron_t2.py ├── kokoro │ └── tacotron2-DDC │ │ ├── run.sh │ │ └── tacotron2-DDC.json ├── ljspeech │ ├── README.md │ ├── align_tts │ │ └── train_aligntts.py │ ├── delightful_tts │ │ └── train_delightful_tts.py │ ├── download_ljspeech.sh │ ├── fast_pitch │ │ └── train_fast_pitch.py │ ├── fast_speech │ │ └── train_fast_speech.py │ ├── fastspeech2 │ │ └── train_fastspeech2.py │ ├── glow_tts │ │ └── train_glowtts.py │ ├── hifigan │ │ └── train_hifigan.py │ ├── multiband_melgan │ │ └── train_multiband_melgan.py │ ├── neuralhmm_tts │ │ └── train_neuralhmmtts.py │ ├── overflow │ │ ├── lj_parameters.pt │ │ └── train_overflow.py │ ├── speedy_speech │ │ └── train_speedy_speech.py │ ├── tacotron2-Capacitron │ │ └── train_capacitron_t2.py │ ├── tacotron2-DCA │ │ └── train_tacotron_dca.py │ ├── tacotron2-DDC │ │ └── train_tacotron_ddc.py │ ├── univnet │ │ └── train.py │ ├── vits_tts │ │ └── train_vits.py │ ├── wavegrad │ │ └── train_wavegrad.py │ ├── wavernn │ │ └── train_wavernn.py │ ├── xtts_v1 │ │ └── train_gpt_xtts.py │ └── xtts_v2 │ │ └── train_gpt_xtts.py ├── multilingual │ ├── cml_yourtts │ │ └── train_yourtts.py │ └── vits_tts │ │ ├── train_vits_tts.py │ │ └── train_vits_tts_phonemes.py ├── thorsten_DE │ ├── README.md │ ├── align_tts │ │ └── train_aligntts.py │ ├── download_thorsten_DE.sh │ ├── glow_tts │ │ └── train_glowtts.py │ ├── hifigan │ │ └── train_hifigan.py │ ├── multiband_melgan │ │ └── train_multiband_melgan.py │ ├── speedy_speech │ │ └── train_speedy_speech.py │ ├── tacotron2-DDC │ │ └── train_tacotron_ddc.py │ ├── univnet │ │ └── train_univnet.py │ ├── vits_tts │ │ └── train_vits.py │ ├── wavegrad │ │ └── train_wavegrad.py │ └── wavernn │ │ └── train_wavernn.py └── vctk │ ├── delightful_tts │ └── train_delightful_tts.py │ ├── download_vctk.sh │ ├── fast_pitch │ └── train_fast_pitch.py │ ├── fast_speech │ └── train_fast_speech.py │ ├── glow_tts │ └── train_glow_tts.py │ ├── resnet_speaker_encoder │ └── train_encoder.py │ ├── speedy_speech │ └── train_speedy_speech.py │ ├── tacotron-DDC │ └── train_tacotron-DDC.py │ ├── tacotron2-DDC │ └── train_tacotron2-ddc.py │ ├── tacotron2 │ └── train_tacotron2.py │ ├── vits │ └── train_vits.py │ └── yourtts │ └── train_yourtts.py ├── requirements.dev.txt ├── requirements.ja.txt ├── requirements.notebooks.txt ├── requirements.txt ├── run_bash_tests.sh ├── scripts └── sync_readme.py ├── setup.cfg ├── setup.py └── tests ├── __init__.py ├── aux_tests ├── __init__.py ├── test_audio_processor.py ├── test_embedding_manager.py ├── test_extract_tts_spectrograms.py ├── test_find_unique_phonemes.py ├── test_numpy_transforms.py ├── test_readme.py ├── test_speaker_encoder.py ├── test_speaker_encoder_train.py ├── test_speaker_manager.py └── test_stft_torch.py ├── bash_tests ├── test_compute_statistics.sh └── test_demo_server.sh ├── data ├── dummy_speakers.json ├── dummy_speakers.pth ├── dummy_speakers2.json └── ljspeech │ ├── f0_cache │ └── pitch_stats.npy │ ├── metadata.csv │ ├── metadata_attn_mask.txt │ ├── metadata_flac.csv │ ├── metadata_mp3.csv │ ├── metadata_wav.csv │ ├── speakers.json │ └── wavs │ ├── LJ001-0001.flac │ ├── LJ001-0001.mp3 │ ├── LJ001-0001.npy │ ├── LJ001-0001.wav │ ├── LJ001-0002.flac │ ├── LJ001-0002.mp3 │ ├── LJ001-0002.npy │ ├── LJ001-0002.wav │ ├── LJ001-0003.flac │ ├── LJ001-0003.mp3 │ ├── LJ001-0003.npy │ ├── LJ001-0003.wav │ ├── LJ001-0004.flac │ ├── LJ001-0004.mp3 │ ├── LJ001-0004.npy │ ├── LJ001-0004.wav │ ├── LJ001-0005.flac │ ├── LJ001-0005.mp3 │ ├── LJ001-0005.npy │ ├── LJ001-0005.wav │ ├── LJ001-0006.flac 
│ ├── LJ001-0006.mp3 │ ├── LJ001-0006.npy │ ├── LJ001-0006.wav │ ├── LJ001-0007.flac │ ├── LJ001-0007.mp3 │ ├── LJ001-0007.npy │ ├── LJ001-0007.wav │ ├── LJ001-0008.flac │ ├── LJ001-0008.mp3 │ ├── LJ001-0008.npy │ ├── LJ001-0008.wav │ ├── LJ001-0009.flac │ ├── LJ001-0009.mp3 │ ├── LJ001-0009.npy │ ├── LJ001-0009.wav │ ├── LJ001-0010.flac │ ├── LJ001-0010.mp3 │ ├── LJ001-0010.npy │ ├── LJ001-0010.wav │ ├── LJ001-0011.flac │ ├── LJ001-0011.mp3 │ ├── LJ001-0011.npy │ ├── LJ001-0011.wav │ ├── LJ001-0012.flac │ ├── LJ001-0012.mp3 │ ├── LJ001-0012.npy │ ├── LJ001-0012.wav │ ├── LJ001-0013.flac │ ├── LJ001-0013.mp3 │ ├── LJ001-0013.npy │ ├── LJ001-0013.wav │ ├── LJ001-0014.flac │ ├── LJ001-0014.mp3 │ ├── LJ001-0014.npy │ ├── LJ001-0014.wav │ ├── LJ001-0015.flac │ ├── LJ001-0015.mp3 │ ├── LJ001-0015.npy │ ├── LJ001-0015.wav │ ├── LJ001-0016.flac │ ├── LJ001-0016.mp3 │ ├── LJ001-0016.npy │ ├── LJ001-0016.wav │ ├── LJ001-0017.flac │ ├── LJ001-0017.mp3 │ ├── LJ001-0017.npy │ ├── LJ001-0017.wav │ ├── LJ001-0018.flac │ ├── LJ001-0018.mp3 │ ├── LJ001-0018.npy │ ├── LJ001-0018.wav │ ├── LJ001-0019.flac │ ├── LJ001-0019.mp3 │ ├── LJ001-0019.npy │ ├── LJ001-0019.wav │ ├── LJ001-0020.flac │ ├── LJ001-0020.mp3 │ ├── LJ001-0020.npy │ ├── LJ001-0020.wav │ ├── LJ001-0021.flac │ ├── LJ001-0021.mp3 │ ├── LJ001-0021.npy │ ├── LJ001-0021.wav │ ├── LJ001-0022.flac │ ├── LJ001-0022.mp3 │ ├── LJ001-0022.npy │ ├── LJ001-0022.wav │ ├── LJ001-0023.flac │ ├── LJ001-0023.mp3 │ ├── LJ001-0023.npy │ ├── LJ001-0023.wav │ ├── LJ001-0024.flac │ ├── LJ001-0024.mp3 │ ├── LJ001-0024.npy │ ├── LJ001-0024.wav │ ├── LJ001-0025.flac │ ├── LJ001-0025.mp3 │ ├── LJ001-0025.npy │ ├── LJ001-0025.wav │ ├── LJ001-0026.flac │ ├── LJ001-0026.mp3 │ ├── LJ001-0026.npy │ ├── LJ001-0026.wav │ ├── LJ001-0027.flac │ ├── LJ001-0027.mp3 │ ├── LJ001-0027.npy │ ├── LJ001-0027.wav │ ├── LJ001-0028.flac │ ├── LJ001-0028.mp3 │ ├── LJ001-0028.npy │ ├── LJ001-0028.wav │ ├── LJ001-0029.flac │ ├── LJ001-0029.mp3 │ ├── LJ001-0029.npy │ ├── LJ001-0029.wav │ ├── LJ001-0030.flac │ ├── LJ001-0030.mp3 │ ├── LJ001-0030.npy │ ├── LJ001-0030.wav │ ├── LJ001-0031.flac │ ├── LJ001-0031.mp3 │ ├── LJ001-0031.npy │ ├── LJ001-0031.wav │ ├── LJ001-0032.flac │ ├── LJ001-0032.mp3 │ ├── LJ001-0032.npy │ └── LJ001-0032.wav ├── data_tests ├── __init__.py ├── test_dataset_formatters.py ├── test_loader.py └── test_samplers.py ├── inference_tests ├── __init__.py ├── test_synthesize.py └── test_synthesizer.py ├── inputs ├── common_voice.tsv ├── dummy_model_config.json ├── example_1.wav ├── language_ids.json ├── scale_stats.npy ├── server_config.json ├── test_align_tts.json ├── test_config.json ├── test_glow_tts.json ├── test_speaker_encoder_config.json ├── test_speedy_speech.json ├── test_tacotron2_config.json ├── test_tacotron_bd_config.json ├── test_tacotron_config.json ├── test_vocoder_audio_config.json ├── test_vocoder_multiband_melgan_config.json ├── test_vocoder_wavegrad.json ├── test_vocoder_wavernn_config.json └── xtts_vocab.json ├── text_tests ├── __init__.py ├── test_belarusian_phonemizer.py ├── test_characters.py ├── test_japanese_phonemizer.py ├── test_korean_phonemizer.py ├── test_phonemizer.py ├── test_punctuation.py ├── test_text_cleaners.py └── test_tokenizer.py ├── tts_tests ├── __init__.py ├── test_helpers.py ├── test_losses.py ├── test_neuralhmm_tts_train.py ├── test_overflow.py ├── test_overflow_train.py ├── test_speedy_speech_train.py ├── test_tacotron2_d-vectors_train.py ├── test_tacotron2_model.py ├── test_tacotron2_speaker_emb_train.py ├── 
test_tacotron2_train.py ├── test_tacotron_layers.py ├── test_tacotron_model.py ├── test_tacotron_train.py ├── test_vits.py ├── test_vits_d-vectors_train.py ├── test_vits_multilingual_speaker_emb_train.py ├── test_vits_multilingual_train-d_vectors.py ├── test_vits_speaker_emb_train.py └── test_vits_train.py ├── tts_tests2 ├── __init__.py ├── test_align_tts_train.py ├── test_delightful_tts_d-vectors_train.py ├── test_delightful_tts_emb_spk.py ├── test_delightful_tts_layers.py ├── test_delightful_tts_train.py ├── test_fast_pitch_speaker_emb_train.py ├── test_fast_pitch_train.py ├── test_fastspeech_2_speaker_emb_train.py ├── test_fastspeech_2_train.py ├── test_feed_forward_layers.py ├── test_forward_tts.py ├── test_glow_tts.py ├── test_glow_tts_d-vectors_train.py ├── test_glow_tts_speaker_emb_train.py └── test_glow_tts_train.py ├── vc_tests ├── __init__.py └── test_freevc.py ├── vocoder_tests ├── __init__.py ├── test_fullband_melgan_train.py ├── test_hifigan_train.py ├── test_melgan_train.py ├── test_multiband_melgan_train.py ├── test_parallel_wavegan_train.py ├── test_vocoder_gan_datasets.py ├── test_vocoder_losses.py ├── test_vocoder_melgan_discriminator.py ├── test_vocoder_melgan_generator.py ├── test_vocoder_parallel_wavegan_discriminator.py ├── test_vocoder_parallel_wavegan_generator.py ├── test_vocoder_pqmf.py ├── test_vocoder_rwd.py ├── test_vocoder_wavernn.py ├── test_vocoder_wavernn_datasets.py ├── test_wavegrad.py ├── test_wavegrad_layers.py ├── test_wavegrad_train.py └── test_wavernn_train.py ├── xtts_tests ├── test_xtts_gpt_train.py └── test_xtts_v2-0_gpt_train.py └── zoo_tests ├── __init__.py └── test_models.py /.cardboardlint.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | - pylint: 3 | # pylintrc: pylintrc 4 | filefilter: ['- test_*.py', '+ *.py', '- *.npy'] 5 | # exclude: -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | Dockerfile 3 | build/ 4 | dist/ 5 | TTS.egg-info/ 6 | tests/outputs/* 7 | tests/train_outputs/* 8 | __pycache__/ 9 | *.pyc -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: CoquiTTS GitHub Discussions 4 | url: https://github.com/coqui-ai/TTS/discussions 5 | about: Please ask and answer questions here. 6 | - name: Coqui Security issue disclosure 7 | url: mailto:info@coqui.ai 8 | about: Please report security vulnerabilities here. 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🚀 Feature request 3 | about: Suggest a feature or an idea for this project 4 | title: '[Feature request] ' 5 | labels: feature request 6 | assignees: '' 7 | 8 | --- 9 | 11 | **🚀 Feature Description** 12 | 13 | 14 | 15 | **Solution** 16 | 17 | 18 | 19 | **Alternative Solutions** 20 | 21 | 22 | 23 | **Additional context** 24 | 25 | 26 | -------------------------------------------------------------------------------- /.github/PR_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Pull request guidelines 2 | 3 | Welcome to the 🐸TTS project! 
We are excited to see your interest, and appreciate your support! 4 | 5 | This repository is governed by the Contributor Covenant Code of Conduct. For more details, see the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file. 6 | 7 | In order to make a good pull request, please see our [CONTRIBUTING.md](CONTRIBUTING.md) file. 8 | 9 | Before accepting your pull request, you will be asked to sign a [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS). 10 | 11 | This [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS): 12 | 13 | - Protects you, Coqui, and the users of the code. 14 | - Does not change your rights to use your contributions for any purpose. 15 | - Does not change the license of the 🐸TTS project. It just makes the terms of your contribution clearer and lets us know you are OK to contribute. 16 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 30 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | # Label to use when marking an issue as stale 10 | staleLabel: wontfix 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. You might also look our discussion channels. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: false 18 | 19 | -------------------------------------------------------------------------------- /.github/workflows/aux_tests.yml: -------------------------------------------------------------------------------- 1 | name: aux-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! 
contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y git make gcc 40 | make system-deps 41 | - name: Install/upgrade Python setup deps 42 | run: python3 -m pip install --upgrade pip setuptools wheel 43 | - name: Replace scarf urls 44 | run: | 45 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 46 | - name: Install TTS 47 | run: | 48 | python3 -m pip install .[all] 49 | python3 setup.py egg_info 50 | - name: Unit tests 51 | run: make test_aux 52 | -------------------------------------------------------------------------------- /.github/workflows/data_tests.yml: -------------------------------------------------------------------------------- 1 | name: data-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y --no-install-recommends git make gcc 40 | make system-deps 41 | - name: Install/upgrade Python setup deps 42 | run: python3 -m pip install --upgrade pip setuptools wheel 43 | - name: Replace scarf urls 44 | run: | 45 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 46 | - name: Install TTS 47 | run: | 48 | python3 -m pip install .[all] 49 | python3 setup.py egg_info 50 | - name: Unit tests 51 | run: make data_tests 52 | -------------------------------------------------------------------------------- /.github/workflows/inference_tests.yml: -------------------------------------------------------------------------------- 1 | name: inference_tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! 
contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: | 36 | export TRAINER_TELEMETRY=0 37 | - name: Install dependencies 38 | run: | 39 | sudo apt-get update 40 | sudo apt-get install -y --no-install-recommends git make gcc 41 | sudo apt-get install espeak-ng 42 | make system-deps 43 | - name: Install/upgrade Python setup deps 44 | run: python3 -m pip install --upgrade pip setuptools wheel 45 | - name: Replace scarf urls 46 | run: | 47 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 48 | - name: Install TTS 49 | run: | 50 | python3 -m pip install .[all] 51 | python3 setup.py egg_info 52 | - name: Unit tests 53 | run: make inference_tests 54 | -------------------------------------------------------------------------------- /.github/workflows/style_check.yml: -------------------------------------------------------------------------------- 1 | name: style-check 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: Install dependencies 35 | run: | 36 | sudo apt-get update 37 | sudo apt-get install -y git make gcc 38 | make system-deps 39 | - name: Install/upgrade Python setup deps 40 | run: python3 -m pip install --upgrade pip setuptools wheel 41 | - name: Install TTS 42 | run: | 43 | python3 -m pip install .[all] 44 | python3 setup.py egg_info 45 | - name: Style check 46 | run: make style 47 | -------------------------------------------------------------------------------- /.github/workflows/text_tests.yml: -------------------------------------------------------------------------------- 1 | name: text-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! 
contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y --no-install-recommends git make gcc 40 | sudo apt-get install espeak 41 | sudo apt-get install espeak-ng 42 | make system-deps 43 | - name: Install/upgrade Python setup deps 44 | run: python3 -m pip install --upgrade pip setuptools wheel 45 | - name: Install TTS 46 | run: | 47 | python3 -m pip install .[all] 48 | python3 setup.py egg_info 49 | - name: Unit tests 50 | run: make test_text 51 | -------------------------------------------------------------------------------- /.github/workflows/tts_tests.yml: -------------------------------------------------------------------------------- 1 | name: tts-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y --no-install-recommends git make gcc 40 | sudo apt-get install espeak 41 | sudo apt-get install espeak-ng 42 | make system-deps 43 | - name: Install/upgrade Python setup deps 44 | run: python3 -m pip install --upgrade pip setuptools wheel 45 | - name: Replace scarf urls 46 | run: | 47 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 48 | - name: Install TTS 49 | run: | 50 | python3 -m pip install .[all] 51 | python3 setup.py egg_info 52 | - name: Unit tests 53 | run: make test_tts 54 | -------------------------------------------------------------------------------- /.github/workflows/tts_tests2.yml: -------------------------------------------------------------------------------- 1 | name: tts-tests2 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! 
contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y --no-install-recommends git make gcc 40 | sudo apt-get install espeak 41 | sudo apt-get install espeak-ng 42 | make system-deps 43 | - name: Install/upgrade Python setup deps 44 | run: python3 -m pip install --upgrade pip setuptools wheel 45 | - name: Replace scarf urls 46 | run: | 47 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 48 | - name: Install TTS 49 | run: | 50 | python3 -m pip install .[all] 51 | python3 setup.py egg_info 52 | - name: Unit tests 53 | run: make test_tts2 54 | -------------------------------------------------------------------------------- /.github/workflows/vocoder_tests.yml: -------------------------------------------------------------------------------- 1 | name: vocoder-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y git make gcc 40 | make system-deps 41 | - name: Install/upgrade Python setup deps 42 | run: python3 -m pip install --upgrade pip setuptools wheel 43 | - name: Install TTS 44 | run: | 45 | python3 -m pip install .[all] 46 | python3 setup.py egg_info 47 | - name: Unit tests 48 | run: make test_vocoder 49 | -------------------------------------------------------------------------------- /.github/workflows/xtts_tests.yml: -------------------------------------------------------------------------------- 1 | name: xtts-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! 
contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y --no-install-recommends git make gcc 40 | sudo apt-get install espeak 41 | sudo apt-get install espeak-ng 42 | make system-deps 43 | - name: Install/upgrade Python setup deps 44 | run: python3 -m pip install --upgrade pip setuptools wheel 45 | - name: Replace scarf urls 46 | run: | 47 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 48 | - name: Install TTS 49 | run: | 50 | python3 -m pip install .[all] 51 | python3 setup.py egg_info 52 | - name: Unit tests 53 | run: make test_xtts 54 | -------------------------------------------------------------------------------- /.github/workflows/zoo_tests2.yml: -------------------------------------------------------------------------------- 1 | name: zoo-tests-2 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened] 9 | jobs: 10 | check_skip: 11 | runs-on: ubuntu-latest 12 | if: "! contains(github.event.head_commit.message, '[ci skip]')" 13 | steps: 14 | - run: echo "${{ github.event.head_commit.message }}" 15 | 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9, "3.10", "3.11"] 22 | experimental: [false] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | architecture: x64 30 | cache: 'pip' 31 | cache-dependency-path: 'requirements*' 32 | - name: check OS 33 | run: cat /etc/os-release 34 | - name: set ENV 35 | run: export TRAINER_TELEMETRY=0 36 | - name: Install dependencies 37 | run: | 38 | sudo apt-get update 39 | sudo apt-get install -y git make gcc 40 | sudo apt-get install espeak espeak-ng 41 | make system-deps 42 | - name: Install/upgrade Python setup deps 43 | run: python3 -m pip install --upgrade pip setuptools wheel 44 | - name: Replace scarf urls 45 | run: | 46 | sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json 47 | - name: Install TTS 48 | run: | 49 | python3 -m pip install .[all] 50 | python3 setup.py egg_info 51 | - name: Unit tests 52 | run: nose2 -F -v -B --with-coverage --coverage TTS tests.zoo_tests.test_models.test_models_offset_2_step_3 53 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: 'https://github.com/pre-commit/pre-commit-hooks' 3 | rev: v2.3.0 4 | hooks: 5 | - id: check-yaml 6 | - id: end-of-file-fixer 7 | - id: trailing-whitespace 8 | - repo: 'https://github.com/psf/black' 9 | rev: 
22.3.0 10 | hooks: 11 | - id: black 12 | language_version: python3 13 | - repo: https://github.com/pycqa/isort 14 | rev: 5.8.0 15 | hooks: 16 | - id: isort 17 | name: isort (python) 18 | - id: isort 19 | name: isort (cython) 20 | types: [cython] 21 | - id: isort 22 | name: isort (pyi) 23 | types: [pyi] 24 | - repo: https://github.com/pycqa/pylint 25 | rev: v2.8.2 26 | hooks: 27 | - id: pylint 28 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | 14 | # Optionally set the version of Python and requirements required to build your docs 15 | python: 16 | install: 17 | - requirements: docs/requirements.txt 18 | - requirements: requirements.txt 19 | 20 | # Build documentation in the docs/ directory with Sphinx 21 | sphinx: 22 | builder: html 23 | configuration: docs/source/conf.py 24 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you want to cite 🐸💬, feel free to use this (but only if you loved it 😊)" 3 | title: "Coqui TTS" 4 | abstract: "A deep learning toolkit for Text-to-Speech, battle-tested in research and production" 5 | date-released: 2021-01-01 6 | authors: 7 | - family-names: "Eren" 8 | given-names: "Gölge" 9 | - name: "The Coqui TTS Team" 10 | version: 1.4 11 | doi: 10.5281/zenodo.6334862 12 | license: "MPL-2.0" 13 | url: "https://www.coqui.ai" 14 | repository-code: "https://github.com/coqui-ai/TTS" 15 | keywords: 16 | - machine learning 17 | - deep learning 18 | - artificial intelligence 19 | - text to speech 20 | - TTS -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE=nvidia/cuda:11.8.0-base-ubuntu22.04 2 | FROM ${BASE} 3 | 4 | RUN apt-get update && apt-get upgrade -y 5 | RUN apt-get install -y --no-install-recommends gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* 6 | RUN pip3 install llvmlite --ignore-installed 7 | 8 | # Install Dependencies: 9 | RUN pip3 install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 10 | RUN rm -rf /root/.cache/pip 11 | 12 | # Copy TTS repository contents: 13 | WORKDIR /root 14 | COPY . 
/root 15 | 16 | RUN make install 17 | 18 | ENTRYPOINT ["tts"] 19 | CMD ["--help"] 20 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE.txt 3 | include requirements.*.txt 4 | include *.cff 5 | include requirements.txt 6 | include TTS/VERSION 7 | recursive-include TTS *.json 8 | recursive-include TTS *.html 9 | recursive-include TTS *.png 10 | recursive-include TTS *.md 11 | recursive-include TTS *.py 12 | recursive-include TTS *.pyx 13 | recursive-include images *.png 14 | recursive-exclude tests * 15 | prune tests* 16 | -------------------------------------------------------------------------------- /TTS/VERSION: -------------------------------------------------------------------------------- 1 | 0.22.0 2 | -------------------------------------------------------------------------------- /TTS/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | with open(os.path.join(os.path.dirname(__file__), "VERSION"), "r", encoding="utf-8") as f: 4 | version = f.read().strip() 5 | 6 | __version__ = version 7 | -------------------------------------------------------------------------------- /TTS/bin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/bin/__init__.py -------------------------------------------------------------------------------- /TTS/bin/collect_env_info.py: -------------------------------------------------------------------------------- 1 | """Get detailed info about the working environment.""" 2 | import os 3 | import platform 4 | import sys 5 | 6 | import numpy 7 | import torch 8 | 9 | sys.path += [os.path.abspath(".."), os.path.abspath(".")] 10 | import json 11 | 12 | import TTS 13 | 14 | 15 | def system_info(): 16 | return { 17 | "OS": platform.system(), 18 | "architecture": platform.architecture(), 19 | "version": platform.version(), 20 | "processor": platform.processor(), 21 | "python": platform.python_version(), 22 | } 23 | 24 | 25 | def cuda_info(): 26 | return { 27 | "GPU": [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())], 28 | "available": torch.cuda.is_available(), 29 | "version": torch.version.cuda, 30 | } 31 | 32 | 33 | def package_info(): 34 | return { 35 | "numpy": numpy.__version__, 36 | "PyTorch_version": torch.__version__, 37 | "PyTorch_debug": torch.version.debug, 38 | "TTS": TTS.__version__, 39 | } 40 | 41 | 42 | def main(): 43 | details = {"System": system_info(), "CUDA": cuda_info(), "Packages": package_info()} 44 | print(json.dumps(details, indent=4, sort_keys=True)) 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /TTS/bin/find_unique_chars.py: -------------------------------------------------------------------------------- 1 | """Find all the unique characters in a dataset""" 2 | import argparse 3 | from argparse import RawTextHelpFormatter 4 | 5 | from TTS.config import load_config 6 | from TTS.tts.datasets import load_tts_samples 7 | 8 | 9 | def main(): 10 | # pylint: disable=bad-option-value 11 | parser = argparse.ArgumentParser( 12 | description="""Find all the unique characters or phonemes in a dataset.\n\n""" 13 | """ 14 | Example runs: 15 | 16 | python TTS/bin/find_unique_chars.py 
--config_path config.json 17 | """, 18 | formatter_class=RawTextHelpFormatter, 19 | ) 20 | parser.add_argument("--config_path", type=str, help="Path to dataset config file.", required=True) 21 | args = parser.parse_args() 22 | 23 | c = load_config(args.config_path) 24 | 25 | # load all datasets 26 | train_items, eval_items = load_tts_samples( 27 | c.datasets, eval_split=True, eval_split_max_size=c.eval_split_max_size, eval_split_size=c.eval_split_size 28 | ) 29 | 30 | items = train_items + eval_items 31 | 32 | texts = "".join(item["text"] for item in items) 33 | chars = set(texts) 34 | lower_chars = filter(lambda c: c.islower(), chars) 35 | chars_force_lower = [c.lower() for c in chars] 36 | chars_force_lower = set(chars_force_lower) 37 | 38 | print(f" > Number of unique characters: {len(chars)}") 39 | print(f" > Unique characters: {''.join(sorted(chars))}") 40 | print(f" > Unique lower characters: {''.join(sorted(lower_chars))}") 41 | print(f" > Unique all forced to lower characters: {''.join(sorted(chars_force_lower))}") 42 | 43 | 44 | if __name__ == "__main__": 45 | main() 46 | -------------------------------------------------------------------------------- /TTS/demos/xtts_ft_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | faster_whisper==0.9.0 2 | gradio==4.7.1 -------------------------------------------------------------------------------- /TTS/encoder/README.md: -------------------------------------------------------------------------------- 1 | ### Speaker Encoder 2 | 3 | This is an implementation of https://arxiv.org/abs/1710.10467. This model can be used for voice and speaker embedding. 4 | 5 | With the code here you can generate d-vectors for both multi-speaker and single-speaker TTS datasets, then visualise and explore them along with the associated audio files in an interactive chart. 6 | 7 | Below is an example showing embedding results of various speakers. You can generate the same plot with the provided notebook as demonstrated in [this video](https://youtu.be/KW3oO7JVa7Q). 8 | 9 | ![](umap.png) 10 | 11 | Download a pretrained model from [Released Models](https://github.com/mozilla/TTS/wiki/Released-Models) page. 12 | 13 | To run the code, you need to follow the same flow as in TTS. 14 | 15 | - Define 'config.json' for your needs. Note that, audio parameters should match your TTS model. 16 | - Example training call ```python speaker_encoder/train.py --config_path speaker_encoder/config.json --data_path ~/Data/Libri-TTS/train-clean-360``` 17 | - Generate embedding vectors ```python speaker_encoder/compute_embeddings.py --use_cuda true /model/path/best_model.pth model/config/path/config.json dataset/path/ output_path``` . This code parses all .wav files at the given dataset path and generates the same folder structure under the output path with the generated embedding files. 
18 | - Watch training on Tensorboard as in TTS 19 | -------------------------------------------------------------------------------- /TTS/encoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/encoder/__init__.py -------------------------------------------------------------------------------- /TTS/encoder/configs/emotion_encoder_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import asdict, dataclass 2 | 3 | from TTS.encoder.configs.base_encoder_config import BaseEncoderConfig 4 | 5 | 6 | @dataclass 7 | class EmotionEncoderConfig(BaseEncoderConfig): 8 | """Defines parameters for Emotion Encoder model.""" 9 | 10 | model: str = "emotion_encoder" 11 | map_classid_to_classname: dict = None 12 | class_name_key: str = "emotion_name" 13 | -------------------------------------------------------------------------------- /TTS/encoder/configs/speaker_encoder_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import asdict, dataclass 2 | 3 | from TTS.encoder.configs.base_encoder_config import BaseEncoderConfig 4 | 5 | 6 | @dataclass 7 | class SpeakerEncoderConfig(BaseEncoderConfig): 8 | """Defines parameters for Speaker Encoder model.""" 9 | 10 | model: str = "speaker_encoder" 11 | class_name_key: str = "speaker_name" 12 | -------------------------------------------------------------------------------- /TTS/encoder/requirements.txt: -------------------------------------------------------------------------------- 1 | umap-learn 2 | numpy>=1.17.0 3 | -------------------------------------------------------------------------------- /TTS/encoder/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/encoder/utils/__init__.py -------------------------------------------------------------------------------- /TTS/encoder/utils/visual.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import umap 5 | 6 | matplotlib.use("Agg") 7 | 8 | 9 | colormap = ( 10 | np.array( 11 | [ 12 | [76, 255, 0], 13 | [0, 127, 70], 14 | [255, 0, 0], 15 | [255, 217, 38], 16 | [0, 135, 255], 17 | [165, 0, 165], 18 | [255, 167, 255], 19 | [0, 255, 255], 20 | [255, 96, 38], 21 | [142, 76, 0], 22 | [33, 0, 127], 23 | [0, 0, 0], 24 | [183, 183, 183], 25 | ], 26 | dtype=float, 27 | ) 28 | / 255 29 | ) 30 | 31 | 32 | def plot_embeddings(embeddings, num_classes_in_batch): 33 | num_utter_per_class = embeddings.shape[0] // num_classes_in_batch 34 | 35 | # if necessary get just the first 10 classes 36 | if num_classes_in_batch > 10: 37 | num_classes_in_batch = 10 38 | embeddings = embeddings[: num_classes_in_batch * num_utter_per_class] 39 | 40 | model = umap.UMAP() 41 | projection = model.fit_transform(embeddings) 42 | ground_truth = np.repeat(np.arange(num_classes_in_batch), num_utter_per_class) 43 | colors = [colormap[i] for i in ground_truth] 44 | fig, ax = plt.subplots(figsize=(16, 10)) 45 | _ = ax.scatter(projection[:, 0], projection[:, 1], c=colors) 46 | plt.gca().set_aspect("equal", "datalim") 47 | plt.title("UMAP projection") 48 | plt.tight_layout() 49 | plt.savefig("umap") 50 | return fig 51 | 
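The `plot_embeddings` helper in `TTS/encoder/utils/visual.py` above projects a batch of utterance embeddings with UMAP and colors each point by its class (speaker). A minimal usage sketch follows; the embedding matrix is random placeholder data and the 256-dimensional size is an assumption for illustration, not a value taken from the repository:

```python
# Hedged example: random embeddings stand in for real d-vectors computed by the
# speaker encoder; only the (num_utterances, embedding_dim) layout matters here.
import numpy as np

from TTS.encoder.utils.visual import plot_embeddings

num_classes_in_batch = 4   # e.g. 4 speakers in the batch
num_utter_per_class = 10   # 10 utterances per speaker
embedding_dim = 256        # assumed size; use your encoder's actual output dim

embeddings = np.random.rand(num_classes_in_batch * num_utter_per_class, embedding_dim)
fig = plot_embeddings(embeddings, num_classes_in_batch)  # also saves a "umap" image to the working directory
```

Note that `umap-learn` (listed in `TTS/encoder/requirements.txt` above) must be installed for the projection to run.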
-------------------------------------------------------------------------------- /TTS/server/README.md: -------------------------------------------------------------------------------- 1 | # :frog: TTS demo server 2 | Before you use the server, make sure you [install](https://github.com/coqui-ai/TTS/tree/dev#install-tts) :frog: TTS properly. Then, you can follow the steps below. 3 | 4 | **Note:** If you install :frog:TTS using ```pip```, you can also use the ```tts-server``` entry point on the terminal. 5 | 6 | Example runs: 7 | 8 | List officially released models. 9 | ```python TTS/server/server.py --list_models ``` 10 | 11 | Run the server with the official models. 12 | ```python TTS/server/server.py --model_name tts_models/en/ljspeech/tacotron2-DCA --vocoder_name vocoder_models/en/ljspeech/multiband-melgan``` 13 | 14 | Run the server with the official models on a GPU. 15 | ```CUDA_VISIBLE_DEVICES="0" python TTS/server/server.py --model_name tts_models/en/ljspeech/tacotron2-DCA --vocoder_name vocoder_models/en/ljspeech/multiband-melgan --use_cuda True``` 16 | 17 | Run the server with custom models. 18 | ```python TTS/server/server.py --tts_checkpoint /path/to/tts/model.pth --tts_config /path/to/tts/config.json --vocoder_checkpoint /path/to/vocoder/model.pth --vocoder_config /path/to/vocoder/config.json``` 19 | -------------------------------------------------------------------------------- /TTS/server/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/server/__init__.py -------------------------------------------------------------------------------- /TTS/server/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "tts_path":"/media/erogol/data_ssd/Models/libri_tts/5049/", // tts model root folder 3 | "tts_file":"best_model.pth", // tts checkpoint file 4 | "tts_config":"config.json", // tts config.json file 5 | "tts_speakers": null, // json file listing speaker ids. null if no speaker embedding.
6 | "vocoder_config":null, 7 | "vocoder_file": null, 8 | "is_wavernn_batched":true, 9 | "port": 5002, 10 | "use_cuda": true, 11 | "debug": true 12 | } 13 | -------------------------------------------------------------------------------- /TTS/server/static/coqui-log-green-TTS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/server/static/coqui-log-green-TTS.png -------------------------------------------------------------------------------- /TTS/tts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/__init__.py -------------------------------------------------------------------------------- /TTS/tts/configs/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | from inspect import isclass 4 | 5 | # import all files under configs/ 6 | # configs_dir = os.path.dirname(__file__) 7 | # for file in os.listdir(configs_dir): 8 | # path = os.path.join(configs_dir, file) 9 | # if not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)): 10 | # config_name = file[: file.find(".py")] if file.endswith(".py") else file 11 | # module = importlib.import_module("TTS.tts.configs." + config_name) 12 | # for attribute_name in dir(module): 13 | # attribute = getattr(module, attribute_name) 14 | 15 | # if isclass(attribute): 16 | # # Add the class to this package's variables 17 | # globals()[attribute_name] = attribute 18 | -------------------------------------------------------------------------------- /TTS/tts/configs/tacotron2_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from TTS.tts.configs.tacotron_config import TacotronConfig 4 | 5 | 6 | @dataclass 7 | class Tacotron2Config(TacotronConfig): 8 | """Defines parameters for Tacotron2 based models. 9 | 10 | Example: 11 | 12 | >>> from TTS.tts.configs.tacotron2_config import Tacotron2Config 13 | >>> config = Tacotron2Config() 14 | 15 | Check `TacotronConfig` for argument descriptions. 
16 | """ 17 | 18 | model: str = "tacotron2" 19 | out_channels: int = 80 20 | encoder_in_features: int = 512 21 | decoder_in_features: int = 512 22 | -------------------------------------------------------------------------------- /TTS/tts/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from TTS.tts.layers.losses import * 2 | -------------------------------------------------------------------------------- /TTS/tts/layers/align_tts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/align_tts/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/align_tts/duration_predictor.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from TTS.tts.layers.generic.pos_encoding import PositionalEncoding 4 | from TTS.tts.layers.generic.transformer import FFTransformerBlock 5 | 6 | 7 | class DurationPredictor(nn.Module): 8 | def __init__(self, num_chars, hidden_channels, hidden_channels_ffn, num_heads): 9 | super().__init__() 10 | self.embed = nn.Embedding(num_chars, hidden_channels) 11 | self.pos_enc = PositionalEncoding(hidden_channels, dropout_p=0.1) 12 | self.FFT = FFTransformerBlock(hidden_channels, num_heads, hidden_channels_ffn, 2, 0.1) 13 | self.out_layer = nn.Conv1d(hidden_channels, 1, 1) 14 | 15 | def forward(self, text, text_lengths): 16 | # B, L -> B, L 17 | emb = self.embed(text) 18 | emb = self.pos_enc(emb.transpose(1, 2)) 19 | x = self.FFT(emb, text_lengths) 20 | x = self.out_layer(x).squeeze(-1) 21 | return x 22 | -------------------------------------------------------------------------------- /TTS/tts/layers/align_tts/mdn.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class MDNBlock(nn.Module): 5 | """Mixture of Density Network implementation 6 | https://arxiv.org/pdf/2003.01950.pdf 7 | """ 8 | 9 | def __init__(self, in_channels, out_channels): 10 | super().__init__() 11 | self.out_channels = out_channels 12 | self.conv1 = nn.Conv1d(in_channels, in_channels, 1) 13 | self.norm = nn.LayerNorm(in_channels) 14 | self.relu = nn.ReLU() 15 | self.dropout = nn.Dropout(0.1) 16 | self.conv2 = nn.Conv1d(in_channels, out_channels, 1) 17 | 18 | def forward(self, x): 19 | o = self.conv1(x) 20 | o = o.transpose(1, 2) 21 | o = self.norm(o) 22 | o = o.transpose(1, 2) 23 | o = self.relu(o) 24 | o = self.dropout(o) 25 | mu_sigma = self.conv2(o) 26 | # TODO: check this sigmoid 27 | # mu = torch.sigmoid(mu_sigma[:, :self.out_channels//2, :]) 28 | mu = mu_sigma[:, : self.out_channels // 2, :] 29 | log_sigma = mu_sigma[:, self.out_channels // 2 :, :] 30 | return mu, log_sigma 31 | -------------------------------------------------------------------------------- /TTS/tts/layers/bark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/bark/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/bark/hubert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/bark/hubert/__init__.py 
-------------------------------------------------------------------------------- /TTS/tts/layers/bark/hubert/hubert_manager.py: -------------------------------------------------------------------------------- 1 | # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer 2 | 3 | import os.path 4 | import shutil 5 | import urllib.request 6 | 7 | import huggingface_hub 8 | 9 | 10 | class HubertManager: 11 | @staticmethod 12 | def make_sure_hubert_installed( 13 | download_url: str = "https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt", model_path: str = "" 14 | ): 15 | if not os.path.isfile(model_path): 16 | print("Downloading HuBERT base model") 17 | urllib.request.urlretrieve(download_url, model_path) 18 | print("Downloaded HuBERT") 19 | return model_path 20 | return None 21 | 22 | @staticmethod 23 | def make_sure_tokenizer_installed( 24 | model: str = "quantifier_hubert_base_ls960_14.pth", 25 | repo: str = "GitMylo/bark-voice-cloning", 26 | model_path: str = "", 27 | ): 28 | model_dir = os.path.dirname(model_path) 29 | if not os.path.isfile(model_path): 30 | print("Downloading HuBERT custom tokenizer") 31 | huggingface_hub.hf_hub_download(repo, model, local_dir=model_dir, local_dir_use_symlinks=False) 32 | shutil.move(os.path.join(model_dir, model), model_path) 33 | print("Downloaded tokenizer") 34 | return model_path 35 | return None 36 | -------------------------------------------------------------------------------- /TTS/tts/layers/delightful_tts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/delightful_tts/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/feed_forward/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/feed_forward/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/feed_forward/duration_predictor.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from TTS.tts.layers.generic.res_conv_bn import Conv1dBN 4 | 5 | 6 | class DurationPredictor(nn.Module): 7 | """Speedy Speech duration predictor model. 8 | Predicts phoneme durations from encoder outputs. 9 | 10 | Note: 11 | Outputs interpreted as log(durations) 12 | To get actual durations, do exp transformation 13 | 14 | conv_BN_4x1 -> conv_BN_3x1 -> conv_BN_1x1 -> conv_1x1 15 | 16 | Args: 17 | hidden_channels (int): number of channels in the inner layers. 
18 | """ 19 | 20 | def __init__(self, hidden_channels): 21 | super().__init__() 22 | 23 | self.layers = nn.ModuleList( 24 | [ 25 | Conv1dBN(hidden_channels, hidden_channels, 4, 1), 26 | Conv1dBN(hidden_channels, hidden_channels, 3, 1), 27 | Conv1dBN(hidden_channels, hidden_channels, 1, 1), 28 | nn.Conv1d(hidden_channels, 1, 1), 29 | ] 30 | ) 31 | 32 | def forward(self, x, x_mask): 33 | """ 34 | Shapes: 35 | x: [B, C, T] 36 | x_mask: [B, 1, T] 37 | """ 38 | o = x 39 | for layer in self.layers: 40 | o = layer(o) * x_mask 41 | return o 42 | -------------------------------------------------------------------------------- /TTS/tts/layers/generic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/generic/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/generic/gated_conv.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from .normalization import LayerNorm 4 | 5 | 6 | class GatedConvBlock(nn.Module): 7 | """Gated convolutional block as in https://arxiv.org/pdf/1612.08083.pdf 8 | Args: 9 | in_out_channels (int): number of input/output channels. 10 | kernel_size (int): convolution kernel size. 11 | dropout_p (float): dropout rate. 12 | """ 13 | 14 | def __init__(self, in_out_channels, kernel_size, dropout_p, num_layers): 15 | super().__init__() 16 | # class arguments 17 | self.dropout_p = dropout_p 18 | self.num_layers = num_layers 19 | # define layers 20 | self.conv_layers = nn.ModuleList() 21 | self.norm_layers = nn.ModuleList() 22 | self.layers = nn.ModuleList() 23 | for _ in range(num_layers): 24 | self.conv_layers += [nn.Conv1d(in_out_channels, 2 * in_out_channels, kernel_size, padding=kernel_size // 2)] 25 | self.norm_layers += [LayerNorm(2 * in_out_channels)] 26 | 27 | def forward(self, x, x_mask): 28 | o = x 29 | res = x 30 | for idx in range(self.num_layers): 31 | o = nn.functional.dropout(o, p=self.dropout_p, training=self.training) 32 | o = self.conv_layers[idx](o * x_mask) 33 | o = self.norm_layers[idx](o) 34 | o = nn.functional.glu(o, dim=1) 35 | o = res + o 36 | res = o 37 | return o 38 | -------------------------------------------------------------------------------- /TTS/tts/layers/glow_tts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/glow_tts/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/overflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/overflow/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/tacotron/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/layers/tacotron/__init__.py -------------------------------------------------------------------------------- /TTS/tts/layers/tortoise/random_latent_generator.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | import torch.nn as nn 
5 | import torch.nn.functional as F 6 | 7 | 8 | def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2**0.5): 9 | if bias is not None: 10 | rest_dim = [1] * (input.ndim - bias.ndim - 1) 11 | return ( 12 | F.leaky_relu( 13 | input + bias.view(1, bias.shape[0], *rest_dim), 14 | negative_slope=negative_slope, 15 | ) 16 | * scale 17 | ) 18 | else: 19 | return F.leaky_relu(input, negative_slope=0.2) * scale 20 | 21 | 22 | class EqualLinear(nn.Module): 23 | def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1): 24 | super().__init__() 25 | self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) 26 | if bias: 27 | self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) 28 | else: 29 | self.bias = None 30 | self.scale = (1 / math.sqrt(in_dim)) * lr_mul 31 | self.lr_mul = lr_mul 32 | 33 | def forward(self, input): 34 | out = F.linear(input, self.weight * self.scale) 35 | out = fused_leaky_relu(out, self.bias * self.lr_mul) 36 | return out 37 | 38 | 39 | class RandomLatentConverter(nn.Module): 40 | def __init__(self, channels): 41 | super().__init__() 42 | self.layers = nn.Sequential( 43 | *[EqualLinear(channels, channels, lr_mul=0.1) for _ in range(5)], nn.Linear(channels, channels) 44 | ) 45 | self.channels = channels 46 | 47 | def forward(self, ref): 48 | r = torch.randn(ref.shape[0], self.channels, device=ref.device) 49 | y = self.layers(r) 50 | return y 51 | 52 | 53 | if __name__ == "__main__": 54 | model = RandomLatentConverter(512) 55 | model(torch.randn(5, 512)) 56 | -------------------------------------------------------------------------------- /TTS/tts/layers/tortoise/tokenizer.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from tokenizers import Tokenizer 5 | 6 | from TTS.tts.utils.text.cleaners import english_cleaners 7 | 8 | DEFAULT_VOCAB_FILE = os.path.join( 9 | os.path.dirname(os.path.realpath(__file__)), "../../utils/assets/tortoise/tokenizer.json" 10 | ) 11 | 12 | 13 | class VoiceBpeTokenizer: 14 | def __init__(self, vocab_file=DEFAULT_VOCAB_FILE, vocab_str=None): 15 | self.tokenizer = None 16 | if vocab_file is not None: 17 | self.tokenizer = Tokenizer.from_file(vocab_file) 18 | if vocab_str is not None: 19 | self.tokenizer = Tokenizer.from_str(vocab_str) 20 | 21 | def preprocess_text(self, txt): 22 | txt = english_cleaners(txt) 23 | return txt 24 | 25 | def encode(self, txt): 26 | txt = self.preprocess_text(txt) 27 | txt = txt.replace(" ", "[SPACE]") 28 | return self.tokenizer.encode(txt).ids 29 | 30 | def decode(self, seq): 31 | if isinstance(seq, torch.Tensor): 32 | seq = seq.cpu().numpy() 33 | txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(" ", "") 34 | txt = txt.replace("[SPACE]", " ") 35 | txt = txt.replace("[STOP]", "") 36 | txt = txt.replace("[UNK]", "") 37 | return txt 38 | -------------------------------------------------------------------------------- /TTS/tts/layers/xtts/xtts_manager.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class SpeakerManager(): 4 | def __init__(self, speaker_file_path=None): 5 | self.speakers = torch.load(speaker_file_path) 6 | 7 | @property 8 | def name_to_id(self): 9 | return self.speakers.keys() 10 | 11 | @property 12 | def num_speakers(self): 13 | return len(self.name_to_id) 14 | 15 | @property 16 | def speaker_names(self): 17 | return list(self.name_to_id.keys()) 18 | 19 | 20 | class LanguageManager(): 21 | def __init__(self, 
config): 22 | self.langs = config["languages"] 23 | 24 | @property 25 | def name_to_id(self): 26 | return self.langs 27 | 28 | @property 29 | def num_languages(self): 30 | return len(self.name_to_id) 31 | 32 | @property 33 | def language_names(self): 34 | return list(self.name_to_id) 35 | -------------------------------------------------------------------------------- /TTS/tts/models/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | from TTS.utils.generic_utils import find_module 4 | 5 | 6 | def setup_model(config: "Coqpit", samples: Union[List[List], List[Dict]] = None) -> "BaseTTS": 7 | print(" > Using model: {}".format(config.model)) 8 | # fetch the right model implementation. 9 | if "base_model" in config and config["base_model"] is not None: 10 | MyModel = find_module("TTS.tts.models", config.base_model.lower()) 11 | else: 12 | MyModel = find_module("TTS.tts.models", config.model.lower()) 13 | model = MyModel.init_from_config(config=config, samples=samples) 14 | return model 15 | -------------------------------------------------------------------------------- /TTS/tts/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/measures.py: -------------------------------------------------------------------------------- 1 | def alignment_diagonal_score(alignments, binary=False): 2 | """ 3 | Compute how diagonal alignment predictions are. It is useful 4 | to measure the alignment consistency of a model 5 | Args: 6 | alignments (torch.Tensor): batch of alignments. 7 | binary (bool): if True, ignore scores and consider attention 8 | as a binary mask. 9 | Shape: 10 | - alignments : :math:`[B, T_de, T_en]` 11 | """ 12 | maxs = alignments.max(dim=1)[0] 13 | if binary: 14 | maxs[maxs > 0] = 1 15 | return maxs.mean(dim=1).mean(dim=0).item() 16 | -------------------------------------------------------------------------------- /TTS/tts/utils/monotonic_align/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/monotonic_align/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/monotonic_align/core.pyx: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | cimport cython 4 | cimport numpy as np 5 | 6 | from cython.parallel import prange 7 | 8 | 9 | @cython.boundscheck(False) 10 | @cython.wraparound(False) 11 | cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_x, int t_y, float max_neg_val) nogil: 12 | cdef int x 13 | cdef int y 14 | cdef float v_prev 15 | cdef float v_cur 16 | cdef float tmp 17 | cdef int index = t_x - 1 18 | 19 | for y in range(t_y): 20 | for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): 21 | if x == y: 22 | v_cur = max_neg_val 23 | else: 24 | v_cur = value[x, y-1] 25 | if x == 0: 26 | if y == 0: 27 | v_prev = 0. 
28 | else: 29 | v_prev = max_neg_val 30 | else: 31 | v_prev = value[x-1, y-1] 32 | value[x, y] = max(v_cur, v_prev) + value[x, y] 33 | 34 | for y in range(t_y - 1, -1, -1): 35 | path[index, y] = 1 36 | if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): 37 | index = index - 1 38 | 39 | 40 | @cython.boundscheck(False) 41 | @cython.wraparound(False) 42 | cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: 43 | cdef int b = values.shape[0] 44 | 45 | cdef int i 46 | for i in prange(b, nogil=True): 47 | maximum_path_each(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val) 48 | -------------------------------------------------------------------------------- /TTS/tts/utils/monotonic_align/setup.py: -------------------------------------------------------------------------------- 1 | # from distutils.core import setup 2 | # from Cython.Build import cythonize 3 | # import numpy 4 | 5 | # setup(name='monotonic_align', 6 | # ext_modules=cythonize("core.pyx"), 7 | # include_dirs=[numpy.get_include()]) 8 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/__init__.py: -------------------------------------------------------------------------------- 1 | from TTS.tts.utils.text.tokenizer import TTSTokenizer 2 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/bangla/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/bangla/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/belarusian/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/belarusian/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/belarusian/phonemizer.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | finder = None 4 | 5 | 6 | def init(): 7 | try: 8 | import jpype 9 | import jpype.imports 10 | except ModuleNotFoundError: 11 | raise ModuleNotFoundError( 12 | "Belarusian phonemizer requires to install module 'jpype1' manually. Try `pip install jpype1`." 
13 | ) 14 | 15 | try: 16 | jar_path = os.environ["BEL_FANETYKA_JAR"] 17 | except KeyError: 18 | raise KeyError("You need to define 'BEL_FANETYKA_JAR' environment variable as path to the fanetyka.jar file") 19 | 20 | jpype.startJVM(classpath=[jar_path]) 21 | 22 | # import the Java modules 23 | from org.alex73.korpus.base import GrammarDB2, GrammarFinder 24 | 25 | grammar_db = GrammarDB2.initializeFromJar() 26 | global finder 27 | finder = GrammarFinder(grammar_db) 28 | 29 | 30 | def belarusian_text_to_phonemes(text: str) -> str: 31 | # Initialize only on first run 32 | if finder is None: 33 | init() 34 | 35 | from org.alex73.fanetyka.impl import FanetykaText 36 | 37 | return str(FanetykaText(finder, text).ipa) 38 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/chinese_mandarin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/chinese_mandarin/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/chinese_mandarin/phonemizer.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import jieba 4 | import pypinyin 5 | 6 | from .pinyinToPhonemes import PINYIN_DICT 7 | 8 | 9 | def _chinese_character_to_pinyin(text: str) -> List[str]: 10 | pinyins = pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True) 11 | pinyins_flat_list = [item for sublist in pinyins for item in sublist] 12 | return pinyins_flat_list 13 | 14 | 15 | def _chinese_pinyin_to_phoneme(pinyin: str) -> str: 16 | segment = pinyin[:-1] 17 | tone = pinyin[-1] 18 | phoneme = PINYIN_DICT.get(segment, [""])[0] 19 | return phoneme + tone 20 | 21 | 22 | def chinese_text_to_phonemes(text: str, seperator: str = "|") -> str: 23 | tokenized_text = jieba.cut(text, HMM=False) 24 | tokenized_text = " ".join(tokenized_text) 25 | pinyined_text: List[str] = _chinese_character_to_pinyin(tokenized_text) 26 | 27 | results: List[str] = [] 28 | 29 | for token in pinyined_text: 30 | if token[-1] in "12345": # TODO transform to is_pinyin() 31 | pinyin_phonemes = _chinese_pinyin_to_phoneme(token) 32 | 33 | results += list(pinyin_phonemes) 34 | else: # is ponctuation or other 35 | results += list(token) 36 | 37 | return seperator.join(results) 38 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/english/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/english/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/english/abbreviations.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | # List of (regular expression, replacement) pairs for abbreviations in english: 4 | abbreviations_en = [ 5 | (re.compile("\\b%s\\." 
% x[0], re.IGNORECASE), x[1]) 6 | for x in [ 7 | ("mrs", "misess"), 8 | ("mr", "mister"), 9 | ("dr", "doctor"), 10 | ("st", "saint"), 11 | ("co", "company"), 12 | ("jr", "junior"), 13 | ("maj", "major"), 14 | ("gen", "general"), 15 | ("drs", "doctors"), 16 | ("rev", "reverend"), 17 | ("lt", "lieutenant"), 18 | ("hon", "honorable"), 19 | ("sgt", "sergeant"), 20 | ("capt", "captain"), 21 | ("esq", "esquire"), 22 | ("ltd", "limited"), 23 | ("col", "colonel"), 24 | ("ft", "fort"), 25 | ] 26 | ] 27 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/english/time_norm.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import inflect 4 | 5 | _inflect = inflect.engine() 6 | 7 | _time_re = re.compile( 8 | r"""\b 9 | ((0?[0-9])|(1[0-1])|(1[2-9])|(2[0-3])) # hours 10 | : 11 | ([0-5][0-9]) # minutes 12 | \s*(a\\.m\\.|am|pm|p\\.m\\.|a\\.m|p\\.m)? # am/pm 13 | \b""", 14 | re.IGNORECASE | re.X, 15 | ) 16 | 17 | 18 | def _expand_num(n: int) -> str: 19 | return _inflect.number_to_words(n) 20 | 21 | 22 | def _expand_time_english(match: "re.Match") -> str: 23 | hour = int(match.group(1)) 24 | past_noon = hour >= 12 25 | time = [] 26 | if hour > 12: 27 | hour -= 12 28 | elif hour == 0: 29 | hour = 12 30 | past_noon = True 31 | time.append(_expand_num(hour)) 32 | 33 | minute = int(match.group(6)) 34 | if minute > 0: 35 | if minute < 10: 36 | time.append("oh") 37 | time.append(_expand_num(minute)) 38 | am_pm = match.group(7) 39 | if am_pm is None: 40 | time.append("p m" if past_noon else "a m") 41 | else: 42 | time.extend(list(am_pm.replace(".", ""))) 43 | return " ".join(time) 44 | 45 | 46 | def expand_time_english(text: str) -> str: 47 | return re.sub(_time_re, _expand_time_english, text) 48 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/french/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/french/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/french/abbreviations.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | # List of (regular expression, replacement) pairs for abbreviations in french: 4 | abbreviations_fr = [ 5 | (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) 6 | for x in [ 7 | ("M", "monsieur"), 8 | ("Mlle", "mademoiselle"), 9 | ("Mlles", "mesdemoiselles"), 10 | ("Mme", "Madame"), 11 | ("Mmes", "Mesdames"), 12 | ("N.B", "nota bene"), 13 | ("M", "monsieur"), 14 | ("p.c.q", "parce que"), 15 | ("Pr", "professeur"), 16 | ("qqch", "quelque chose"), 17 | ("rdv", "rendez-vous"), 18 | ("max", "maximum"), 19 | ("min", "minimum"), 20 | ("no", "numéro"), 21 | ("adr", "adresse"), 22 | ("dr", "docteur"), 23 | ("st", "saint"), 24 | ("co", "companie"), 25 | ("jr", "junior"), 26 | ("sgt", "sergent"), 27 | ("capt", "capitain"), 28 | ("col", "colonel"), 29 | ("av", "avenue"), 30 | ("av. J.-C", "avant Jésus-Christ"), 31 | ("apr. 
J.-C", "après Jésus-Christ"), 32 | ("art", "article"), 33 | ("boul", "boulevard"), 34 | ("c.-à-d", "c’est-à-dire"), 35 | ("etc", "et cetera"), 36 | ("ex", "exemple"), 37 | ("excl", "exclusivement"), 38 | ("boul", "boulevard"), 39 | ] 40 | ] + [ 41 | (re.compile("\\b%s" % x[0]), x[1]) 42 | for x in [ 43 | ("Mlle", "mademoiselle"), 44 | ("Mlles", "mesdemoiselles"), 45 | ("Mme", "Madame"), 46 | ("Mmes", "Mesdames"), 47 | ] 48 | ] 49 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/japanese/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/japanese/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/korean/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/tts/utils/text/korean/__init__.py -------------------------------------------------------------------------------- /TTS/tts/utils/text/korean/ko_dictionary.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # Add the word you want to the dictionary. 3 | etc_dictionary = {"1+1": "원플러스원", "2+1": "투플러스원"} 4 | 5 | 6 | english_dictionary = { 7 | "KOREA": "코리아", 8 | "IDOL": "아이돌", 9 | "IT": "아이티", 10 | "IQ": "아이큐", 11 | "UP": "업", 12 | "DOWN": "다운", 13 | "PC": "피씨", 14 | "CCTV": "씨씨티비", 15 | "SNS": "에스엔에스", 16 | "AI": "에이아이", 17 | "CEO": "씨이오", 18 | "A": "에이", 19 | "B": "비", 20 | "C": "씨", 21 | "D": "디", 22 | "E": "이", 23 | "F": "에프", 24 | "G": "지", 25 | "H": "에이치", 26 | "I": "아이", 27 | "J": "제이", 28 | "K": "케이", 29 | "L": "엘", 30 | "M": "엠", 31 | "N": "엔", 32 | "O": "오", 33 | "P": "피", 34 | "Q": "큐", 35 | "R": "알", 36 | "S": "에스", 37 | "T": "티", 38 | "U": "유", 39 | "V": "브이", 40 | "W": "더블유", 41 | "X": "엑스", 42 | "Y": "와이", 43 | "Z": "제트", 44 | } 45 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/korean/korean.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # Code based on https://github.com/carpedm20/multi-speaker-tacotron-tensorflow/blob/master/text/korean.py 3 | import re 4 | 5 | from TTS.tts.utils.text.korean.ko_dictionary import english_dictionary, etc_dictionary 6 | 7 | 8 | def normalize(text): 9 | text = text.strip() 10 | text = re.sub("[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]", "", text) 11 | text = normalize_with_dictionary(text, etc_dictionary) 12 | text = normalize_english(text) 13 | text = text.lower() 14 | return text 15 | 16 | 17 | def normalize_with_dictionary(text, dic): 18 | if any(key in text for key in dic.keys()): 19 | pattern = re.compile("|".join(re.escape(key) for key in dic.keys())) 20 | return pattern.sub(lambda x: dic[x.group()], text) 21 | return text 22 | 23 | 24 | def normalize_english(text): 25 | def fn(m): 26 | word = m.group() 27 | if word in english_dictionary: 28 | return english_dictionary.get(word) 29 | return word 30 | 31 | text = re.sub("([A-Za-z]+)", fn, text) 32 | return text 33 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/korean/phonemizer.py: -------------------------------------------------------------------------------- 1 | from jamo import hangul_to_jamo 2 | 3 | from 
TTS.tts.utils.text.korean.korean import normalize 4 | 5 | g2p = None 6 | 7 | 8 | def korean_text_to_phonemes(text, character: str = "hangeul") -> str: 9 | """ 10 | 11 | The input and output values look the same, but they are different in Unicode. 12 | 13 | example : 14 | 15 | input = '하늘' (Unicode : \ud558\ub298), (하 + 늘) 16 | output = '하늘' (Unicode :\u1112\u1161\u1102\u1173\u11af), (ᄒ + ᅡ + ᄂ + ᅳ + ᆯ) 17 | 18 | """ 19 | global g2p # pylint: disable=global-statement 20 | if g2p is None: 21 | from g2pkk import G2p 22 | 23 | g2p = G2p() 24 | 25 | if character == "english": 26 | from anyascii import anyascii 27 | 28 | text = normalize(text) 29 | text = g2p(text) 30 | text = anyascii(text) 31 | return text 32 | 33 | text = normalize(text) 34 | text = g2p(text) 35 | text = list(hangul_to_jamo(text)) # '하늘' --> ['ᄒ', 'ᅡ', 'ᄂ', 'ᅳ', 'ᆯ'] 36 | return "".join(text) 37 | -------------------------------------------------------------------------------- /TTS/tts/utils/text/phonemizers/belarusian_phonemizer.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | from TTS.tts.utils.text.belarusian.phonemizer import belarusian_text_to_phonemes 4 | from TTS.tts.utils.text.phonemizers.base import BasePhonemizer 5 | 6 | _DEF_BE_PUNCS = ",!." # TODO 7 | 8 | 9 | class BEL_Phonemizer(BasePhonemizer): 10 | """🐸TTS be phonemizer using functions in `TTS.tts.utils.text.belarusian.phonemizer` 11 | 12 | Args: 13 | punctuations (str): 14 | Set of characters to be treated as punctuation. Defaults to `_DEF_BE_PUNCS`. 15 | 16 | keep_puncs (bool): 17 | If True, keep the punctuations after phonemization. Defaults to False. 18 | """ 19 | 20 | language = "be" 21 | 22 | def __init__(self, punctuations=_DEF_BE_PUNCS, keep_puncs=True, **kwargs): # pylint: disable=unused-argument 23 | super().__init__(self.language, punctuations=punctuations, keep_puncs=keep_puncs) 24 | 25 | @staticmethod 26 | def name(): 27 | return "be_phonemizer" 28 | 29 | @staticmethod 30 | def phonemize_be(text: str, separator: str = "|") -> str: # pylint: disable=unused-argument 31 | return belarusian_text_to_phonemes(text) 32 | 33 | def _phonemize(self, text, separator): 34 | return self.phonemize_be(text, separator) 35 | 36 | @staticmethod 37 | def supported_languages() -> Dict: 38 | return {"be": "Belarusian"} 39 | 40 | def version(self) -> str: 41 | return "0.0.1" 42 | 43 | def is_available(self) -> bool: 44 | return True 45 | 46 | 47 | if __name__ == "__main__": 48 | txt = "тэст" 49 | e = BEL_Phonemizer() 50 | print(e.supported_languages()) 51 | print(e.version()) 52 | print(e.language) 53 | print(e.name()) 54 | print(e.is_available()) 55 | print("`" + e.phonemize(txt) + "`") 56 | -------------------------------------------------------------------------------- /TTS/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/utils/__init__.py -------------------------------------------------------------------------------- /TTS/utils/audio/__init__.py: -------------------------------------------------------------------------------- 1 | from TTS.utils.audio.processor import AudioProcessor 2 | -------------------------------------------------------------------------------- /TTS/utils/distribute.py: -------------------------------------------------------------------------------- 1 | # edited from 
https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/distributed.py 2 | import torch 3 | import torch.distributed as dist 4 | 5 | 6 | def reduce_tensor(tensor, num_gpus): 7 | rt = tensor.clone() 8 | dist.all_reduce(rt, op=dist.reduce_op.SUM) 9 | rt /= num_gpus 10 | return rt 11 | 12 | 13 | def init_distributed(rank, num_gpus, group_name, dist_backend, dist_url): 14 | assert torch.cuda.is_available(), "Distributed mode requires CUDA." 15 | 16 | # Set cuda device so everything is done on the right GPU. 17 | torch.cuda.set_device(rank % torch.cuda.device_count()) 18 | 19 | # Initialize distributed communication 20 | dist.init_process_group(dist_backend, init_method=dist_url, world_size=num_gpus, rank=rank, group_name=group_name) 21 | -------------------------------------------------------------------------------- /TTS/utils/training.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | def check_update(model, grad_clip, ignore_stopnet=False, amp_opt_params=None): 6 | r"""Check model gradient against unexpected jumps and failures""" 7 | skip_flag = False 8 | if ignore_stopnet: 9 | if not amp_opt_params: 10 | grad_norm = torch.nn.utils.clip_grad_norm_( 11 | [param for name, param in model.named_parameters() if "stopnet" not in name], grad_clip 12 | ) 13 | else: 14 | grad_norm = torch.nn.utils.clip_grad_norm_(amp_opt_params, grad_clip) 15 | else: 16 | if not amp_opt_params: 17 | grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) 18 | else: 19 | grad_norm = torch.nn.utils.clip_grad_norm_(amp_opt_params, grad_clip) 20 | 21 | # compatibility with different torch versions 22 | if isinstance(grad_norm, float): 23 | if np.isinf(grad_norm): 24 | print(" | > Gradient is INF !!") 25 | skip_flag = True 26 | else: 27 | if torch.isinf(grad_norm): 28 | print(" | > Gradient is INF !!") 29 | skip_flag = True 30 | return grad_norm, skip_flag 31 | 32 | 33 | def gradual_training_scheduler(global_step, config): 34 | """Setup the gradual training schedule wrt number 35 | of active GPUs""" 36 | num_gpus = torch.cuda.device_count() 37 | if num_gpus == 0: 38 | num_gpus = 1 39 | new_values = None 40 | # we set the scheduling wrt num_gpus 41 | for values in config.gradual_training: 42 | if global_step * num_gpus >= values[0]: 43 | new_values = values 44 | return new_values[1], new_values[2] 45 | -------------------------------------------------------------------------------- /TTS/vc/configs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vc/configs/__init__.py -------------------------------------------------------------------------------- /TTS/vc/models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import re 3 | from typing import Dict, List, Union 4 | 5 | 6 | def to_camel(text): 7 | text = text.capitalize() 8 | return re.sub(r"(?!^)_([a-zA-Z])", lambda m: m.group(1).upper(), text) 9 | 10 | 11 | def setup_model(config: "Coqpit", samples: Union[List[List], List[Dict]] = None) -> "BaseVC": 12 | print(" > Using model: {}".format(config.model)) 13 | # fetch the right model implementation. 
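# NOTE: only FreeVC is handled here for now; a new voice conversion model would need its own branch below.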
14 | if "model" in config and config["model"].lower() == "freevc": 15 | MyModel = importlib.import_module("TTS.vc.models.freevc").FreeVC 16 | model = MyModel.init_from_config(config, samples) 17 | return model 18 | -------------------------------------------------------------------------------- /TTS/vc/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vc/modules/__init__.py -------------------------------------------------------------------------------- /TTS/vc/modules/freevc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vc/modules/freevc/__init__.py -------------------------------------------------------------------------------- /TTS/vc/modules/freevc/speaker_encoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vc/modules/freevc/speaker_encoder/__init__.py -------------------------------------------------------------------------------- /TTS/vc/modules/freevc/speaker_encoder/hparams.py: -------------------------------------------------------------------------------- 1 | ## Mel-filterbank 2 | mel_window_length = 25 # In milliseconds 3 | mel_window_step = 10 # In milliseconds 4 | mel_n_channels = 40 5 | 6 | 7 | ## Audio 8 | sampling_rate = 16000 9 | # Number of spectrogram frames in a partial utterance 10 | partials_n_frames = 160 # 1600 ms 11 | 12 | 13 | ## Voice Activation Detection 14 | # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. 15 | # This sets the granularity of the VAD. Should not need to be changed. 16 | vad_window_length = 30 # In milliseconds 17 | # Number of frames to average together when performing the moving average smoothing. 18 | # The larger this value, the larger the VAD variations must be to not get smoothed out. 19 | vad_moving_average_width = 8 20 | # Maximum number of consecutive silent frames a segment can have. 
21 | vad_max_silence_length = 6 22 | 23 | 24 | ## Audio volume normalization 25 | audio_norm_target_dBFS = -30 26 | 27 | 28 | ## Model parameters 29 | model_hidden_size = 256 30 | model_embedding_size = 256 31 | model_num_layers = 3 32 | -------------------------------------------------------------------------------- /TTS/vc/modules/freevc/wavlm/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import urllib.request 3 | 4 | import torch 5 | 6 | from TTS.utils.generic_utils import get_user_data_dir 7 | from TTS.vc.modules.freevc.wavlm.wavlm import WavLM, WavLMConfig 8 | 9 | model_uri = "https://github.com/coqui-ai/TTS/releases/download/v0.13.0_models/WavLM-Large.pt" 10 | 11 | 12 | def get_wavlm(device="cpu"): 13 | """Download the model and return the model object.""" 14 | 15 | output_path = get_user_data_dir("tts") 16 | 17 | output_path = os.path.join(output_path, "wavlm") 18 | if not os.path.exists(output_path): 19 | os.makedirs(output_path) 20 | 21 | output_path = os.path.join(output_path, "WavLM-Large.pt") 22 | if not os.path.exists(output_path): 23 | print(f" > Downloading WavLM model to {output_path} ...") 24 | urllib.request.urlretrieve(model_uri, output_path) 25 | 26 | checkpoint = torch.load(output_path, map_location=torch.device(device)) 27 | cfg = WavLMConfig(checkpoint["cfg"]) 28 | wavlm = WavLM(cfg).to(device) 29 | wavlm.load_state_dict(checkpoint["model"]) 30 | wavlm.eval() 31 | return wavlm 32 | 33 | 34 | if __name__ == "__main__": 35 | wavlm = get_wavlm() 36 | -------------------------------------------------------------------------------- /TTS/vocoder/README.md: -------------------------------------------------------------------------------- 1 | # Mozilla TTS Vocoders (Experimental) 2 | 3 | Here are the vocoder model implementations which can be combined with the other TTS models. 4 | 5 | Currently, the following models are implemented: 6 | 7 | - Melgan 8 | - MultiBand-Melgan 9 | - ParallelWaveGAN 10 | - GAN-TTS (Discriminator Only) 11 | 12 | It is also very easy to adapt different vocoder models as we provide a flexible and modular (but not too modular) framework. 13 | 14 | ## Training a model 15 | 16 | You can see an example [Colab Notebook]() (coming soon) that trains MelGAN with the LJSpeech dataset. 17 | 18 | In order to train a new model, you need to gather all the wav files into a folder and give this folder to `data_path` in ```config.json```. 19 | 20 | You need to define other relevant parameters in your ```config.json``` and then start training with the following command. 21 | 22 | ```CUDA_VISIBLE_DEVICES='0' python TTS/bin/train_vocoder.py --config_path path/to/config.json``` 23 | 24 | Example config files can be found under the `TTS/vocoder/configs/` folder. 25 | 26 | You can continue a previous training run with the following command. 27 | 28 | ```CUDA_VISIBLE_DEVICES='0' python TTS/bin/train_vocoder.py --continue_path path/to/your/model/folder``` 29 | 30 | You can fine-tune a pre-trained model with the following command. 31 | 32 | ```CUDA_VISIBLE_DEVICES='0' python TTS/bin/train_vocoder.py --restore_path path/to/your/model.pth``` 33 | 34 | Restoring a model starts a new training run in a different folder. It only restores model weights from the given checkpoint file. However, continuing a training run starts from the same directory where the previous run left off. 35 | 36 | You can also follow your training runs on Tensorboard as you do with our TTS models. 
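As a rough illustration of the `config.json` described above, the sketch below builds a vocoder config programmatically and dumps it to disk. The choice of `MultibandMelganConfig` and the exact field values are only indicative; check the example configs under `TTS/vocoder/configs/` for the fields supported by your version.

```python
# Sketch: generate a vocoder training config (field names assumed; verify them
# against the config dataclasses under TTS/vocoder/configs/).
from TTS.vocoder.configs import MultibandMelganConfig

config = MultibandMelganConfig(
    data_path="/path/to/wavs/",   # folder that holds all training wav files
    output_path="vocoder_runs/",  # where checkpoints and logs are written
    batch_size=32,
)
config.save_json("config.json")   # pass this file to --config_path
```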
37 | 38 | ## Acknowledgement 39 | Thanks to @kan-bayashi for his [repository](https://github.com/kan-bayashi/ParallelWaveGAN) being the start point of our work. 40 | -------------------------------------------------------------------------------- /TTS/vocoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vocoder/__init__.py -------------------------------------------------------------------------------- /TTS/vocoder/configs/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | from inspect import isclass 4 | 5 | # import all files under configs/ 6 | configs_dir = os.path.dirname(__file__) 7 | for file in os.listdir(configs_dir): 8 | path = os.path.join(configs_dir, file) 9 | if not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)): 10 | config_name = file[: file.find(".py")] if file.endswith(".py") else file 11 | module = importlib.import_module("TTS.vocoder.configs." + config_name) 12 | for attribute_name in dir(module): 13 | attribute = getattr(module, attribute_name) 14 | 15 | if isclass(attribute): 16 | # Add the class to this package's variables 17 | globals()[attribute_name] = attribute 18 | -------------------------------------------------------------------------------- /TTS/vocoder/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vocoder/layers/__init__.py -------------------------------------------------------------------------------- /TTS/vocoder/layers/melgan.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.nn.utils.parametrizations import weight_norm 3 | from torch.nn.utils.parametrize import remove_parametrizations 4 | 5 | 6 | class ResidualStack(nn.Module): 7 | def __init__(self, channels, num_res_blocks, kernel_size): 8 | super().__init__() 9 | 10 | assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd." 
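# NOTE: each block below uses dilation kernel_size**idx with the padding scaled to match, so the residual stack preserves the input length.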
11 | base_padding = (kernel_size - 1) // 2 12 | 13 | self.blocks = nn.ModuleList() 14 | for idx in range(num_res_blocks): 15 | layer_kernel_size = kernel_size 16 | layer_dilation = layer_kernel_size**idx 17 | layer_padding = base_padding * layer_dilation 18 | self.blocks += [ 19 | nn.Sequential( 20 | nn.LeakyReLU(0.2), 21 | nn.ReflectionPad1d(layer_padding), 22 | weight_norm( 23 | nn.Conv1d(channels, channels, kernel_size=kernel_size, dilation=layer_dilation, bias=True) 24 | ), 25 | nn.LeakyReLU(0.2), 26 | weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)), 27 | ) 28 | ] 29 | 30 | self.shortcuts = nn.ModuleList( 31 | [weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)) for _ in range(num_res_blocks)] 32 | ) 33 | 34 | def forward(self, x): 35 | for block, shortcut in zip(self.blocks, self.shortcuts): 36 | x = shortcut(x) + block(x) 37 | return x 38 | 39 | def remove_weight_norm(self): 40 | for block, shortcut in zip(self.blocks, self.shortcuts): 41 | remove_parametrizations(block[2], "weight") 42 | remove_parametrizations(block[4], "weight") 43 | remove_parametrizations(shortcut, "weight") 44 | -------------------------------------------------------------------------------- /TTS/vocoder/models/fullband_melgan_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from TTS.vocoder.models.melgan_generator import MelganGenerator 4 | 5 | 6 | class FullbandMelganGenerator(MelganGenerator): 7 | def __init__( 8 | self, 9 | in_channels=80, 10 | out_channels=1, 11 | proj_kernel=7, 12 | base_channels=512, 13 | upsample_factors=(2, 8, 2, 2), 14 | res_kernel=3, 15 | num_res_blocks=4, 16 | ): 17 | super().__init__( 18 | in_channels=in_channels, 19 | out_channels=out_channels, 20 | proj_kernel=proj_kernel, 21 | base_channels=base_channels, 22 | upsample_factors=upsample_factors, 23 | res_kernel=res_kernel, 24 | num_res_blocks=num_res_blocks, 25 | ) 26 | 27 | @torch.no_grad() 28 | def inference(self, cond_features): 29 | cond_features = cond_features.to(self.layers[1].weight.device) 30 | cond_features = torch.nn.functional.pad( 31 | cond_features, (self.inference_padding, self.inference_padding), "replicate" 32 | ) 33 | return self.layers(cond_features) 34 | -------------------------------------------------------------------------------- /TTS/vocoder/models/melgan_multiscale_discriminator.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from TTS.vocoder.models.melgan_discriminator import MelganDiscriminator 4 | 5 | 6 | class MelganMultiscaleDiscriminator(nn.Module): 7 | def __init__( 8 | self, 9 | in_channels=1, 10 | out_channels=1, 11 | num_scales=3, 12 | kernel_sizes=(5, 3), 13 | base_channels=16, 14 | max_channels=1024, 15 | downsample_factors=(4, 4, 4), 16 | pooling_kernel_size=4, 17 | pooling_stride=2, 18 | pooling_padding=2, 19 | groups_denominator=4, 20 | ): 21 | super().__init__() 22 | 23 | self.discriminators = nn.ModuleList( 24 | [ 25 | MelganDiscriminator( 26 | in_channels=in_channels, 27 | out_channels=out_channels, 28 | kernel_sizes=kernel_sizes, 29 | base_channels=base_channels, 30 | max_channels=max_channels, 31 | downsample_factors=downsample_factors, 32 | groups_denominator=groups_denominator, 33 | ) 34 | for _ in range(num_scales) 35 | ] 36 | ) 37 | 38 | self.pooling = nn.AvgPool1d( 39 | kernel_size=pooling_kernel_size, stride=pooling_stride, padding=pooling_padding, count_include_pad=False 40 | ) 41 | 42 | def 
forward(self, x): 43 | scores = [] 44 | feats = [] 45 | for disc in self.discriminators: 46 | score, feat = disc(x) 47 | scores.append(score) 48 | feats.append(feat) 49 | x = self.pooling(x) 50 | return scores, feats 51 | -------------------------------------------------------------------------------- /TTS/vocoder/models/multiband_melgan_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from TTS.vocoder.layers.pqmf import PQMF 4 | from TTS.vocoder.models.melgan_generator import MelganGenerator 5 | 6 | 7 | class MultibandMelganGenerator(MelganGenerator): 8 | def __init__( 9 | self, 10 | in_channels=80, 11 | out_channels=4, 12 | proj_kernel=7, 13 | base_channels=384, 14 | upsample_factors=(2, 8, 2, 2), 15 | res_kernel=3, 16 | num_res_blocks=3, 17 | ): 18 | super().__init__( 19 | in_channels=in_channels, 20 | out_channels=out_channels, 21 | proj_kernel=proj_kernel, 22 | base_channels=base_channels, 23 | upsample_factors=upsample_factors, 24 | res_kernel=res_kernel, 25 | num_res_blocks=num_res_blocks, 26 | ) 27 | self.pqmf_layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0) 28 | 29 | def pqmf_analysis(self, x): 30 | return self.pqmf_layer.analysis(x) 31 | 32 | def pqmf_synthesis(self, x): 33 | return self.pqmf_layer.synthesis(x) 34 | 35 | @torch.no_grad() 36 | def inference(self, cond_features): 37 | cond_features = cond_features.to(self.layers[1].weight.device) 38 | cond_features = torch.nn.functional.pad( 39 | cond_features, (self.inference_padding, self.inference_padding), "replicate" 40 | ) 41 | return self.pqmf_synthesis(self.layers(cond_features)) 42 | -------------------------------------------------------------------------------- /TTS/vocoder/pqmf_output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vocoder/pqmf_output.wav -------------------------------------------------------------------------------- /TTS/vocoder/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/TTS/vocoder/utils/__init__.py -------------------------------------------------------------------------------- /dockerfiles/Dockerfile.dev: -------------------------------------------------------------------------------- 1 | ARG BASE=nvidia/cuda:11.8.0-base-ubuntu22.04 2 | FROM ${BASE} 3 | 4 | # Install OS dependencies: 5 | RUN apt-get update && apt-get upgrade -y 6 | RUN apt-get install -y --no-install-recommends \ 7 | gcc g++ \ 8 | make \ 9 | python3 python3-dev python3-pip python3-venv python3-wheel \ 10 | espeak-ng libsndfile1-dev \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | # Install Major Python Dependencies: 14 | RUN pip3 install llvmlite --ignore-installed 15 | RUN pip3 install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 16 | RUN rm -rf /root/.cache/pip 17 | 18 | WORKDIR /root 19 | 20 | # Copy Dependency Lock Files: 21 | COPY \ 22 | Makefile \ 23 | pyproject.toml \ 24 | setup.py \ 25 | requirements.dev.txt \ 26 | requirements.ja.txt \ 27 | requirements.notebooks.txt \ 28 | requirements.txt \ 29 | /root/ 30 | 31 | # Install Project Dependencies 32 | # Separate stage to limit re-downloading: 33 | RUN pip install \ 34 | -r requirements.txt \ 35 | -r requirements.dev.txt \ 36 | -r requirements.ja.txt \ 37 | -r requirements.notebooks.txt 38 | 39 | # Copy TTS 
repository contents: 40 | COPY . /root 41 | 42 | # Installing the TTS package itself: 43 | RUN make install 44 | 45 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= -j auto -WT --keep-going 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/docs/README.md -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | furo 2 | myst-parser == 2.0.0 3 | sphinx == 7.2.5 4 | sphinx_inline_tabs 5 | sphinx_copybutton 6 | linkify-it-py -------------------------------------------------------------------------------- /docs/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/docs/source/_static/logo.png -------------------------------------------------------------------------------- /docs/source/_templates/page.html: -------------------------------------------------------------------------------- 1 | {% extends "!page.html" %} 2 | {% block scripts %} 3 | {{ super() }} 4 | {% endblock %} 5 | -------------------------------------------------------------------------------- /docs/source/contributing.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../CONTRIBUTING.md 2 | :relative-images: 3 | ``` 4 | -------------------------------------------------------------------------------- /docs/source/implementing_a_new_language_frontend.md: -------------------------------------------------------------------------------- 1 | # Implementing a New Language Frontend 2 | 3 | - Language frontends are located under `TTS.tts.utils.text` 4 | - Each supported language has a separate folder. 5 | - Each folder contains all the utilities for processing the text input. 6 | - `TTS.tts.utils.text.phonemizers` contains the main phonemizer for a language. This is the class that uses the utilities 7 | from the previous step and is used to convert the text to phonemes or graphemes for the model (a minimal sketch is given at the end of this list). 8 | - After you implement your phonemizer, you need to add it to `TTS/tts/utils/text/phonemizers/__init__.py` to be able to 9 | map the language code in the model config - `config.phoneme_language` - to the phonemizer class and initiate the phonemizer automatically. 10 | - You should also add tests to `tests/text_tests` if you want to make a PR. 
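For a concrete picture, below is a minimal sketch of a hypothetical phonemizer for a made-up language code `xx`, modelled on the existing implementations such as `BEL_Phonemizer`. All names here are illustrative, not part of the library.

```python
# Hypothetical example: a new-language phonemizer following the pattern of the
# existing classes under TTS/tts/utils/text/phonemizers/.
from typing import Dict

from TTS.tts.utils.text.phonemizers.base import BasePhonemizer


def xx_text_to_phonemes(text: str) -> str:
    # Placeholder grapheme-to-phoneme rule; the real logic would live in a
    # dedicated folder such as TTS/tts/utils/text/xx/phonemizer.py.
    return text.lower()


class XX_Phonemizer(BasePhonemizer):
    language = "xx"

    def __init__(self, punctuations=",!.", keep_puncs=True, **kwargs):  # pylint: disable=unused-argument
        super().__init__(self.language, punctuations=punctuations, keep_puncs=keep_puncs)

    @staticmethod
    def name():
        return "xx_phonemizer"

    def _phonemize(self, text, separator):
        return xx_text_to_phonemes(text)

    @staticmethod
    def supported_languages() -> Dict:
        return {"xx": "Example language"}

    def version(self) -> str:
        return "0.0.1"

    def is_available(self) -> bool:
        return True
```

Once the class exists, register it in `TTS/tts/utils/text/phonemizers/__init__.py` so that setting `config.phoneme_language = "xx"` resolves to it.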
11 | 12 | We suggest you check the available implementations as a reference. Good luck! 13 | -------------------------------------------------------------------------------- /docs/source/index.md: -------------------------------------------------------------------------------- 1 | 2 | ```{include} ../../README.md 3 | :relative-images: 4 | ``` 5 | ---- 6 | 7 | # Documentation Content 8 | ```{eval-rst} 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Get started 12 | 13 | tutorial_for_nervous_beginners 14 | installation 15 | faq 16 | contributing 17 | 18 | .. toctree:: 19 | :maxdepth: 2 20 | :caption: Using 🐸TTS 21 | 22 | inference 23 | docker_images 24 | implementing_a_new_model 25 | implementing_a_new_language_frontend 26 | training_a_model 27 | finetuning 28 | configuration 29 | formatting_your_dataset 30 | what_makes_a_good_dataset 31 | tts_datasets 32 | marytts 33 | 34 | .. toctree:: 35 | :maxdepth: 2 36 | :caption: Main Classes 37 | 38 | main_classes/trainer_api 39 | main_classes/audio_processor 40 | main_classes/model_api 41 | main_classes/dataset 42 | main_classes/gan 43 | main_classes/speaker_manager 44 | 45 | .. toctree:: 46 | :maxdepth: 2 47 | :caption: `tts` Models 48 | 49 | models/glow_tts.md 50 | models/vits.md 51 | models/forward_tts.md 52 | models/tacotron1-2.md 53 | models/overflow.md 54 | models/tortoise.md 55 | models/bark.md 56 | models/xtts.md 57 | 58 | .. toctree:: 59 | :maxdepth: 2 60 | :caption: `vocoder` Models 61 | 62 | ``` 63 | -------------------------------------------------------------------------------- /docs/source/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | 🐸TTS supports Python >=3.7, <3.11.0 and is tested on Ubuntu 18.10, 19.10, 20.10. 4 | 5 | ## Using `pip` 6 | 7 | `pip` is recommended if you want to use 🐸TTS only for inference. 8 | 9 | You can install from PyPI as follows: 10 | 11 | ```bash 12 | pip install TTS # from PyPI 13 | ``` 14 | 15 | Or install from Github: 16 | 17 | ```bash 18 | pip install git+https://github.com/coqui-ai/TTS # from Github 19 | ``` 20 | 21 | ## Installing From Source 22 | 23 | This is recommended for development and gives you more control over 🐸TTS. 24 | 25 | ```bash 26 | git clone https://github.com/coqui-ai/TTS/ 27 | cd TTS 28 | make system-deps # only on Linux systems. 29 | make install 30 | ``` 31 | 32 | ## On Windows 33 | If you are on Windows, 👑@GuyPaddock wrote installation instructions [here](https://stackoverflow.com/questions/66726331/ -------------------------------------------------------------------------------- /docs/source/main_classes/audio_processor.md: -------------------------------------------------------------------------------- 1 | # AudioProcessor API 2 | 3 | `TTS.utils.audio.AudioProcessor` is the core class for all the audio processing routines. It provides an API for 4 | 5 | - Feature extraction. 6 | - Sound normalization. 7 | - Reading and writing audio files. 8 | - Sampling audio signals. 9 | - Normalizing and denormalizing audio signals. 10 | - Griffin-Lim vocoder. 11 | 12 | The `AudioProcessor` needs to be initialized with `TTS.config.shared_configs.BaseAudioConfig`. Any model config 13 | must also inherit or initiate `BaseAudioConfig`. 14 | 15 | ## AudioProcessor 16 | ```{eval-rst} 17 | .. autoclass:: TTS.utils.audio.AudioProcessor 18 | :members: 19 | ``` 20 | 21 | ## BaseAudioConfig 22 | ```{eval-rst} 23 | .. 
autoclass:: TTS.config.shared_configs.BaseAudioConfig 24 | :members: 25 | ``` -------------------------------------------------------------------------------- /docs/source/main_classes/dataset.md: -------------------------------------------------------------------------------- 1 | # Datasets 2 | 3 | ## TTS Dataset 4 | 5 | ```{eval-rst} 6 | .. autoclass:: TTS.tts.datasets.TTSDataset 7 | :members: 8 | ``` 9 | 10 | ## Vocoder Dataset 11 | 12 | ```{eval-rst} 13 | .. autoclass:: TTS.vocoder.datasets.gan_dataset.GANDataset 14 | :members: 15 | ``` 16 | 17 | ```{eval-rst} 18 | .. autoclass:: TTS.vocoder.datasets.wavegrad_dataset.WaveGradDataset 19 | :members: 20 | ``` 21 | 22 | ```{eval-rst} 23 | .. autoclass:: TTS.vocoder.datasets.wavernn_dataset.WaveRNNDataset 24 | :members: 25 | ``` -------------------------------------------------------------------------------- /docs/source/main_classes/gan.md: -------------------------------------------------------------------------------- 1 | # GAN API 2 | 3 | The {class}`TTS.vocoder.models.gan.GAN` provides an easy way to implement new GAN-based models. You just need 4 | to define the model architectures for the generator and the discriminator networks and give them to the `GAN` class 5 | to do its ✨️. 6 | 7 | 8 | ## GAN 9 | ```{eval-rst} 10 | .. autoclass:: TTS.vocoder.models.gan.GAN 11 | :members: 12 | ``` -------------------------------------------------------------------------------- /docs/source/main_classes/model_api.md: -------------------------------------------------------------------------------- 1 | # Model API 2 | The Model API provides you with a set of functions that easily make your model compatible with the `Trainer`, 3 | `Synthesizer` and `ModelZoo`. 4 | 5 | ## Base TTS Model 6 | 7 | ```{eval-rst} 8 | .. autoclass:: TTS.model.BaseTrainerModel 9 | :members: 10 | ``` 11 | 12 | ## Base tts Model 13 | 14 | ```{eval-rst} 15 | .. autoclass:: TTS.tts.models.base_tts.BaseTTS 16 | :members: 17 | ``` 18 | 19 | ## Base vocoder Model 20 | 21 | ```{eval-rst} 22 | .. autoclass:: TTS.vocoder.models.base_vocoder.BaseVocoder 23 | :members: 24 | ``` -------------------------------------------------------------------------------- /docs/source/main_classes/speaker_manager.md: -------------------------------------------------------------------------------- 1 | # Speaker Manager API 2 | 3 | The {class}`TTS.tts.utils.speakers.SpeakerManager` organizes speaker-related data and information for 🐸TTS models. It is 4 | especially useful for multi-speaker models. 5 | 6 | 7 | ## Speaker Manager 8 | ```{eval-rst} 9 | .. automodule:: TTS.tts.utils.speakers 10 | :members: 11 | ``` -------------------------------------------------------------------------------- /docs/source/main_classes/trainer_api.md: -------------------------------------------------------------------------------- 1 | # Trainer API 2 | 3 | The trainer is maintained as a separate project at https://github.com/coqui-ai/Trainer 4 | -------------------------------------------------------------------------------- /docs/source/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found.
Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/source/models/glow_tts.md: -------------------------------------------------------------------------------- 1 | # Glow TTS 2 | 3 | Glow TTS is a normalizing flow model for text-to-speech. It is built on the generic Glow model that is previously 4 | used in computer vision and vocoder models. It uses "monotonic alignment search" (MAS) to fine the text-to-speech alignment 5 | and uses the output to train a separate duration predictor network for faster inference run-time. 6 | 7 | ## Important resources & papers 8 | - GlowTTS: https://arxiv.org/abs/2005.11129 9 | - Glow (Generative Flow with invertible 1x1 Convolutions): https://arxiv.org/abs/1807.03039 10 | - Normalizing Flows: https://blog.evjang.com/2018/01/nf1.html 11 | 12 | ## GlowTTS Config 13 | ```{eval-rst} 14 | .. autoclass:: TTS.tts.configs.glow_tts_config.GlowTTSConfig 15 | :members: 16 | ``` 17 | 18 | ## GlowTTS Model 19 | ```{eval-rst} 20 | .. autoclass:: TTS.tts.models.glow_tts.GlowTTS 21 | :members: 22 | ``` 23 | -------------------------------------------------------------------------------- /docs/source/models/overflow.md: -------------------------------------------------------------------------------- 1 | # Overflow TTS 2 | 3 | Neural HMMs are a type of neural transducer recently proposed for 4 | sequence-to-sequence modelling in text-to-speech. They combine the best features 5 | of classic statistical speech synthesis and modern neural TTS, requiring less 6 | data and fewer training updates, and are less prone to gibberish output caused 7 | by neural attention failures. In this paper, we combine neural HMM TTS with 8 | normalising flows for describing the highly non-Gaussian distribution of speech 9 | acoustics. The result is a powerful, fully probabilistic model of durations and 10 | acoustics that can be trained using exact maximum likelihood. Compared to 11 | dominant flow-based acoustic models, our approach integrates autoregression for 12 | improved modelling of long-range dependences such as utterance-level prosody. 13 | Experiments show that a system based on our proposal gives more accurate 14 | pronunciations and better subjective speech quality than comparable methods, 15 | whilst retaining the original advantages of neural HMMs. Audio examples and code 16 | are available at https://shivammehta25.github.io/OverFlow/. 17 | 18 | 19 | ## Important resources & papers 20 | - HMM: https://de.wikipedia.org/wiki/Hidden_Markov_Model 21 | - OverflowTTS paper: https://arxiv.org/abs/2211.06892 22 | - Neural HMM: https://arxiv.org/abs/2108.13320 23 | - Audio Samples: https://shivammehta25.github.io/OverFlow/ 24 | 25 | 26 | ## OverflowConfig 27 | ```{eval-rst} 28 | .. autoclass:: TTS.tts.configs.overflow_config.OverflowConfig 29 | :members: 30 | ``` 31 | 32 | ## Overflow Model 33 | ```{eval-rst} 34 | .. 
autoclass:: TTS.tts.models.overflow.Overflow 35 | :members: 36 | ``` -------------------------------------------------------------------------------- /docs/source/models/vits.md: -------------------------------------------------------------------------------- 1 | # VITS 2 | 3 | VITS (Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech) 4 | is an end-to-end (encoder -> vocoder together) TTS model that takes advantage of SOTA DL techniques like GANs, VAEs, 5 | and normalizing flows. It does not require external alignment annotations and learns the text-to-audio alignment 6 | using MAS, as explained in the paper. The model architecture is a combination of the GlowTTS encoder and the HiFiGAN vocoder. 7 | It is a feed-forward model with a real-time factor of x67.12 on a GPU. 8 | 9 | 🐸 YourTTS is a multi-speaker and multi-lingual TTS model that can perform voice conversion and zero-shot speaker adaptation. 10 | It can also learn a new language or voice from a ~1 minute long audio clip. This opens the door to training 11 | TTS models for low-resource languages. 🐸 YourTTS uses VITS as the backbone architecture, coupled with a speaker encoder model. 12 | 13 | ## Important resources & papers 14 | - 🐸 YourTTS: https://arxiv.org/abs/2112.02418 15 | - VITS: https://arxiv.org/pdf/2106.06103.pdf 16 | - Neural Spline Flows: https://arxiv.org/abs/1906.04032 17 | - Variational Autoencoder: https://arxiv.org/pdf/1312.6114.pdf 18 | - Generative Adversarial Networks: https://arxiv.org/abs/1406.2661 19 | - HiFiGAN: https://arxiv.org/abs/2010.05646 20 | - Normalizing Flows: https://blog.evjang.com/2018/01/nf1.html 21 | 22 | ## VitsConfig 23 | ```{eval-rst} 24 | .. autoclass:: TTS.tts.configs.vits_config.VitsConfig 25 | :members: 26 | ``` 27 | 28 | ## VitsArgs 29 | ```{eval-rst} 30 | .. autoclass:: TTS.tts.models.vits.VitsArgs 31 | :members: 32 | ``` 33 | 34 | ## Vits Model 35 | ```{eval-rst} 36 | .. autoclass:: TTS.tts.models.vits.Vits 37 | :members: 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/source/tts_datasets.md: -------------------------------------------------------------------------------- 1 | # TTS Datasets 2 | 3 | Some of the public datasets to which we have successfully applied 🐸TTS: 4 | 5 | - [English - LJ Speech](https://keithito.com/LJ-Speech-Dataset/) 6 | - [English - Nancy](http://www.cstr.ed.ac.uk/projects/blizzard/2011/lessac_blizzard2011/) 7 | - [English - TWEB](https://www.kaggle.com/bryanpark/the-world-english-bible-speech-dataset) 8 | - [English - LibriTTS](https://openslr.org/60/) 9 | - [English - VCTK](https://datashare.ed.ac.uk/handle/10283/2950) 10 | - [Multilingual - M-AI-Labs](http://www.caito.de/2019/01/the-m-ailabs-speech-dataset/) 11 | - [Spanish](https://drive.google.com/file/d/1Sm_zyBo67XHkiFhcRSQ4YaHPYM0slO_e/view?usp=sharing) - thx! @carlfm01 12 | - [German - Thorsten OGVD](https://github.com/thorstenMueller/deep-learning-german-tts) 13 | - [Japanese - Kokoro](https://www.kaggle.com/kaiida/kokoro-speech-dataset-v11-small/version/1) 14 | - [Chinese](https://www.data-baker.com/data/index/source/) 15 | - [Ukrainian - LADA](https://github.com/egorsmkv/ukrainian-tts-datasets/tree/main/lada) 16 | 17 | Let us know if you use 🐸TTS on a different dataset.
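To point 🐸TTS at a downloaded copy of one of these datasets, the snippet below is a minimal, hedged sketch using LJSpeech; the local path is a placeholder, and the exact `BaseDatasetConfig` field names (e.g. `formatter` vs. the older `name`) and import paths can vary between 🐸TTS versions, so cross-check against any recipe under `recipes/`.

```python
from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples

# Placeholder path -- point this at wherever LJSpeech-1.1 was extracted.
dataset_config = BaseDatasetConfig(
    formatter="ljspeech",            # formatter matching the dataset's metadata layout
    meta_file_train="metadata.csv",  # transcript file shipped with LJSpeech
    path="/data/LJSpeech-1.1/",
)

# Split the samples into training and evaluation sets.
train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
print(f"{len(train_samples)} training / {len(eval_samples)} eval samples")
```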
18 | -------------------------------------------------------------------------------- /images/TTS-performance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/TTS-performance.png -------------------------------------------------------------------------------- /images/coqui-log-green-TTS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/coqui-log-green-TTS.png -------------------------------------------------------------------------------- /images/demo_server.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/demo_server.gif -------------------------------------------------------------------------------- /images/example_model_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/example_model_output.png -------------------------------------------------------------------------------- /images/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/model.png -------------------------------------------------------------------------------- /images/tts_cli.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/tts_cli.gif -------------------------------------------------------------------------------- /images/tts_performance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/images/tts_performance.png -------------------------------------------------------------------------------- /notebooks/dataset_analysis/README.md: -------------------------------------------------------------------------------- 1 | ## Simple Notebook to Analyze a Dataset 2 | 3 | Using this notebook, you can easily analyze a brand new dataset, find exceptional cases, and define your training set. 4 | 5 | What we are looking for here is a reasonable distribution of instances in terms of sequence length, audio length, and word coverage.
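For a quick sense of what those distributions look like before opening the notebook, here is a rough, self-contained sketch; the dataset path, the pipe-separated LJSpeech-style `metadata.csv` layout, and the column order are assumptions, so adapt them to your own dataset.

```python
import wave
from pathlib import Path

# Assumed LJSpeech-style layout: <root>/metadata.csv plus <root>/wavs/<id>.wav
dataset_root = Path("/data/LJSpeech-1.1")

seq_lengths, audio_lengths, vocab = [], [], set()
with open(dataset_root / "metadata.csv", encoding="utf-8") as meta:
    for line in meta:
        cols = line.rstrip("\n").split("|")
        file_id, text = cols[0], cols[-1]
        seq_lengths.append(len(text))       # character-level sequence length
        vocab.update(text.lower().split())  # crude word-coverage proxy
        with wave.open(str(dataset_root / "wavs" / f"{file_id}.wav")) as wav:
            audio_lengths.append(wav.getnframes() / wav.getframerate())  # seconds

for name, values in [("sequence length (chars)", seq_lengths), ("audio length (s)", audio_lengths)]:
    print(f"{name}: min={min(values):.1f} max={max(values):.1f} mean={sum(values) / len(values):.1f}")
print(f"unique words: {len(vocab)}")
```

The notebook itself does this analysis in more depth; the sketch above only prints summary statistics so you can spot obvious outliers.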
6 | 7 | This notebook is inspired by https://github.com/MycroftAI/mimic2 8 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools", 4 | "wheel", 5 | "cython~=0.29.30", 6 | "numpy>=1.22.0", 7 | "packaging", 8 | ] 9 | 10 | [flake8] 11 | max-line-length=120 12 | 13 | [tool.black] 14 | line-length = 120 15 | target-version = ['py39'] 16 | 17 | [tool.isort] 18 | line_length = 120 19 | profile = "black" 20 | multi_line_output = 3 21 | -------------------------------------------------------------------------------- /recipes/README.md: -------------------------------------------------------------------------------- 1 | # 🐸💬 TTS Training Recipes 2 | 3 | TTS recipes are intended to host scripts that run all the necessary steps to train a TTS model on a particular dataset. 4 | 5 | You only need to download each dataset once. Then you run the training for the model you want. 6 | 7 | Run each script from the root TTS folder as follows. 8 | 9 | ```console 10 | $ sh ./recipes/<dataset_name>/download_<dataset_name>.sh 11 | $ python recipes/<dataset_name>/<model_name>/train.py 12 | ``` 13 | 14 | For some datasets you might need to resample the audio files. For example, the VCTK dataset can be resampled to 22050 Hz as follows. 15 | 16 | ```console 17 | python TTS/bin/resample.py --input_dir recipes/vctk/VCTK/wav48_silence_trimmed --output_sr 22050 --output_dir recipes/vctk/VCTK/wav48_silence_trimmed --n_jobs 8 --file_ext flac 18 | ``` 19 | 20 | If you train a new model using TTS, feel free to share your training setup to expand the list of recipes. 21 | 22 | You can also open a new discussion and share your progress with the 🐸 community. -------------------------------------------------------------------------------- /recipes/bel-alex73/.gitignore: -------------------------------------------------------------------------------- 1 | /docker-prepare/*.txt 2 | -------------------------------------------------------------------------------- /recipes/bel-alex73/docker-prepare-start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cd $( dirname -- "$0"; ) 5 | 6 | cp ../../requirements*.txt docker-prepare/ 7 | 8 | docker build -t tts-learn -f docker-prepare/Dockerfile docker-prepare/ 9 | 10 | mkdir -p ../../../storage 11 | docker run --rm -it \ 12 | -p 2525:2525 \ 13 | --shm-size=256M \ 14 | --name tts-learn-run \ 15 | -v $(pwd)/../../:/a/TTS \ 16 | -v $(pwd)/../../../cv-corpus:/a/cv-corpus \ 17 | -v $(pwd)/../../../fanetyka/:/a/fanetyka/ \ 18 | -v $(pwd)/../../../storage:/storage \ 19 | tts-learn 20 | -------------------------------------------------------------------------------- /recipes/bel-alex73/docker-prepare/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | RUN apt -y update 4 | RUN apt -y upgrade 5 | RUN apt -y install --no-install-recommends pip ffmpeg openjdk-19-jre-headless 6 | 7 | RUN mkdir /a/ 8 | ADD requirements*.txt /a/ 9 | WORKDIR /a/ 10 | RUN pip install -r requirements.txt -r requirements.dev.txt -r requirements.notebooks.txt 11 | RUN pip install seaborn pydub notebook 12 | 13 | RUN apt -y install --no-install-recommends gcc libpython3.10-dev 14 | 15 | ADD runtime.sh /a/ 16 | 17 | WORKDIR /a/TTS/ 18 | CMD /a/runtime.sh 19 | --------------------------------------------------------------------------------
/recipes/bel-alex73/docker-prepare/runtime.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /a/TTS 4 | pip install -e .[all,dev,notebooks] 5 | 6 | LANG=C.utf8 bash 7 | -------------------------------------------------------------------------------- /recipes/bel-alex73/dump_config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from train_glowtts import config 5 | 6 | s = json.dumps(config, default=vars, indent=2) 7 | s = re.sub(r'"test_sentences":\s*\[\],', "", s) 8 | print(s) 9 | -------------------------------------------------------------------------------- /recipes/bel-alex73/train_hifigan.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from coqpit import Coqpit 4 | from trainer import Trainer, TrainerArgs 5 | 6 | from TTS.tts.configs.shared_configs import BaseAudioConfig 7 | from TTS.utils.audio import AudioProcessor 8 | from TTS.vocoder.configs.hifigan_config import * 9 | from TTS.vocoder.datasets.preprocess import load_wav_data 10 | from TTS.vocoder.models.gan import GAN 11 | 12 | output_path = "/storage/output-hifigan/" 13 | 14 | audio_config = BaseAudioConfig( 15 | mel_fmin=50, 16 | mel_fmax=8000, 17 | hop_length=256, 18 | stats_path="/storage/TTS/scale_stats.npy", 19 | ) 20 | 21 | config = HifiganConfig( 22 | batch_size=74, 23 | eval_batch_size=16, 24 | num_loader_workers=8, 25 | num_eval_loader_workers=8, 26 | lr_disc=0.0002, 27 | lr_gen=0.0002, 28 | run_eval=True, 29 | test_delay_epochs=5, 30 | epochs=1000, 31 | use_noise_augment=True, 32 | seq_len=8192, 33 | pad_short=2000, 34 | save_step=5000, 35 | print_step=50, 36 | print_eval=True, 37 | mixed_precision=False, 38 | eval_split_size=30, 39 | save_n_checkpoints=2, 40 | save_best_after=5000, 41 | data_path="/storage/filtered_dataset", 42 | output_path=output_path, 43 | audio=audio_config, 44 | ) 45 | 46 | # init audio processor 47 | ap = AudioProcessor.init_from_config(config) 48 | 49 | # load training samples 50 | print("config.eval_split_size = ", config.eval_split_size) 51 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 52 | 53 | # init model 54 | model = GAN(config, ap) 55 | 56 | # init the trainer and 🚀 57 | trainer = Trainer( 58 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 59 | ) 60 | trainer.fit() 61 | -------------------------------------------------------------------------------- /recipes/blizzard2013/README.md: -------------------------------------------------------------------------------- 1 | # How to get the Blizzard 2013 Dataset 2 | 3 | The Capacitron model is a variational encoder extension of standard Tacotron based models to model prosody. 4 | 5 | To take full advantage of the model, it is advised to train the model with a dataset that contains a significant amount of prosodic information in the utterances. A tested candidate for such applications is the blizzard2013 dataset from the Blizzard Challenge, containing many hours of high quality audio book recordings. 6 | 7 | To get a license and download link for this dataset, you need to visit the [website](https://www.cstr.ed.ac.uk/projects/blizzard/2013/lessac_blizzard2013/license.html) of the Centre for Speech Technology Research of the University of Edinburgh. 8 | 9 | You get access to the raw dataset in a couple of days. 
There are a few preprocessing steps you need to do to be able to use the high fidelity dataset. 10 | 11 | 1. Get the forced time alignments for the blizzard dataset from [here](https://github.com/mueller91/tts_alignments). 12 | 2. Segment the high fidelity audio-book files based on the instructions [here](https://github.com/Tomiinek/Blizzard2013_Segmentation). -------------------------------------------------------------------------------- /recipes/kokoro/tacotron2-DDC/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # take the scripts's parent's directory to prefix all the output paths. 3 | RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | CORPUS=kokoro-speech-v1_1-small 5 | echo $RUN_DIR 6 | if [ \! -d $RUN_DIR/$CORPUS ] ; then 7 | echo "$RUN_DIR/$CORPUS doesn't exist." 8 | echo "Follow the instruction of https://github.com/kaiidams/Kokoro-Speech-Dataset to make the corpus." 9 | exit 1 10 | fi 11 | # create train-val splits 12 | shuf $RUN_DIR/$CORPUS/metadata.csv > $RUN_DIR/$CORPUS/metadata_shuf.csv 13 | head -n 8000 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_train.csv 14 | tail -n 812 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_val.csv 15 | # compute dataset mean and variance for normalization 16 | python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/$CORPUS/wavs/ 17 | # training .... 18 | # change the GPU id if needed 19 | CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tts.py --config_path $RUN_DIR/tacotron2-DDC.json \ 20 | --coqpit.output_path $RUN_DIR \ 21 | --coqpit.datasets.0.path $RUN_DIR/$CORPUS \ 22 | --coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \ 23 | --coqpit.phoneme_cache_path $RUN_DIR/phoneme_cache \ -------------------------------------------------------------------------------- /recipes/ljspeech/README.md: -------------------------------------------------------------------------------- 1 | # 🐸💬 TTS LJspeech Recipes 2 | 3 | For running the recipes 4 | 5 | 1. Download the LJSpeech dataset here either manually from [its official website](https://keithito.com/LJ-Speech-Dataset/) or using ```download_ljspeech.sh```. 6 | 2. Go to your desired model folder and run the training. 7 | 8 | Running Python files. (Choose the desired GPU ID for your run and set ```CUDA_VISIBLE_DEVICES```) 9 | ```terminal 10 | CUDA_VISIBLE_DEVICES="0" python train_modelX.py 11 | ``` 12 | 13 | Running bash scripts. 14 | ```terminal 15 | bash run.sh 16 | ``` 17 | 18 | 💡 Note that these runs are just templates to help you start training your first model. They are not optimized for the best 19 | result. Double-check the configurations and feel free to share your experiments to find better parameters together 💪. 20 | -------------------------------------------------------------------------------- /recipes/ljspeech/download_ljspeech.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # take the scripts's parent's directory to prefix all the output paths. 
3 | RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | echo $RUN_DIR 5 | # download LJSpeech dataset 6 | wget http://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 7 | # extract 8 | tar -xjf LJSpeech-1.1.tar.bz2 9 | # create train-val splits 10 | shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv 11 | head -n 12000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv 12 | tail -n 1100 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv 13 | mv LJSpeech-1.1 $RUN_DIR/recipes/ljspeech/ 14 | rm LJSpeech-1.1.tar.bz2 -------------------------------------------------------------------------------- /recipes/ljspeech/hifigan/train_hifigan.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.vocoder.configs import HifiganConfig 7 | from TTS.vocoder.datasets.preprocess import load_wav_data 8 | from TTS.vocoder.models.gan import GAN 9 | 10 | output_path = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | config = HifiganConfig( 13 | batch_size=32, 14 | eval_batch_size=16, 15 | num_loader_workers=4, 16 | num_eval_loader_workers=4, 17 | run_eval=True, 18 | test_delay_epochs=5, 19 | epochs=1000, 20 | seq_len=8192, 21 | pad_short=2000, 22 | use_noise_augment=True, 23 | eval_split_size=10, 24 | print_step=25, 25 | print_eval=False, 26 | mixed_precision=False, 27 | lr_gen=1e-4, 28 | lr_disc=1e-4, 29 | data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), 30 | output_path=output_path, 31 | ) 32 | 33 | # init audio processor 34 | ap = AudioProcessor(**config.audio.to_dict()) 35 | 36 | # load training samples 37 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 38 | 39 | # init model 40 | model = GAN(config, ap) 41 | 42 | # init the trainer and 🚀 43 | trainer = Trainer( 44 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 45 | ) 46 | trainer.fit() 47 | -------------------------------------------------------------------------------- /recipes/ljspeech/multiband_melgan/train_multiband_melgan.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.vocoder.configs import MultibandMelganConfig 7 | from TTS.vocoder.datasets.preprocess import load_wav_data 8 | from TTS.vocoder.models.gan import GAN 9 | 10 | output_path = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | config = MultibandMelganConfig( 13 | batch_size=32, 14 | eval_batch_size=16, 15 | num_loader_workers=4, 16 | num_eval_loader_workers=4, 17 | run_eval=True, 18 | test_delay_epochs=5, 19 | epochs=1000, 20 | seq_len=8192, 21 | pad_short=2000, 22 | use_noise_augment=True, 23 | eval_split_size=10, 24 | print_step=25, 25 | print_eval=False, 26 | mixed_precision=False, 27 | lr_gen=1e-4, 28 | lr_disc=1e-4, 29 | data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), 30 | output_path=output_path, 31 | ) 32 | 33 | # init audio processor 34 | ap = AudioProcessor(**config.audio.to_dict()) 35 | 36 | # load training samples 37 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 38 | 39 | # init model 40 | model = GAN(config, ap) 41 | 42 | # init the trainer and 🚀 43 | trainer = Trainer( 44 | TrainerArgs(), config, output_path, model=model, 
train_samples=train_samples, eval_samples=eval_samples 45 | ) 46 | trainer.fit() 47 | -------------------------------------------------------------------------------- /recipes/ljspeech/overflow/lj_parameters.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/recipes/ljspeech/overflow/lj_parameters.pt -------------------------------------------------------------------------------- /recipes/ljspeech/univnet/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.vocoder.configs import UnivnetConfig 7 | from TTS.vocoder.datasets.preprocess import load_wav_data 8 | from TTS.vocoder.models.gan import GAN 9 | 10 | output_path = os.path.dirname(os.path.abspath(__file__)) 11 | config = UnivnetConfig( 12 | batch_size=64, 13 | eval_batch_size=16, 14 | num_loader_workers=4, 15 | num_eval_loader_workers=4, 16 | run_eval=True, 17 | test_delay_epochs=-1, 18 | epochs=1000, 19 | seq_len=8192, 20 | pad_short=2000, 21 | use_noise_augment=True, 22 | eval_split_size=10, 23 | print_step=25, 24 | print_eval=False, 25 | mixed_precision=False, 26 | lr_gen=1e-4, 27 | lr_disc=1e-4, 28 | data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), 29 | output_path=output_path, 30 | ) 31 | 32 | # init audio processor 33 | ap = AudioProcessor(**config.audio.to_dict()) 34 | 35 | # load training samples 36 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 37 | 38 | # init model 39 | model = GAN(config, ap) 40 | 41 | # init the trainer and 🚀 42 | trainer = Trainer( 43 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 44 | ) 45 | trainer.fit() 46 | -------------------------------------------------------------------------------- /recipes/ljspeech/wavegrad/train_wavegrad.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.vocoder.configs import WavegradConfig 7 | from TTS.vocoder.datasets.preprocess import load_wav_data 8 | from TTS.vocoder.models.wavegrad import Wavegrad 9 | 10 | output_path = os.path.dirname(os.path.abspath(__file__)) 11 | config = WavegradConfig( 12 | batch_size=32, 13 | eval_batch_size=16, 14 | num_loader_workers=4, 15 | num_eval_loader_workers=4, 16 | run_eval=True, 17 | test_delay_epochs=-1, 18 | epochs=1000, 19 | seq_len=6144, 20 | pad_short=2000, 21 | use_noise_augment=True, 22 | eval_split_size=50, 23 | print_step=50, 24 | print_eval=True, 25 | mixed_precision=False, 26 | data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), 27 | output_path=output_path, 28 | ) 29 | 30 | # init audio processor 31 | ap = AudioProcessor(**config.audio.to_dict()) 32 | 33 | # load training samples 34 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 35 | 36 | # init model 37 | model = Wavegrad(config) 38 | 39 | # init the trainer and 🚀 40 | trainer = Trainer( 41 | TrainerArgs(), 42 | config, 43 | output_path, 44 | model=model, 45 | train_samples=train_samples, 46 | eval_samples=eval_samples, 47 | training_assets={"audio_processor": ap}, 48 | ) 49 | trainer.fit() 50 | 
-------------------------------------------------------------------------------- /recipes/ljspeech/wavernn/train_wavernn.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.vocoder.configs import WavernnConfig 7 | from TTS.vocoder.datasets.preprocess import load_wav_data 8 | from TTS.vocoder.models.wavernn import Wavernn 9 | 10 | output_path = os.path.dirname(os.path.abspath(__file__)) 11 | config = WavernnConfig( 12 | batch_size=64, 13 | eval_batch_size=16, 14 | num_loader_workers=4, 15 | num_eval_loader_workers=4, 16 | run_eval=True, 17 | test_delay_epochs=-1, 18 | epochs=10000, 19 | seq_len=1280, 20 | pad_short=2000, 21 | use_noise_augment=False, 22 | eval_split_size=10, 23 | print_step=25, 24 | print_eval=True, 25 | mixed_precision=False, 26 | lr=1e-4, 27 | grad_clip=4, 28 | data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), 29 | output_path=output_path, 30 | ) 31 | 32 | # init audio processor 33 | ap = AudioProcessor(**config.audio.to_dict()) 34 | 35 | # load training samples 36 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 37 | 38 | # init model 39 | model = Wavernn(config) 40 | 41 | # init the trainer and 🚀 42 | trainer = Trainer( 43 | TrainerArgs(), 44 | config, 45 | output_path, 46 | model=model, 47 | train_samples=train_samples, 48 | eval_samples=eval_samples, 49 | training_assets={"audio_processor": ap}, 50 | ) 51 | trainer.fit() 52 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/README.md: -------------------------------------------------------------------------------- 1 | # 🐸💬 TTS Thorsten Recipes 2 | 3 | For running the recipes you need the [Thorsten-Voice](https://github.com/thorstenMueller/Thorsten-Voice) dataset. 4 | 5 | You can download it manually from [the official website](https://www.thorsten-voice.de/) or use ```download_thorsten_de.sh``` alternatively running any of the **train_modelX.py**scripts will download the dataset if not already present. 6 | 7 | Then, go to your desired model folder and run the training. 8 | 9 | Running Python files. (Choose the desired GPU ID for your run and set ```CUDA_VISIBLE_DEVICES```) 10 | ```terminal 11 | CUDA_VISIBLE_DEVICES="0" python train_modelX.py 12 | ``` 13 | 14 | 💡 Note that these runs are just templates to help you start training your first model. They are not optimized for the best 15 | result. Double-check the configurations and feel free to share your experiments to find better parameters together 💪. 
16 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/download_thorsten_DE.sh: -------------------------------------------------------------------------------- 1 | # create venv 2 | python3 -m venv env 3 | source .env/bin/activate 4 | pip install pip --upgrade 5 | 6 | # download Thorsten_DE dataset 7 | pip install gdown 8 | gdown --id 1yKJM1LAOQpRVojKunD9r8WN_p5KzBxjc -O dataset.tgz 9 | tar -xzf dataset.tgz 10 | 11 | # create train-val splits 12 | shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv 13 | head -n 20668 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv 14 | tail -n 2000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv 15 | 16 | # rename dataset and remove archive 17 | mv LJSpeech-1.1 thorsten-de 18 | rm dataset.tgz 19 | 20 | # destry venv 21 | rm -rf env 22 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/hifigan/train_hifigan.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.utils.downloaders import download_thorsten_de 7 | from TTS.vocoder.configs import HifiganConfig 8 | from TTS.vocoder.datasets.preprocess import load_wav_data 9 | from TTS.vocoder.models.gan import GAN 10 | 11 | output_path = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | config = HifiganConfig( 14 | batch_size=32, 15 | eval_batch_size=16, 16 | num_loader_workers=4, 17 | num_eval_loader_workers=4, 18 | run_eval=True, 19 | test_delay_epochs=5, 20 | epochs=1000, 21 | seq_len=8192, 22 | pad_short=2000, 23 | use_noise_augment=True, 24 | eval_split_size=10, 25 | print_step=25, 26 | print_eval=False, 27 | mixed_precision=False, 28 | lr_gen=1e-4, 29 | lr_disc=1e-4, 30 | data_path=os.path.join(output_path, "../thorsten-de/wavs/"), 31 | output_path=output_path, 32 | ) 33 | 34 | # download dataset if not already present 35 | if not os.path.exists(config.data_path): 36 | print("Downloading dataset") 37 | download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../")) 38 | download_thorsten_de(download_path) 39 | 40 | # init audio processor 41 | ap = AudioProcessor(**config.audio.to_dict()) 42 | 43 | # load training samples 44 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 45 | 46 | # init model 47 | model = GAN(config, ap) 48 | 49 | # init the trainer and 🚀 50 | trainer = Trainer( 51 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 52 | ) 53 | trainer.fit() 54 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/multiband_melgan/train_multiband_melgan.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.utils.downloaders import download_thorsten_de 7 | from TTS.vocoder.configs import MultibandMelganConfig 8 | from TTS.vocoder.datasets.preprocess import load_wav_data 9 | from TTS.vocoder.models.gan import GAN 10 | 11 | output_path = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | config = MultibandMelganConfig( 14 | batch_size=32, 15 | eval_batch_size=16, 16 | num_loader_workers=4, 17 | num_eval_loader_workers=4, 18 | run_eval=True, 19 | test_delay_epochs=5, 20 | 
epochs=1000, 21 | seq_len=8192, 22 | pad_short=2000, 23 | use_noise_augment=True, 24 | eval_split_size=10, 25 | print_step=25, 26 | print_eval=False, 27 | mixed_precision=False, 28 | lr_gen=1e-4, 29 | lr_disc=1e-4, 30 | data_path=os.path.join(output_path, "../thorsten-de/wavs/"), 31 | output_path=output_path, 32 | ) 33 | 34 | # download dataset if not already present 35 | if not os.path.exists(config.data_path): 36 | print("Downloading dataset") 37 | download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../")) 38 | download_thorsten_de(download_path) 39 | 40 | # init audio processor 41 | ap = AudioProcessor(**config.audio.to_dict()) 42 | 43 | # load training samples 44 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 45 | 46 | # init model 47 | model = GAN(config, ap) 48 | 49 | # init the trainer and 🚀 50 | trainer = Trainer( 51 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 52 | ) 53 | trainer.fit() 54 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/univnet/train_univnet.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.utils.downloaders import download_thorsten_de 7 | from TTS.vocoder.configs import UnivnetConfig 8 | from TTS.vocoder.datasets.preprocess import load_wav_data 9 | from TTS.vocoder.models.gan import GAN 10 | 11 | output_path = os.path.dirname(os.path.abspath(__file__)) 12 | config = UnivnetConfig( 13 | batch_size=64, 14 | eval_batch_size=16, 15 | num_loader_workers=4, 16 | num_eval_loader_workers=4, 17 | run_eval=True, 18 | test_delay_epochs=-1, 19 | epochs=1000, 20 | seq_len=8192, 21 | pad_short=2000, 22 | use_noise_augment=True, 23 | eval_split_size=10, 24 | print_step=25, 25 | print_eval=False, 26 | mixed_precision=False, 27 | lr_gen=1e-4, 28 | lr_disc=1e-4, 29 | data_path=os.path.join(output_path, "../thorsten-de/wavs/"), 30 | output_path=output_path, 31 | ) 32 | 33 | # download dataset if not already present 34 | if not os.path.exists(config.data_path): 35 | print("Downloading dataset") 36 | download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../")) 37 | download_thorsten_de(download_path) 38 | 39 | # init audio processor 40 | ap = AudioProcessor(**config.audio.to_dict()) 41 | 42 | # load training samples 43 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 44 | 45 | # init model 46 | model = GAN(config, ap) 47 | 48 | # init the trainer and 🚀 49 | trainer = Trainer( 50 | TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples 51 | ) 52 | trainer.fit() 53 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/wavegrad/train_wavegrad.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.utils.downloaders import download_thorsten_de 7 | from TTS.vocoder.configs import WavegradConfig 8 | from TTS.vocoder.datasets.preprocess import load_wav_data 9 | from TTS.vocoder.models.wavegrad import Wavegrad 10 | 11 | output_path = os.path.dirname(os.path.abspath(__file__)) 12 | config = WavegradConfig( 13 | batch_size=32, 14 | 
eval_batch_size=16, 15 | num_loader_workers=4, 16 | num_eval_loader_workers=4, 17 | run_eval=True, 18 | test_delay_epochs=-1, 19 | epochs=1000, 20 | seq_len=6144, 21 | pad_short=2000, 22 | use_noise_augment=True, 23 | eval_split_size=50, 24 | print_step=50, 25 | print_eval=True, 26 | mixed_precision=False, 27 | data_path=os.path.join(output_path, "../thorsten-de/wavs/"), 28 | output_path=output_path, 29 | ) 30 | 31 | # download dataset if not already present 32 | if not os.path.exists(config.data_path): 33 | print("Downloading dataset") 34 | download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../")) 35 | download_thorsten_de(download_path) 36 | 37 | # init audio processor 38 | ap = AudioProcessor(**config.audio.to_dict()) 39 | 40 | # load training samples 41 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 42 | 43 | # init model 44 | model = Wavegrad(config) 45 | 46 | # init the trainer and 🚀 47 | trainer = Trainer( 48 | TrainerArgs(), 49 | config, 50 | output_path, 51 | model=model, 52 | train_samples=train_samples, 53 | eval_samples=eval_samples, 54 | training_assets={"audio_processor": ap}, 55 | ) 56 | trainer.fit() 57 | -------------------------------------------------------------------------------- /recipes/thorsten_DE/wavernn/train_wavernn.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from trainer import Trainer, TrainerArgs 4 | 5 | from TTS.utils.audio import AudioProcessor 6 | from TTS.utils.downloaders import download_thorsten_de 7 | from TTS.vocoder.configs import WavernnConfig 8 | from TTS.vocoder.datasets.preprocess import load_wav_data 9 | from TTS.vocoder.models.wavernn import Wavernn 10 | 11 | output_path = os.path.dirname(os.path.abspath(__file__)) 12 | config = WavernnConfig( 13 | batch_size=64, 14 | eval_batch_size=16, 15 | num_loader_workers=4, 16 | num_eval_loader_workers=4, 17 | run_eval=True, 18 | test_delay_epochs=-1, 19 | epochs=10000, 20 | seq_len=1280, 21 | pad_short=2000, 22 | use_noise_augment=False, 23 | eval_split_size=10, 24 | print_step=25, 25 | print_eval=True, 26 | mixed_precision=False, 27 | lr=1e-4, 28 | grad_clip=4, 29 | data_path=os.path.join(output_path, "../thorsten-de/wavs/"), 30 | output_path=output_path, 31 | ) 32 | 33 | # download dataset if not already present 34 | if not os.path.exists(config.data_path): 35 | print("Downloading dataset") 36 | download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../")) 37 | download_thorsten_de(download_path) 38 | 39 | # init audio processor 40 | ap = AudioProcessor(**config.audio.to_dict()) 41 | 42 | # load training samples 43 | eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) 44 | 45 | # init model 46 | model = Wavernn(config) 47 | 48 | # init the trainer and 🚀 49 | trainer = Trainer( 50 | TrainerArgs(), 51 | config, 52 | output_path, 53 | model=model, 54 | train_samples=train_samples, 55 | eval_samples=eval_samples, 56 | training_assets={"audio_processor": ap}, 57 | ) 58 | trainer.fit() 59 | -------------------------------------------------------------------------------- /recipes/vctk/download_vctk.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # take the scripts's parent's directory to prefix all the output paths. 
3 | RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | echo $RUN_DIR 5 | # download VCTK dataset 6 | wget https://datashare.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip -O VCTK-Corpus-0.92.zip 7 | # extract 8 | mkdir VCTK 9 | unzip VCTK-Corpus-0.92 -d VCTK 10 | # create train-val splits 11 | mv VCTK $RUN_DIR/recipes/vctk/ 12 | rm VCTK-Corpus-0.92.zip 13 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | black 2 | coverage 3 | isort 4 | nose2 5 | pylint==2.10.2 6 | -------------------------------------------------------------------------------- /requirements.ja.txt: -------------------------------------------------------------------------------- 1 | # These cause some compatibility issues on some systems and are not strictly necessary 2 | # japanese g2p deps 3 | mecab-python3==1.0.6 4 | unidic-lite==1.0.8 5 | cutlet 6 | -------------------------------------------------------------------------------- /requirements.notebooks.txt: -------------------------------------------------------------------------------- 1 | bokeh==1.4.0 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # core deps 2 | numpy==1.22.0;python_version<="3.10" 3 | numpy>=1.24.3;python_version>"3.10" 4 | cython>=0.29.30 5 | scipy>=1.11.2 6 | torch>=2.1 7 | torchaudio 8 | soundfile>=0.12.0 9 | librosa>=0.10.0 10 | scikit-learn>=1.3.0 11 | numba==0.55.1;python_version<"3.9" 12 | numba>=0.57.0;python_version>="3.9" 13 | inflect>=5.6.0 14 | tqdm>=4.64.1 15 | anyascii>=0.3.0 16 | pyyaml>=6.0 17 | fsspec>=2023.6.0 # <= 2023.9.1 makes aux tests fail 18 | aiohttp>=3.8.1 19 | packaging>=23.1 20 | mutagen==1.47.0 21 | # deps for examples 22 | flask>=2.0.1 23 | # deps for inference 24 | pysbd>=0.3.4 25 | # deps for notebooks 26 | umap-learn>=0.5.1 27 | pandas>=1.4,<2.0 28 | # deps for training 29 | matplotlib>=3.7.0 30 | # coqui stack 31 | trainer>=0.0.36 32 | # config management 33 | coqpit>=0.0.16 34 | # chinese g2p deps 35 | jieba 36 | pypinyin 37 | # korean 38 | hangul_romanize 39 | # gruut+supported langs 40 | gruut[de,es,fr]==2.2.3 41 | # deps for korean 42 | jamo 43 | nltk 44 | g2pkk>=0.1.1 45 | # deps for bangla 46 | bangla 47 | bnnumerizer 48 | bnunicodenormalizer 49 | #deps for tortoise 50 | einops>=0.6.0 51 | transformers>=4.33.0 52 | #deps for bark 53 | encodec>=0.1.1 54 | # deps for XTTS 55 | unidecode>=1.3.2 56 | num2words 57 | spacy[ja]>=3 -------------------------------------------------------------------------------- /run_bash_tests.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | TF_CPP_MIN_LOG_LEVEL=3 3 | 4 | # runtime bash based tests 5 | # TODO: move these to python 6 | ./tests/bash_tests/test_demo_server.sh && \ 7 | ./tests/bash_tests/test_compute_statistics.sh 8 | -------------------------------------------------------------------------------- /scripts/sync_readme.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | 5 | def replace_between_markers(content, marker: str, replacement: str) -> str: 6 | start_marker = f"\n\n" 7 | end_marker = f"\n\n\n" 8 | start_index = content.index(start_marker) + len(start_marker) 9 | end_index = content.index(end_marker) 10 | content = 
content[:start_index] + replacement + content[end_index:] 11 | return content 12 | 13 | 14 | def sync_readme(): 15 | ap = argparse.ArgumentParser() 16 | ap.add_argument("--check", action="store_true", default=False) 17 | args = ap.parse_args() 18 | readme_path = Path(__file__).parent.parent / "README.md" 19 | orig_content = readme_path.read_text() 20 | from TTS.bin.synthesize import description 21 | 22 | new_content = replace_between_markers(orig_content, "tts-readme", description.strip()) 23 | if args.check: 24 | if orig_content != new_content: 25 | print("README.md is out of sync; please edit TTS/bin/TTS_README.md and run scripts/sync_readme.py") 26 | exit(42) 27 | readme_path.write_text(new_content) 28 | print("Updated README.md") 29 | 30 | 31 | if __name__ == "__main__": 32 | sync_readme() 33 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_py] 2 | build_lib=temp_build 3 | 4 | [bdist_wheel] 5 | bdist_dir=temp_build 6 | 7 | [install_lib] 8 | build_dir=temp_build 9 | -------------------------------------------------------------------------------- /tests/aux_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/aux_tests/__init__.py -------------------------------------------------------------------------------- /tests/aux_tests/test_readme.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | from pathlib import Path 4 | 5 | 6 | def test_readme_up_to_date(): 7 | root = Path(__file__).parent.parent.parent 8 | sync_readme = root / "scripts" / "sync_readme.py" 9 | subprocess.check_call([sys.executable, str(sync_readme), "--check"], cwd=root) 10 | -------------------------------------------------------------------------------- /tests/aux_tests/test_stft_torch.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/aux_tests/test_stft_torch.py -------------------------------------------------------------------------------- /tests/bash_tests/test_compute_statistics.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -xe 3 | BASEDIR=$(dirname "$0") 4 | echo "$BASEDIR" 5 | # run training 6 | CUDA_VISIBLE_DEVICES="" python TTS/bin/compute_statistics.py --config_path $BASEDIR/../inputs/test_glow_tts.json --out_path $BASEDIR/../outputs/scale_stats.npy 7 | 8 | -------------------------------------------------------------------------------- /tests/bash_tests/test_demo_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | python -m TTS.server.server & 5 | SERVER_PID=$! 6 | 7 | echo 'Waiting for server...' 
8 | sleep 30 9 | 10 | curl -o /tmp/audio.wav "http://localhost:5002/api/tts?text=synthesis%20schmynthesis" 11 | python -c 'import sys; import wave; print(wave.open(sys.argv[1]).getnframes())' /tmp/audio.wav 12 | 13 | kill $SERVER_PID 14 | 15 | rm /tmp/audio.wav 16 | -------------------------------------------------------------------------------- /tests/data/dummy_speakers.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/dummy_speakers.pth -------------------------------------------------------------------------------- /tests/data/ljspeech/f0_cache/pitch_stats.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/f0_cache/pitch_stats.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/metadata.csv: -------------------------------------------------------------------------------- 1 | LJ001-0001|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition 2 | LJ001-0002|in being comparatively modern.|in being comparatively modern. 3 | LJ001-0003|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process 4 | LJ001-0004|produced the block books, which were the immediate predecessors of the true printed book,|produced the block books, which were the immediate predecessors of the true printed book, 5 | LJ001-0005|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing. 6 | LJ001-0006|And it is worth mention in passing that, as an example of fine typography,|And it is worth mention in passing that, as an example of fine typography, 7 | LJ001-0007|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about 1455,|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about fourteen fifty-five, 8 | LJ001-0008|has never been surpassed.|has never been surpassed. 
9 | -------------------------------------------------------------------------------- /tests/data/ljspeech/metadata_flac.csv: -------------------------------------------------------------------------------- 1 | audio_file|text|transcription|speaker_name 2 | wavs/LJ001-0001.flac|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|ljspeech-0 3 | wavs/LJ001-0002.flac|in being comparatively modern.|in being comparatively modern.|ljspeech-0 4 | wavs/LJ001-0003.flac|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|ljspeech-1 5 | wavs/LJ001-0004.flac|produced the block books, which were the immediate predecessors of the true printed book,|produced the block books, which were the immediate predecessors of the true printed book,|ljspeech-1 6 | wavs/LJ001-0005.flac|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|ljspeech-2 7 | wavs/LJ001-0006.flac|And it is worth mention in passing that, as an example of fine typography,|And it is worth mention in passing that, as an example of fine typography,|ljspeech-2 8 | wavs/LJ001-0007.flac|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about 1455,|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about fourteen fifty-five,|ljspeech-3 9 | wavs/LJ001-0008.flac|has never been surpassed.|has never been surpassed.|ljspeech-3 -------------------------------------------------------------------------------- /tests/data/ljspeech/metadata_mp3.csv: -------------------------------------------------------------------------------- 1 | audio_file|text|transcription|speaker_name 2 | wavs/LJ001-0001.mp3|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|ljspeech-0 3 | wavs/LJ001-0002.mp3|in being comparatively modern.|in being comparatively modern.|ljspeech-0 4 | wavs/LJ001-0003.mp3|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|ljspeech-1 5 | wavs/LJ001-0004.mp3|produced the block books, which were the immediate predecessors of the true printed book,|produced the block books, which were the immediate predecessors of the true printed book,|ljspeech-1 6 | wavs/LJ001-0005.mp3|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|the invention of movable metal 
letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|ljspeech-2 7 | wavs/LJ001-0006.mp3|And it is worth mention in passing that, as an example of fine typography,|And it is worth mention in passing that, as an example of fine typography,|ljspeech-2 8 | wavs/LJ001-0007.mp3|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about 1455,|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about fourteen fifty-five,|ljspeech-3 9 | wavs/LJ001-0008.mp3|has never been surpassed.|has never been surpassed.|ljspeech-3 -------------------------------------------------------------------------------- /tests/data/ljspeech/metadata_wav.csv: -------------------------------------------------------------------------------- 1 | audio_file|text|transcription|speaker_name 2 | wavs/LJ001-0001.wav|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition|ljspeech-0 3 | wavs/LJ001-0002.wav|in being comparatively modern.|in being comparatively modern.|ljspeech-0 4 | wavs/LJ001-0003.wav|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process|ljspeech-1 5 | wavs/LJ001-0004.wav|produced the block books, which were the immediate predecessors of the true printed book,|produced the block books, which were the immediate predecessors of the true printed book,|ljspeech-1 6 | wavs/LJ001-0005.wav|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.|ljspeech-2 7 | wavs/LJ001-0006.wav|And it is worth mention in passing that, as an example of fine typography,|And it is worth mention in passing that, as an example of fine typography,|ljspeech-2 8 | wavs/LJ001-0007.wav|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about 1455,|the earliest book printed with movable types, the Gutenberg, or "forty-two line Bible" of about fourteen fifty-five,|ljspeech-3 9 | wavs/LJ001-0008.wav|has never been surpassed.|has never been surpassed.|ljspeech-3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0001.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0001.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0001.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0001.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0001.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0001.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0001.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0001.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0002.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0002.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0002.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0002.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0002.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0002.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0002.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0002.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0003.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0003.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0003.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0003.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0003.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0003.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0003.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0003.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0004.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0004.flac 
-------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0004.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0004.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0004.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0004.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0004.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0004.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0005.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0005.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0005.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0005.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0005.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0005.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0005.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0005.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0006.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0006.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0006.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0006.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0006.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0006.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0006.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0006.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0007.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0007.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0007.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0007.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0007.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0007.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0007.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0007.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0008.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0008.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0008.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0008.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0008.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0008.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0008.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0008.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0009.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0009.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0009.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0009.mp3 -------------------------------------------------------------------------------- 
/tests/data/ljspeech/wavs/LJ001-0009.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0009.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0009.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0009.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0010.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0010.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0010.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0010.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0010.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0010.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0010.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0010.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0011.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0011.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0011.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0011.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0011.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0011.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0011.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0011.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0012.flac: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0012.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0012.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0012.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0012.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0012.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0012.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0012.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0013.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0013.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0013.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0013.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0013.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0013.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0013.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0013.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0014.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0014.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0014.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0014.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0014.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0014.npy -------------------------------------------------------------------------------- 
/tests/data/ljspeech/wavs/LJ001-0014.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0014.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0015.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0015.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0015.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0015.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0015.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0015.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0015.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0015.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0016.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0016.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0016.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0016.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0016.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0016.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0016.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0016.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0017.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0017.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0017.mp3: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0017.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0017.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0017.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0017.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0017.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0018.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0018.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0018.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0018.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0018.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0018.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0018.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0018.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0019.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0019.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0019.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0019.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0019.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0019.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0019.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0019.wav -------------------------------------------------------------------------------- 
/tests/data/ljspeech/wavs/LJ001-0020.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0020.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0020.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0020.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0020.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0020.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0020.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0020.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0021.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0021.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0021.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0021.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0021.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0021.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0021.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0021.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0022.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0022.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0022.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0022.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0022.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0022.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0022.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0022.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0023.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0023.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0023.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0023.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0023.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0023.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0023.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0023.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0024.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0024.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0024.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0024.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0024.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0024.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0024.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0024.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0025.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0025.flac -------------------------------------------------------------------------------- 
/tests/data/ljspeech/wavs/LJ001-0025.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0025.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0025.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0025.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0025.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0025.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0026.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0026.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0026.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0026.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0026.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0026.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0026.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0026.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0027.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0027.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0027.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0027.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0027.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0027.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0027.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0027.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0028.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0028.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0028.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0028.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0028.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0028.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0028.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0028.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0029.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0029.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0029.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0029.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0029.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0029.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0029.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0029.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0030.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0030.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0030.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0030.mp3 -------------------------------------------------------------------------------- 
/tests/data/ljspeech/wavs/LJ001-0030.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0030.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0030.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0030.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0031.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0031.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0031.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0031.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0031.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0031.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0031.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0031.wav -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0032.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0032.flac -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0032.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0032.mp3 -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0032.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0032.npy -------------------------------------------------------------------------------- /tests/data/ljspeech/wavs/LJ001-0032.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data/ljspeech/wavs/LJ001-0032.wav -------------------------------------------------------------------------------- /tests/data_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/data_tests/__init__.py 
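Note on the test metadata above: metadata_flac.csv, metadata_mp3.csv, and metadata_wav.csv all share the same pipe-delimited layout with the columns audio_file|text|transcription|speaker_name. The short sketch below is illustrative only (load_metadata is a hypothetical helper, not a function from this repository); it shows one way such a file could be parsed into the per-utterance dicts that the formatter tests further down expect.

import os

def load_metadata(root_path, meta_file):
    # Illustrative sketch: parse a pipe-delimited metadata CSV into dicts.
    items = []
    with open(os.path.join(root_path, meta_file), encoding="utf-8") as f:
        header = f.readline().strip().split("|")  # audio_file|text|transcription|speaker_name
        for line in f:
            fields = line.rstrip("\n").split("|")
            item = dict(zip(header, fields))
            item["audio_file"] = os.path.join(root_path, item["audio_file"])
            items.append(item)
    return items

# For example, load_metadata("tests/data/ljspeech", "metadata_wav.csv") should yield 8 items,
# the first pointing at wavs/LJ001-0001.wav with speaker_name "ljspeech-0".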
-------------------------------------------------------------------------------- /tests/data_tests/test_dataset_formatters.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from tests import get_tests_input_path 5 | from TTS.tts.datasets.formatters import common_voice 6 | 7 | 8 | class TestTTSFormatters(unittest.TestCase): 9 | def test_common_voice_preprocessor(self): # pylint: disable=no-self-use 10 | root_path = get_tests_input_path() 11 | meta_file = "common_voice.tsv" 12 | items = common_voice(root_path, meta_file) 13 | assert items[0]["text"] == "The applicants are invited for coffee and visa is given immediately." 14 | assert items[0]["audio_file"] == os.path.join(get_tests_input_path(), "clips", "common_voice_en_20005954.wav") 15 | 16 | assert items[-1]["text"] == "Competition for limited resources has also resulted in some local conflicts." 17 | assert items[-1]["audio_file"] == os.path.join(get_tests_input_path(), "clips", "common_voice_en_19737074.wav") 18 | -------------------------------------------------------------------------------- /tests/inference_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/inference_tests/__init__.py -------------------------------------------------------------------------------- /tests/inference_tests/test_synthesize.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from tests import get_tests_output_path, run_cli 4 | 5 | 6 | def test_synthesize(): 7 | """Test synthesize.py with different arguments.""" 8 | output_path = os.path.join(get_tests_output_path(), "output.wav") 9 | run_cli("tts --list_models") 10 | 11 | # single speaker model 12 | run_cli(f'tts --text "This is an example." --out_path "{output_path}"') 13 | run_cli( 14 | "tts --model_name tts_models/en/ljspeech/glow-tts " f'--text "This is an example." --out_path "{output_path}"' 15 | ) 16 | run_cli( 17 | "tts --model_name tts_models/en/ljspeech/glow-tts " 18 | "--vocoder_name vocoder_models/en/ljspeech/multiband-melgan " 19 | f'--text "This is an example." --out_path "{output_path}"' 20 | ) 21 | -------------------------------------------------------------------------------- /tests/inputs/common_voice.tsv: -------------------------------------------------------------------------------- 1 | client_id path sentence up_votes down_votes age gender accent locale segment 2 | 95324d489b122a800b840e0b0d068f7363a1a6c2cd2e7365672cc7033e38deaa794bd59edcf8196aa35c9791652b9085ac3839a98bb50ebab4a1e8538a94846b common_voice_en_20005954.mp3 The applicants are invited for coffee and visa is given immediately. 3 0 en 3 | 95324d489b122a800b840e0b0d068f7363a1a6c2cd2e7365672cc7033e38deaa794bd59edcf8196aa35c9791652b9085ac3839a98bb50ebab4a1e8538a94846b common_voice_en_20005955.mp3 Developmental robotics is related to, but differs from, evolutionary robotics. 2 0 en 4 | 95324d489b122a800b840e0b0d068f7363a1a6c2cd2e7365672cc7033e38deaa794bd59edcf8196aa35c9791652b9085ac3839a98bb50ebab4a1e8538a94846b common_voice_en_20005956.mp3 The musical was originally directed and choreographed by Alan Lund. 2 0 en 5 | 954a4181ae9fba89d1b1570f2ae148b3ee18ee2311de978e698f598db859f830d93d35574596d713518e8c96cdae01fce7a08c60c2e0a22bcf01e020924440a6 common_voice_en_19737073.mp3 He graduated from Columbia High School, in Brown County, South Dakota.
2 0 en 6 | 954a4181ae9fba89d1b1570f2ae148b3ee18ee2311de978e698f598db859f830d93d35574596d713518e8c96cdae01fce7a08c60c2e0a22bcf01e020924440a6 common_voice_en_19737074.mp3 Competition for limited resources has also resulted in some local conflicts. 2 0 en 7 | -------------------------------------------------------------------------------- /tests/inputs/example_1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/inputs/example_1.wav -------------------------------------------------------------------------------- /tests/inputs/language_ids.json: -------------------------------------------------------------------------------- 1 | { 2 | "en": 0, 3 | "fr-fr": 1, 4 | "pt-br": 2 5 | } -------------------------------------------------------------------------------- /tests/inputs/scale_stats.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/inputs/scale_stats.npy -------------------------------------------------------------------------------- /tests/inputs/server_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "tts_checkpoint":"checkpoint_10.pth", // tts checkpoint file 3 | "tts_config":"dummy_model_config.json", // tts config.json file 4 | "tts_speakers": null, // json file listing speaker ids. null if no speaker embedding. 5 | "wavernn_lib_path": null, // Root path to wavernn project folder to be imported. If this is null, model uses GL for speech synthesis. 6 | "wavernn_file": null, // wavernn checkpoint file name 7 | "wavernn_config": null, // wavernn config file 8 | "vocoder_config":null, 9 | "vocoder_checkpoint": null, 10 | "is_wavernn_batched":true, 11 | "port": 5002, 12 | "use_cuda": false, 13 | "debug": true 14 | } 15 | -------------------------------------------------------------------------------- /tests/inputs/test_vocoder_audio_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "audio":{ 3 | "num_mels": 80, // size of the mel spec frame. 4 | "num_freq": 513, // number of stft frequency levels. Size of the linear spectrogram frame. 5 | "sample_rate": 22050, // wav sample-rate. If different than the original data, it is resampled. 6 | "frame_length_ms": null, // stft window length in ms. 7 | "frame_shift_ms": null, // stft window hop-length in ms. 8 | "hop_length": 256, 9 | "win_length": 1024, 10 | "preemphasis": 0.97, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no pre-emphasis. 11 | "min_level_db": -100, // normalization range 12 | "ref_level_db": 20, // reference level db, theoretically 20db is the sound of air. 13 | "power": 1.5, // value to sharpen wav signals after GL algorithm. 14 | "griffin_lim_iters": 30,// #griffin-lim iterations. 30-60 is a good range. The larger the value, the slower the generation. 15 | "signal_norm": true, // normalize the spec values in range [0, 1] 16 | "symmetric_norm": true, // move normalization to range [-1, 1] 17 | "clip_norm": true, // clip normalized values into the range. 18 | "max_norm": 4, // scale normalization to range [-max_norm, max_norm] or [0, max_norm] 19 | "mel_fmin": 0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!! 20 | "mel_fmax": 8000, // maximum freq level for mel-spec. Tune for dataset!!
21 | "do_trim_silence": false 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /tests/text_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/text_tests/__init__.py -------------------------------------------------------------------------------- /tests/text_tests/test_belarusian_phonemizer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | import warnings 4 | 5 | from TTS.tts.utils.text.belarusian.phonemizer import belarusian_text_to_phonemes 6 | 7 | _TEST_CASES = """ 8 | Фанетычны канвертар/fanʲɛˈtɨt͡ʂnɨ kanˈvʲɛrtar 9 | Гэтак мы працавалі/ˈɣɛtak ˈmɨ prat͡saˈvalʲi 10 | """ 11 | 12 | 13 | class TestText(unittest.TestCase): 14 | def test_belarusian_text_to_phonemes(self): 15 | try: 16 | os.environ["BEL_FANETYKA_JAR"] 17 | except KeyError: 18 | warnings.warn( 19 | "You need to define 'BEL_FANETYKA_JAR' environment variable as path to the fanetyka.jar file to test Belarusian phonemizer", 20 | Warning, 21 | ) 22 | return 23 | 24 | for line in _TEST_CASES.strip().split("\n"): 25 | text, phonemes = line.split("/") 26 | self.assertEqual(belarusian_text_to_phonemes(text), phonemes) 27 | 28 | 29 | if __name__ == "__main__": 30 | unittest.main() 31 | -------------------------------------------------------------------------------- /tests/text_tests/test_japanese_phonemizer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from TTS.tts.utils.text.japanese.phonemizer import japanese_text_to_phonemes 4 | 5 | _TEST_CASES = """ 6 | どちらに行きますか?/dochiraniikimasuka? 7 | 今日は温泉に、行きます。/kyo:waoNseNni,ikimasu. 8 | 「A」から「Z」までです。/e:karazeqtomadedesu. 9 | そうですね!/so:desune! 10 | クジラは哺乳類です。/kujirawahonyu:ruidesu. 11 | ヴィディオを見ます。/bidioomimasu. 12 | 今日は8月22日です/kyo:wahachigatsuniju:ninichidesu 13 | xyzとαβγ/eqkusuwaizeqtotoarufabe:tagaNma 14 | 値段は$12.34です/nedaNwaju:niteNsaNyoNdorudesu 15 | """ 16 | 17 | 18 | class TestText(unittest.TestCase): 19 | def test_japanese_text_to_phonemes(self): 20 | for line in _TEST_CASES.strip().split("\n"): 21 | text, phone = line.split("/") 22 | self.assertEqual(japanese_text_to_phonemes(text), phone) 23 | 24 | 25 | if __name__ == "__main__": 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/text_tests/test_korean_phonemizer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from TTS.tts.utils.text.korean.phonemizer import korean_text_to_phonemes 4 | 5 | _TEST_CASES = """ 6 | 포상은 열심히 한 아이에게만 주어지기 때문에 포상인 것입니다./포상으 녈심히 하 나이에게만 주어지기 때무네 포상인 거심니다. 7 | 오늘은 8월 31일 입니다./오느른 파뤌 삼시비리 림니다. 8 | 친구 100명 만들기가 목표입니다./친구 뱅명 만들기가 목표임니다. 9 | A부터 Z까지 입니다./에이부터 제트까지 임니다. 10 | 이게 제 마음이에요./이게 제 마으미에요. 11 | """ 12 | _TEST_CASES_EN = """ 13 | 이제야 이쪽을 보는구나./IJeYa IJjoGeul BoNeunGuNa. 14 | 크고 맛있는 cake를 부탁해요./KeuGo MaSinNeun KeIKeuLeul BuTaKaeYo. 15 | 전부 거짓말이야./JeonBu GeoJinMaLiYa. 16 | 좋은 노래를 찾았어요./JoEun NoLaeLeul ChaJaSseoYo. 
17 | """ 18 | 19 | 20 | class TestText(unittest.TestCase): 21 | def test_korean_text_to_phonemes(self): 22 | for line in _TEST_CASES.strip().split("\n"): 23 | text, phone = line.split("/") 24 | self.assertEqual(korean_text_to_phonemes(text), phone) 25 | for line in _TEST_CASES_EN.strip().split("\n"): 26 | text, phone = line.split("/") 27 | self.assertEqual(korean_text_to_phonemes(text, character="english"), phone) 28 | 29 | 30 | if __name__ == "__main__": 31 | unittest.main() 32 | -------------------------------------------------------------------------------- /tests/text_tests/test_punctuation.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from TTS.tts.utils.text.punctuation import _DEF_PUNCS, Punctuation 4 | 5 | 6 | class PunctuationTest(unittest.TestCase): 7 | def setUp(self): 8 | self.punctuation = Punctuation() 9 | self.test_texts = [ 10 | ("This, is my text ... to be striped !! from text?", "This is my text to be striped from text"), 11 | ("This, is my text ... to be striped !! from text", "This is my text to be striped from text"), 12 | ("This, is my text ... to be striped from text?", "This is my text to be striped from text"), 13 | ("This, is my text to be striped from text", "This is my text to be striped from text"), 14 | (".", ""), 15 | (" . ", ""), 16 | ("!!! Attention !!!", "Attention"), 17 | ("!!! Attention !!! This is just a ... test.", "Attention This is just a test"), 18 | ("!!! Attention! This is just a ... test.", "Attention This is just a test"), 19 | ] 20 | 21 | def test_get_set_puncs(self): 22 | self.punctuation.puncs = "-=" 23 | self.assertEqual(self.punctuation.puncs, "-=") 24 | 25 | self.punctuation.puncs = _DEF_PUNCS 26 | self.assertEqual(self.punctuation.puncs, _DEF_PUNCS) 27 | 28 | def test_strip_punc(self): 29 | for text, gt in self.test_texts: 30 | text_striped = self.punctuation.strip(text) 31 | self.assertEqual(text_striped, gt) 32 | 33 | def test_strip_restore(self): 34 | for text, gt in self.test_texts: 35 | text_striped, puncs_map = self.punctuation.strip_to_restore(text) 36 | text_restored = self.punctuation.restore(text_striped, puncs_map) 37 | self.assertEqual(" ".join(text_striped), gt) 38 | self.assertEqual(text_restored[0], text) 39 | -------------------------------------------------------------------------------- /tests/text_tests/test_text_cleaners.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from TTS.tts.utils.text.cleaners import english_cleaners, phoneme_cleaners 4 | 5 | 6 | def test_time() -> None: 7 | assert english_cleaners("It's 11:00") == "it's eleven a m" 8 | assert english_cleaners("It's 9:01") == "it's nine oh one a m" 9 | assert english_cleaners("It's 16:00") == "it's four p m" 10 | assert english_cleaners("It's 00:00 am") == "it's twelve a m" 11 | 12 | 13 | def test_currency() -> None: 14 | assert phoneme_cleaners("It's $10.50") == "It's ten dollars fifty cents" 15 | assert phoneme_cleaners("£1.1") == "one pound sterling one penny" 16 | assert phoneme_cleaners("¥1") == "one yen" 17 | 18 | 19 | def test_expand_numbers() -> None: 20 | assert phoneme_cleaners("-1") == "minus one" 21 | assert phoneme_cleaners("1") == "one" 22 | -------------------------------------------------------------------------------- /tests/tts_tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/tts_tests/__init__.py -------------------------------------------------------------------------------- /tests/tts_tests2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/tts_tests2/__init__.py -------------------------------------------------------------------------------- /tests/vc_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/vc_tests/__init__.py -------------------------------------------------------------------------------- /tests/vocoder_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/vocoder_tests/__init__.py -------------------------------------------------------------------------------- /tests/vocoder_tests/test_fullband_melgan_train.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | 5 | from tests import get_device_id, get_tests_output_path, run_cli 6 | from TTS.vocoder.configs import FullbandMelganConfig 7 | 8 | config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json") 9 | output_path = os.path.join(get_tests_output_path(), "train_outputs") 10 | 11 | config = FullbandMelganConfig( 12 | batch_size=8, 13 | eval_batch_size=8, 14 | num_loader_workers=0, 15 | num_eval_loader_workers=0, 16 | run_eval=True, 17 | test_delay_epochs=-1, 18 | epochs=1, 19 | seq_len=8192, 20 | eval_split_size=1, 21 | print_step=1, 22 | print_eval=True, 23 | data_path="tests/data/ljspeech", 24 | discriminator_model_params={"base_channels": 16, "max_channels": 64, "downsample_factors": [4, 4, 4]}, 25 | output_path=output_path, 26 | ) 27 | config.audio.do_trim_silence = True 28 | config.audio.trim_db = 60 29 | config.save_json(config_path) 30 | 31 | # train the model for one epoch 32 | command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} " 33 | run_cli(command_train) 34 | 35 | # Find latest folder 36 | continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) 37 | 38 | # restore the model and continue training for one more epoch 39 | command_train = ( 40 | f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} " 41 | ) 42 | run_cli(command_train) 43 | shutil.rmtree(continue_path) 44 | -------------------------------------------------------------------------------- /tests/vocoder_tests/test_hifigan_train.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | 5 | from tests import get_device_id, get_tests_output_path, run_cli 6 | from TTS.vocoder.configs import HifiganConfig 7 | 8 | config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json") 9 | output_path = os.path.join(get_tests_output_path(), "train_outputs") 10 | 11 | 12 | config = HifiganConfig( 13 | batch_size=8, 14 | eval_batch_size=8, 15 | num_loader_workers=0, 16 | num_eval_loader_workers=0, 17 | run_eval=True, 18 | test_delay_epochs=-1, 19 | epochs=1, 20 | seq_len=1024, 21 | 
    eval_split_size=1,
    print_step=1,
    print_eval=True,
    data_path="tests/data/ljspeech",
    output_path=output_path,
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)

# train the model for one epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
run_cli(command_train)

# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)

# restore the model and continue training for one more epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_melgan_train.py:
--------------------------------------------------------------------------------
import glob
import os
import shutil

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.vocoder.configs import MelganConfig

config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")

config = MelganConfig(
    batch_size=4,
    eval_batch_size=4,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    seq_len=2048,
    eval_split_size=1,
    print_step=1,
    discriminator_model_params={"base_channels": 16, "max_channels": 64, "downsample_factors": [4, 4, 4]},
    print_eval=True,
    data_path="tests/data/ljspeech",
    output_path=output_path,
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)

# train the model for one epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
run_cli(command_train)

# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)

# restore the model and continue training for one more epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_multiband_melgan_train.py:
--------------------------------------------------------------------------------
import glob
import os
import shutil

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.vocoder.configs import MultibandMelganConfig

config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")

config = MultibandMelganConfig(
    batch_size=8,
    eval_batch_size=8,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    seq_len=8192,
    eval_split_size=1,
    print_step=1,
    print_eval=True,
    steps_to_start_discriminator=1,
    data_path="tests/data/ljspeech",
    discriminator_model_params={"base_channels": 16, "max_channels": 64, "downsample_factors": [4, 4, 4]},
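    # steps_to_start_discriminator=1 (set above) brings the discriminator in after a
    # single step, so the adversarial loss path is also exercised in this one-epoch run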
"downsample_factors": [4, 4, 4]}, 26 | output_path=output_path, 27 | ) 28 | config.audio.do_trim_silence = True 29 | config.audio.trim_db = 60 30 | config.save_json(config_path) 31 | 32 | # train the model for one epoch 33 | command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} " 34 | run_cli(command_train) 35 | 36 | # Find latest folder 37 | continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) 38 | 39 | # restore the model and continue training for one more epoch 40 | command_train = ( 41 | f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} " 42 | ) 43 | run_cli(command_train) 44 | shutil.rmtree(continue_path) 45 | -------------------------------------------------------------------------------- /tests/vocoder_tests/test_parallel_wavegan_train.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | 5 | from tests import get_device_id, get_tests_output_path, run_cli 6 | from TTS.vocoder.configs import ParallelWaveganConfig 7 | 8 | config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json") 9 | output_path = os.path.join(get_tests_output_path(), "train_outputs") 10 | 11 | config = ParallelWaveganConfig( 12 | batch_size=4, 13 | eval_batch_size=4, 14 | num_loader_workers=0, 15 | num_eval_loader_workers=0, 16 | run_eval=True, 17 | test_delay_epochs=-1, 18 | epochs=1, 19 | seq_len=2048, 20 | eval_split_size=1, 21 | print_step=1, 22 | print_eval=True, 23 | data_path="tests/data/ljspeech", 24 | output_path=output_path, 25 | ) 26 | config.audio.do_trim_silence = True 27 | config.audio.trim_db = 60 28 | config.save_json(config_path) 29 | 30 | # train the model for one epoch 31 | command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} " 32 | run_cli(command_train) 33 | 34 | # Find latest folder 35 | continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) 36 | 37 | # restore the model and continue training for one more epoch 38 | command_train = ( 39 | f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} " 40 | ) 41 | run_cli(command_train) 42 | shutil.rmtree(continue_path) 43 | -------------------------------------------------------------------------------- /tests/vocoder_tests/test_vocoder_melgan_discriminator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from TTS.vocoder.models.melgan_discriminator import MelganDiscriminator 5 | from TTS.vocoder.models.melgan_multiscale_discriminator import MelganMultiscaleDiscriminator 6 | 7 | 8 | def test_melgan_discriminator(): 9 | model = MelganDiscriminator() 10 | print(model) 11 | dummy_input = torch.rand((4, 1, 256 * 10)) 12 | output, _ = model(dummy_input) 13 | assert np.all(output.shape == (4, 1, 10)) 14 | 15 | 16 | def test_melgan_multi_scale_discriminator(): 17 | model = MelganMultiscaleDiscriminator() 18 | print(model) 19 | dummy_input = torch.rand((4, 1, 256 * 16)) 20 | scores, feats = model(dummy_input) 21 | assert len(scores) == 3 22 | assert len(scores) == len(feats) 23 | assert np.all(scores[0].shape == (4, 1, 64)) 24 | assert np.all(feats[0][0].shape == (4, 16, 4096)) 25 | assert np.all(feats[0][1].shape == (4, 64, 1024)) 26 | assert np.all(feats[0][2].shape == (4, 
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_melgan_generator.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

from TTS.vocoder.models.melgan_generator import MelganGenerator


def test_melgan_generator():
    model = MelganGenerator()
    print(model)
    dummy_input = torch.rand((4, 80, 64))
    output = model(dummy_input)
    assert np.all(output.shape == (4, 1, 64 * 256))
    output = model.inference(dummy_input)
    assert np.all(output.shape == (4, 1, (64 + 4) * 256))
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_parallel_wavegan_discriminator.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

from TTS.vocoder.models.parallel_wavegan_discriminator import (
    ParallelWaveganDiscriminator,
    ResidualParallelWaveganDiscriminator,
)


def test_pwgan_discriminator():
    model = ParallelWaveganDiscriminator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_layers=10,
        conv_channels=64,
        dilation_factor=1,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        bias=True,
    )
    dummy_x = torch.rand((4, 1, 64 * 256))
    output = model(dummy_x)
    assert np.all(output.shape == (4, 1, 64 * 256))
    model.remove_weight_norm()


def test_residual_pwgan_discriminator():
    model = ResidualParallelWaveganDiscriminator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_layers=30,
        stacks=3,
        res_channels=64,
        gate_channels=128,
        skip_channels=64,
        dropout=0.0,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
    )
    dummy_x = torch.rand((4, 1, 64 * 256))
    output = model(dummy_x)
    assert np.all(output.shape == (4, 1, 64 * 256))
    model.remove_weight_norm()
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_parallel_wavegan_generator.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

from TTS.vocoder.models.parallel_wavegan_generator import ParallelWaveganGenerator


def test_pwgan_generator():
    model = ParallelWaveganGenerator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_res_blocks=30,
        stacks=3,
        res_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        dropout=0.0,
        bias=True,
        use_weight_norm=True,
        upsample_factors=[4, 4, 4, 4],
    )
    dummy_c = torch.rand((2, 80, 5))
    output = model(dummy_c)
    assert np.all(output.shape == (2, 1, 5 * 256)), output.shape
    model.remove_weight_norm()
    output = model.inference(dummy_c)
    assert np.all(output.shape == (2, 1, (5 + 4) * 256))
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_pqmf.py:
--------------------------------------------------------------------------------
import os

import soundfile as sf
import torch
from librosa.core import load

from tests import get_tests_input_path, get_tests_output_path, get_tests_path
from TTS.vocoder.layers.pqmf import PQMF
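
# PQMF (pseudo-QMF) splits a waveform into N subbands (analysis) and reconstructs it
# back (synthesis); this test round-trips a sample wav through both stages and writes
# the result to the test output folder for manual inspection.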

TESTS_PATH = get_tests_path()
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")


def test_pqmf():
    w, sr = load(WAV_FILE)

    layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0)
    w2 = torch.from_numpy(w[None, None, :])
    b2 = layer.analysis(w2)
    w2_ = layer.synthesis(b2)

    print(w2_.max())
    print(w2_.min())
    print(w2_.mean())
    sf.write(os.path.join(get_tests_output_path(), "pqmf_output.wav"), w2_.flatten().detach(), sr)
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_rwd.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

from TTS.vocoder.models.random_window_discriminator import RandomWindowDiscriminator


def test_rwd():
    layer = RandomWindowDiscriminator(
        cond_channels=80,
        window_sizes=(512, 1024, 2048, 4096, 8192),
        cond_disc_downsample_factors=[(8, 4, 2, 2, 2), (8, 4, 2, 2), (8, 4, 2), (8, 4), (4, 2, 2)],
        hop_length=256,
    )
    x = torch.rand([4, 1, 22050])
    c = torch.rand([4, 80, 22050 // 256])

    scores, _ = layer(x, c)
    assert len(scores) == 10
    assert np.all(scores[0].shape == (4, 1, 1))
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_vocoder_wavernn.py:
--------------------------------------------------------------------------------
import random

import numpy as np
import torch

from TTS.vocoder.configs import WavernnConfig
from TTS.vocoder.models.wavernn import Wavernn, WavernnArgs


def test_wavernn():
    config = WavernnConfig()
    config.model_args = WavernnArgs(
        rnn_dims=512,
        fc_dims=512,
        mode="mold",
        mulaw=False,
        pad=2,
        use_aux_net=True,
        use_upsample_net=True,
        upsample_factors=[4, 8, 8],
        feat_dims=80,
        compute_dims=128,
        res_out_dims=128,
        num_res_blocks=10,
    )
    config.audio.hop_length = 256
    config.audio.sample_rate = 2048

    dummy_x = torch.rand((2, 1280))
    dummy_m = torch.rand((2, 80, 9))
    y_size = random.randrange(20, 60)
    dummy_y = torch.rand((80, y_size))

    # mode: mold
    model = Wavernn(config)
    output = model(dummy_x, dummy_m)
    assert np.all(output.shape == (2, 1280, 30)), output.shape

    # mode: gauss
    config.model_args.mode = "gauss"
    model = Wavernn(config)
    output = model(dummy_x, dummy_m)
    assert np.all(output.shape == (2, 1280, 2)), output.shape

    # mode: quantized
    config.model_args.mode = 4
    model = Wavernn(config)
    output = model(dummy_x, dummy_m)
    assert np.all(output.shape == (2, 1280, 2**4)), output.shape
    output = model.inference(dummy_y, True, 5500, 550)
    assert np.all(output.shape == (256 * (y_size - 1),))
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_wavegrad_train.py:
--------------------------------------------------------------------------------
import glob
import os
import shutil

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.vocoder.configs import WavegradConfig

config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")
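
# one-epoch Wavegrad training smoke test; the two-step test_noise_schedule keeps
# eval-time inference cheap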
config = WavegradConfig(
    batch_size=8,
    eval_batch_size=8,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    seq_len=8192,
    eval_split_size=1,
    print_step=1,
    print_eval=True,
    data_path="tests/data/ljspeech",
    output_path=output_path,
    test_noise_schedule={"min_val": 1e-6, "max_val": 1e-2, "num_steps": 2},
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)

# train the model for one epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
run_cli(command_train)

# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)

# restore the model and continue training for one more epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
--------------------------------------------------------------------------------
/tests/vocoder_tests/test_wavernn_train.py:
--------------------------------------------------------------------------------
import glob
import os
import shutil

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.vocoder.configs import WavernnConfig
from TTS.vocoder.models.wavernn import WavernnArgs

config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")


config = WavernnConfig(
    model_args=WavernnArgs(),
    batch_size=8,
    eval_batch_size=8,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    seq_len=256,  # for shorter test time
    eval_split_size=1,
    print_step=1,
    print_eval=True,
    data_path="tests/data/ljspeech",
    output_path=output_path,
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)

# train the model for one epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
run_cli(command_train)

# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)

# restore the model and continue training for one more epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
--------------------------------------------------------------------------------
/tests/zoo_tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coqui-ai/TTS/dbf1a08a0d4e47fdad6172e433eeb34bc6b13b4e/tests/zoo_tests/__init__.py
--------------------------------------------------------------------------------