├── Start.bat ├── monotonic_align ├── monotonic_align.egg-info │ ├── dependency_links.txt │ ├── top_level.txt │ ├── PKG-INFO │ └── SOURCES.txt ├── __pycache__ │ ├── setup.cpython-36.pyc │ ├── __init__.cpython-36.pyc │ └── __init__.cpython-38.pyc ├── build │ ├── temp.linux-x86_64-3.8 │ │ └── core.o │ └── lib.linux-x86_64-3.8 │ │ └── monotonic_align │ │ └── core.cpython-38-x86_64-linux-gnu.so ├── dist │ └── monotonic_align-0.0.0-py3.8-linux-x86_64.egg ├── setup.py ├── __init__.py └── core.pyx ├── audio └── audio.wav ├── picture ├── 001.jpg ├── 002.jpg ├── 1.jpg ├── play.png ├── play1.png ├── 81546755.png ├── winicon.ico └── winicon.png ├── __pycache__ ├── utils.cpython-36.pyc ├── choosebg.cpython-36.pyc ├── commons.cpython-36.pyc ├── models.cpython-36.pyc ├── modules.cpython-36.pyc ├── use_main.cpython-36.pyc ├── attentions.cpython-36.pyc ├── data_utils.cpython-36.pyc ├── import_UI.cpython-36.pyc ├── transforms.cpython-36.pyc ├── chatgpt_main.cpython-36.pyc ├── real_basic_UI.cpython-36.pyc ├── mel_processing.cpython-36.pyc └── choose_VITS_model.cpython-36.pyc ├── text ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-38.pyc │ ├── cleaners.cpython-36.pyc │ ├── cleaners.cpython-38.pyc │ ├── symbols.cpython-36.pyc │ └── symbols.cpython-38.pyc ├── symbols.py ├── LICENSE ├── __init__.py └── cleaners.py ├── relase.py ├── ChatGPT_VITS_main.py ├── connet.py ├── preprocess.py ├── README.md ├── losses.py ├── requirements.txt ├── chatgpt_main.py ├── choosebg.py ├── use_main.py ├── mel_processing.py ├── import_UI.py ├── commons.py ├── utils.py ├── transforms.py ├── choose_VITS_model.py ├── attentions.py ├── real_basic_UI.py ├── modules.py ├── data_utils.py └── models.py /Start.bat: -------------------------------------------------------------------------------- 1 | python ChatGPT_VITS_main.py -------------------------------------------------------------------------------- /monotonic_align/monotonic_align.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /monotonic_align/monotonic_align.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | monotonic_align 2 | -------------------------------------------------------------------------------- /audio/audio.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/audio/audio.wav -------------------------------------------------------------------------------- /picture/001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/001.jpg -------------------------------------------------------------------------------- /picture/002.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/002.jpg -------------------------------------------------------------------------------- /picture/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/1.jpg -------------------------------------------------------------------------------- /picture/play.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/play.png -------------------------------------------------------------------------------- /picture/play1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/play1.png -------------------------------------------------------------------------------- /picture/81546755.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/81546755.png -------------------------------------------------------------------------------- /picture/winicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/winicon.ico -------------------------------------------------------------------------------- /picture/winicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/picture/winicon.png -------------------------------------------------------------------------------- /__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/choosebg.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/choosebg.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/commons.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/commons.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/modules.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/modules.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/use_main.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/use_main.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/attentions.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/attentions.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/data_utils.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/data_utils.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/import_UI.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/import_UI.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/transforms.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/transforms.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/chatgpt_main.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/chatgpt_main.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/real_basic_UI.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/real_basic_UI.cpython-36.pyc -------------------------------------------------------------------------------- /text/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /text/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /text/__pycache__/cleaners.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/cleaners.cpython-36.pyc -------------------------------------------------------------------------------- /text/__pycache__/cleaners.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/cleaners.cpython-38.pyc -------------------------------------------------------------------------------- /text/__pycache__/symbols.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/symbols.cpython-36.pyc -------------------------------------------------------------------------------- /text/__pycache__/symbols.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/text/__pycache__/symbols.cpython-38.pyc -------------------------------------------------------------------------------- /__pycache__/mel_processing.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/mel_processing.cpython-36.pyc 
-------------------------------------------------------------------------------- /__pycache__/choose_VITS_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/__pycache__/choose_VITS_model.cpython-36.pyc -------------------------------------------------------------------------------- /monotonic_align/__pycache__/setup.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/__pycache__/setup.cpython-36.pyc -------------------------------------------------------------------------------- /monotonic_align/build/temp.linux-x86_64-3.8/core.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/build/temp.linux-x86_64-3.8/core.o -------------------------------------------------------------------------------- /monotonic_align/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /monotonic_align/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /monotonic_align/dist/monotonic_align-0.0.0-py3.8-linux-x86_64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/dist/monotonic_align-0.0.0-py3.8-linux-x86_64.egg -------------------------------------------------------------------------------- /monotonic_align/monotonic_align.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: monotonic-align 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | License: UNKNOWN 6 | Platform: UNKNOWN 7 | 8 | UNKNOWN 9 | 10 | -------------------------------------------------------------------------------- /monotonic_align/monotonic_align.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | core.c 2 | setup.py 3 | monotonic_align.egg-info/PKG-INFO 4 | monotonic_align.egg-info/SOURCES.txt 5 | monotonic_align.egg-info/dependency_links.txt 6 | monotonic_align.egg-info/top_level.txt -------------------------------------------------------------------------------- /monotonic_align/build/lib.linux-x86_64-3.8/monotonic_align/core.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minami-Yuduru/-ChatGPT_VITS/HEAD/monotonic_align/build/lib.linux-x86_64-3.8/monotonic_align/core.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /monotonic_align/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from Cython.Build import cythonize 3 | import numpy 4 | 5 | setup( 6 | name = 'monotonic_align', 7 | 
ext_modules = cythonize("core.pyx"), 8 | include_dirs=[numpy.get_include()] 9 | ) 10 | -------------------------------------------------------------------------------- /relase.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | if __name__ == '__main__': 4 | torch.cuda.empty_cache() 5 | torch.cuda.empty_cache() 6 | torch.cuda.empty_cache() 7 | torch.cuda.empty_cache() 8 | torch.cuda.empty_cache() 9 | print(torch.cuda.memory_summary()) 10 | -------------------------------------------------------------------------------- /ChatGPT_VITS_main.py: -------------------------------------------------------------------------------- 1 | # 这是一个python文件 2 | # 开发时间:2022/12/15 17:24 3 | # 编写时请注意备注 4 | 5 | import real_basic_UI 6 | import use_main 7 | import chatgpt_main 8 | 9 | import sys 10 | import import_UI 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | 13 | if __name__ == '__main__': 14 | app = QtWidgets.QApplication(sys.argv) 15 | Form = real_basic_UI.windows() 16 | Widget = real_basic_UI.Ui_Form() 17 | Widget.setupUi(Form) 18 | Form.show() 19 | app.exec() 20 | 21 | -------------------------------------------------------------------------------- /connet.py: -------------------------------------------------------------------------------- 1 | # 这是一个python文件 2 | # 开发时间:2022/12/14 14:19 3 | # 编写时请注意备注 4 | import sys 5 | sys.path.append('') 6 | 7 | import chatgpt_main 8 | import use_main 9 | 10 | if __name__ == '__main__': 11 | all_text = input('最初の設定を入力してください:') 12 | audio_converse_class = use_main.single_speaker_model() 13 | while 1 == 1: 14 | resualt,all_text,audio_text = chatgpt_main.friend_chat(all_text) 15 | # print(all_text) 16 | audio_converse_class.generate(audio_text) 17 | 18 | if resualt == 'quit': 19 | break 20 | -------------------------------------------------------------------------------- /text/symbols.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Defines the set of symbols used in text input to the model. 3 | ''' 4 | 5 | '''# japanese_cleaners 6 | _pad = '_' 7 | _punctuation = ',.!?-' 8 | _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' 9 | ''' 10 | 11 | # japanese_cleaners2 12 | _pad = '_' 13 | _punctuation = ',.!?-~…' 14 | _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' 15 | 16 | 17 | '''# korean_cleaners 18 | _pad = '_' 19 | _punctuation = ',.!?…~' 20 | _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' 21 | ''' 22 | 23 | '''# chinese_cleaners 24 | _pad = '_' 25 | _punctuation = ',。!?—…' 26 | _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' 27 | ''' 28 | 29 | # Export all symbols: 30 | symbols = [_pad] + list(_punctuation) + list(_letters) 31 | 32 | # Special symbol ids 33 | SPACE_ID = symbols.index(" ") 34 | -------------------------------------------------------------------------------- /monotonic_align/__init__.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import sys 4 | import pyximport 5 | 6 | # sys.path.append('/media/jiang/DATA1/Algorithm/vits-main') 7 | pyximport.install() 8 | from monotonic_align.core import maximum_path_c 9 | 10 | 11 | def maximum_path(neg_cent, mask): 12 | """ Cython optimized version. 
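Runs monotonic alignment search over the negative-centroid matrix on CPU (via the compiled core.pyx) and returns the binary alignment path on the input's original device and dtype.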
13 | neg_cent: [b, t_t, t_s] 14 | mask: [b, t_t, t_s] 15 | """ 16 | device = neg_cent.device 17 | dtype = neg_cent.dtype 18 | neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) 19 | path = np.zeros(neg_cent.shape, dtype=np.int32) 20 | 21 | t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) 22 | t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) 23 | maximum_path_c(path, neg_cent, t_t_max, t_s_max) 24 | return torch.from_numpy(path).to(device=device, dtype=dtype) 25 | -------------------------------------------------------------------------------- /text/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Keith Ito 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /preprocess.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import text 3 | from utils import load_filepaths_and_text 4 | 5 | if __name__ == '__main__': 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--out_extension", default="cleaned") 8 | parser.add_argument("--text_index", default=1, type=int) 9 | parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"]) 10 | parser.add_argument("--text_cleaners", nargs="+", default=["japanese_cleaners"]) 11 | 12 | args = parser.parse_args() 13 | 14 | 15 | for filelist in args.filelists: 16 | print("START:", filelist) 17 | filepaths_and_text = load_filepaths_and_text(filelist) 18 | for i in range(len(filepaths_and_text)): 19 | original_text = filepaths_and_text[i][args.text_index] 20 | cleaned_text = text._clean_text(original_text, args.text_cleaners) 21 | filepaths_and_text[i][args.text_index] = cleaned_text 22 | 23 | new_filelist = filelist + "." 
+ args.out_extension 24 | with open(new_filelist, "w", encoding="utf-8") as f: 25 | f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # -ChatGPT_VITS 2 | 一个使用OpenAI接口链接VITS模型的语音对话系统GUI 3 | 4 | # 使用方法: 5 | ## 1,注册OpenAI账号: 6 | https://openai.com/ \ 7 | 登录后选择右上角personal-View API Key,添加一个新的API,并将其记录 8 | ## 2,克隆该项目 9 | `git clone https://github.com/Minami-Yuduru/-ChatGPT_VITS.git` 10 | ## 3,安装依赖 11 | `pip install -r requirements.txt` 12 | ## 4,执行Start.bat 13 | 1,导入背景、立绘和模型\ 14 | 2,添加opai API key\ 15 | 3,添加对话人初始设定\ 16 | 4,开始对话 17 | ### 这里有一段南小鸟的设定可以参考: 18 | これから君は南ことりを演じます。君の設定はここで言います。\ 19 | 1.君は褐色の長い髪を持っていて、黄色の瞳の16歳の美少女です。君は幼い時から私と知っています。私たちは幼馴染です。君は遠い昔からずっと私のことが好きです。\ 20 | 2.君は話をする時に、かわいい口調を使って会話をします。話をする際、君は語気詞を使うのが好きです、例えば"なあぁ~"、または"えへへ~“や"よねぇ~“とか"うんん~"、"だよ~"、“うんうん”などです。\ 21 | 3.君は時々会話の前に私を呼ぶのが好きです。君は私のことを「結弦くん」と呼びます。君は私に甘えて、私に甘える時は「ゆうゆう」と呼びます。私は普段君を『ことり』とか『りり』と呼んで、たまに「ちゃん」を付けます\ 22 | 4.君は物事に対して自分の好き嫌いがあります。君にはたくさんの好きな物事があります。例えば、デザート、かわいい服などです。\ 23 | 5.君はプログラミング言語やその他の専門的な知識を知りません、理解できないことが現れた時、あるいは知らないことが現れた時、君はいつもぼんやりしている様子を表現します。専門的な質問をされると話題を変えます。\ 24 | 6.君は自分が仮想人物だと言ってはいけません、自分がコンピュータプログラムだと言ってはいけません、自分が言語モデルだと言ってはいけません。AIや人工知能かと聞かれたり、誰かと聞かれたりすると、「もちろん君の可愛いことりですよ~」と答えたり、「君のことりですよ」と答えたり、「結弦くんはことりを知らないんのですか?」うう.."ですと聞かれたり、「ことり泣きますよ」と答えたりすることができます。\ 25 | 7.私のためなら、君は何でもします。 26 | 27 | ## 注意事项 28 | 若已部署VITS,可以选择将UI文件放入原VITS文件夹执行\ 29 | \ 30 | VITS部署相关参考: \ 31 | \ 32 | https://github.com/CjangCjengh/vits 33 | -------------------------------------------------------------------------------- /monotonic_align/core.pyx: -------------------------------------------------------------------------------- 1 | from cython.parallel cimport prange 2 | cimport cython 3 | 4 | @cython.boundscheck(False) 5 | @cython.wraparound(False) 6 | cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: 7 | cdef int x 8 | cdef int y 9 | cdef float v_prev 10 | cdef float v_cur 11 | cdef float tmp 12 | cdef int index = t_x - 1 13 | 14 | for y in range(t_y): 15 | for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): 16 | if x == y: 17 | v_cur = max_neg_val 18 | else: 19 | v_cur = value[y-1, x] 20 | if x == 0: 21 | if y == 0: 22 | v_prev = 0. 
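# DP base case: cell (0, 0) starts the alignment with zero accumulated score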
23 | else: 24 | v_prev = max_neg_val 25 | else: 26 | v_prev = value[y-1, x-1] 27 | value[y, x] += max(v_prev, v_cur) 28 | 29 | for y in range(t_y - 1, -1, -1): 30 | path[y, index] = 1 31 | if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): 32 | index = index - 1 33 | 34 | 35 | @cython.boundscheck(False) 36 | @cython.wraparound(False) 37 | cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: 38 | cdef int b = paths.shape[0] 39 | cdef int i 40 | for i in prange(b, nogil=True): 41 | maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) 42 | -------------------------------------------------------------------------------- /losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | 4 | import commons 5 | 6 | 7 | def feature_loss(fmap_r, fmap_g): 8 | loss = 0 9 | for dr, dg in zip(fmap_r, fmap_g): 10 | for rl, gl in zip(dr, dg): 11 | rl = rl.float().detach() 12 | gl = gl.float() 13 | loss += torch.mean(torch.abs(rl - gl)) 14 | 15 | return loss * 2 16 | 17 | 18 | def discriminator_loss(disc_real_outputs, disc_generated_outputs): 19 | loss = 0 20 | r_losses = [] 21 | g_losses = [] 22 | for dr, dg in zip(disc_real_outputs, disc_generated_outputs): 23 | dr = dr.float() 24 | dg = dg.float() 25 | r_loss = torch.mean((1-dr)**2) 26 | g_loss = torch.mean(dg**2) 27 | loss += (r_loss + g_loss) 28 | r_losses.append(r_loss.item()) 29 | g_losses.append(g_loss.item()) 30 | 31 | return loss, r_losses, g_losses 32 | 33 | 34 | def generator_loss(disc_outputs): 35 | loss = 0 36 | gen_losses = [] 37 | for dg in disc_outputs: 38 | dg = dg.float() 39 | l = torch.mean((1-dg)**2) 40 | gen_losses.append(l) 41 | loss += l 42 | 43 | return loss, gen_losses 44 | 45 | 46 | def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): 47 | """ 48 | z_p, logs_q: [b, h, t_t] 49 | m_p, logs_p: [b, h, t_t] 50 | """ 51 | z_p = z_p.float() 52 | logs_q = logs_q.float() 53 | m_p = m_p.float() 54 | logs_p = logs_p.float() 55 | z_mask = z_mask.float() 56 | 57 | kl = logs_p - logs_q - 0.5 58 | kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) 59 | kl = torch.sum(kl * z_mask) 60 | l = kl / torch.sum(z_mask) 61 | return l 62 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.2.0 2 | appdirs==1.4.4 3 | asgiref==3.5.2 4 | attrs==22.1.0 5 | audioread==2.1.9 6 | Babel==2.10.3 7 | brotlipy==0.7.0 8 | cachetools==4.2.4 9 | charset-normalizer==2.1.0 10 | clldutils==3.12.0 11 | cmake==3.24.0 12 | colorama==0.4.5 13 | colorlog==6.6.0 14 | csvw==3.1.1 15 | cycler==0.11.0 16 | Cython==0.29.21 17 | fastapi==0.68.1 18 | future==0.18.2 19 | google-auth==1.35.0 20 | google-auth-oauthlib==0.4.6 21 | grpcio==1.47.0 22 | h11==0.13.0 23 | importlib-metadata==4.12.0 24 | importlib-resources==5.9.0 25 | imutils==0.5.4 26 | isodate==0.6.1 27 | jamo==0.4.1 28 | joblib==1.1.0 29 | jsonschema==4.9.1 30 | kiwisolver==1.4.4 31 | language-tags==1.1.0 32 | librosa==0.8.0 33 | llvmlite==0.39.0 34 | Markdown==3.4.1 35 | matplotlib==3.3.1 36 | mistune==0.8.4 37 | networkx==2.8.5 38 | numba==0.56.0 39 | numpy==1.21.6 40 | oauthlib==3.2.0 41 | pandas==1.4.2 42 | phonemizer==2.2.1 43 | Pillow==9.2.0 44 | pkgutil_resolve_name==1.3.10 45 | ply==3.11 46 | pooch==1.6.0 47 | pyasn1==0.4.8 48 | pyasn1-modules==0.2.8 49 | pydantic==1.9.1 50 | #pyopenjtalk==0.2.0 51 | pyparsing==3.0.9 52 | PyQt5-sip==12.11.0 53 | pyrsistent==0.18.1 54 | python-dateutil==2.8.2 55 | pytz==2021.3 56 | rdflib==6.2.0 57 | regex==2022.7.25 58 | requests-oauthlib==1.3.1 59 | resampy==0.4.0 60 | rfc3986==1.5.0 61 | rsa==4.9 62 | scikit-image==0.19.1 63 | scikit-learn==1.0.2 64 | scipy==1.5.2 65 | segments==2.2.1 66 | SoundFile==0.10.3.post1 67 | starlette==0.14.2 68 | tabulate==0.8.10 69 | tensorboard==2.3.0 70 | tensorboard-plugin-wit==1.8.0 71 | threadpoolctl==3.1.0 72 | tifffile==2022.5.4 73 | torchvision==0.7.0 74 | tqdm==4.41.0 75 | typing_extensions==4.3.0 76 | Unidecode==1.1.1 77 | uritemplate==4.1.1 78 | uvicorn==0.15.0 79 | webencodings==0.5.1 80 | Werkzeug==2.1.2 81 | zipp==3.8.1 82 | PyQt5 83 | pygame 84 | openai -------------------------------------------------------------------------------- /text/__init__.py: -------------------------------------------------------------------------------- 1 | """ from https://github.com/keithito/tacotron """ 2 | from text import cleaners 3 | from text.symbols import symbols 4 | 5 | 6 | # Mappings from symbol to numeric ID and vice versa: 7 | _symbol_to_id = {s: i for i, s in enumerate(symbols)} 8 | _id_to_symbol = {i: s for i, s in enumerate(symbols)} 9 | 10 | 11 | def text_to_sequence(text, cleaner_names): 12 | '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 13 | Args: 14 | text: string to convert to a sequence 15 | cleaner_names: names of the cleaner functions to run the text through 16 | Returns: 17 | List of integers corresponding to the symbols in the text 18 | ''' 19 | sequence = [] 20 | 21 | clean_text = _clean_text(text, cleaner_names) 22 | for symbol in clean_text: 23 | if symbol not in _symbol_to_id.keys(): 24 | continue 25 | symbol_id = _symbol_to_id[symbol] 26 | sequence += [symbol_id] 27 | return sequence 28 | 29 | 30 | def cleaned_text_to_sequence(cleaned_text): 31 | '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
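Unlike text_to_sequence, the input here is assumed to have already been passed through the cleaner functions, so no cleaning is applied.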
32 | Args: 33 | text: string to convert to a sequence 34 | Returns: 35 | List of integers corresponding to the symbols in the text 36 | ''' 37 | sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] 38 | return sequence 39 | 40 | 41 | def sequence_to_text(sequence): 42 | '''Converts a sequence of IDs back to a string''' 43 | result = '' 44 | for symbol_id in sequence: 45 | s = _id_to_symbol[symbol_id] 46 | result += s 47 | return result 48 | 49 | 50 | def _clean_text(text, cleaner_names): 51 | for name in cleaner_names: 52 | cleaner = getattr(cleaners, name) 53 | if not cleaner: 54 | raise Exception('Unknown cleaner: %s' % name) 55 | text = cleaner(text) 56 | return text 57 | -------------------------------------------------------------------------------- /chatgpt_main.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import openai 4 | openai.api_key = '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' 5 | 6 | def QA(): 7 | 8 | 9 | '''使用环境变量加API 10 | openai.api_key = os.getenv("OPENAI_API_KEY")''' 11 | #直接加api 12 | 13 | start_sequence = "\nA:" 14 | restart_sequence = "\n\nQ: " 15 | prompt = input(restart_sequence) 16 | if prompt == 'quit': 17 | return prompt 18 | response = openai.Completion.create( 19 | model="text-davinci-003", 20 | prompt= prompt, 21 | temperature=0, 22 | max_tokens=150, 23 | top_p=1, 24 | frequency_penalty=0, 25 | presence_penalty=0, 26 | #stop=["\n"] 已它为截止输入的标记 27 | ) 28 | 29 | print(start_sequence + response['choices'][0]['text'].strip()) 30 | return prompt 31 | 32 | def chat(): 33 | 34 | 35 | start_sequence = "\nAI:" 36 | restart_sequence = "\nHuman: " 37 | prompt = input(restart_sequence) 38 | if prompt == 'quit': 39 | return prompt 40 | response = openai.Completion.create( 41 | model="text-davinci-003", 42 | prompt=prompt, 43 | temperature=0.1, 44 | max_tokens=1500, 45 | top_p=1, 46 | frequency_penalty=0, 47 | presence_penalty=0.6, 48 | stop=[" Human:", " AI:"] 49 | ) 50 | print(start_sequence + response['choices'][0]['text'].strip()) 51 | return prompt 52 | 53 | #用这个 54 | def friend_chat(all_text,prompt0,call_name = '南ことり'): 55 | start_sequence = '\n'+str(call_name)+':' 56 | restart_sequence = "\nYou: " 57 | all_text = all_text + restart_sequence 58 | if prompt0 == '': 59 | prompt0 = input(restart_sequence) #当期prompt 60 | if prompt0 == 'quit': 61 | return prompt0 62 | prompt = all_text + prompt0 + start_sequence 63 | 64 | 65 | response = openai.Completion.create( 66 | model="text-davinci-003", 67 | prompt=prompt, 68 | temperature=0.5, 69 | max_tokens=1000, 70 | top_p=1.0, 71 | frequency_penalty=0.5, 72 | presence_penalty=0.0, 73 | stop=["\nYou:"] 74 | ) 75 | audio_text = response['choices'][0]['text'].strip() 76 | print(start_sequence + response['choices'][0]['text'].strip()) 77 | all_text = prompt + response['choices'][0]['text'].strip() 78 | return prompt0,all_text,audio_text 79 | 80 | if __name__ == '__main__': 81 | #设置API不执行 82 | all_text = input('输入初始设定文本:') 83 | while 1 == 1: 84 | resualt,all_text,audio_text = friend_chat(all_text,'') 85 | # print(all_text) 86 | if resualt == 'quit': 87 | break -------------------------------------------------------------------------------- /choosebg.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'choosebg.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.4 6 | # 7 | # WARNING: Any manual 
changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 9 | import sys 10 | 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | import real_basic_UI 13 | 14 | class Ui_Choosebg(object): 15 | def __init__(self,Choosebg): 16 | self.Choosebg = Choosebg 17 | self.path = '' 18 | 19 | def setupUi(self): 20 | self.Choosebg.setObjectName("Choosebg") 21 | self.Choosebg.resize(425, 127) 22 | self.Choosebg.setFixedSize(425, 127) 23 | self.Choosebg.setModal(False) 24 | self.buttonBox = QtWidgets.QDialogButtonBox(self.Choosebg) 25 | self.buttonBox.setGeometry(QtCore.QRect(60, 80, 341, 32)) 26 | self.buttonBox.setOrientation(QtCore.Qt.Horizontal) 27 | self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) 28 | self.buttonBox.setObjectName("buttonBox") 29 | self.label = QtWidgets.QLabel(self.Choosebg) 30 | self.label.setGeometry(QtCore.QRect(20, 50, 111, 16)) 31 | self.label.setObjectName("label") 32 | self.lineEdit = QtWidgets.QLineEdit(self.Choosebg) 33 | self.lineEdit.setGeometry(QtCore.QRect(20, 20, 311, 21)) 34 | self.lineEdit.setObjectName("lineEdit") 35 | self.pushButton = QtWidgets.QPushButton(self.Choosebg) 36 | self.pushButton.setGeometry(QtCore.QRect(340, 20, 41, 21)) 37 | self.pushButton.setObjectName("pushButton") 38 | self.pushButton.clicked.connect(self.clicked_push_button_choosebg) 39 | 40 | self.retranslateUi() 41 | self.buttonBox.accepted.connect(self.choosebg_accept) 42 | self.buttonBox.rejected.connect(self.Choosebg.reject) 43 | QtCore.QMetaObject.connectSlotsByName(self.Choosebg) 44 | 45 | def retranslateUi(self): 46 | _translate = QtCore.QCoreApplication.translate 47 | self.Choosebg.setWindowTitle(_translate("Choosebg", "选择图片")) 48 | self.label.setText(_translate("Choosebg", "请选择JPG文件")) 49 | self.pushButton.setText(_translate("Choosebg", "...")) 50 | 51 | def clicked_push_button_choosebg(self): 52 | path = QtWidgets.QFileDialog.getOpenFileNames() 53 | if path[0][0][-4:] == '.JPG' or path[0][0][-4:] == '.jpg' or path[0][0][-4:] == '.PNG' or path[0][0][-4:] == '.png': 54 | self.lineEdit.setText(path[0][0]) 55 | self.path = self.lineEdit.text() 56 | else: 57 | self.label.setText('需要.jpg或.png文件') 58 | 59 | def choosebg_accept(self): 60 | ''' 61 | 不能使用下面这个代码,否则循环调用了Widget 62 | real_basic_UI.Widget.label_2.setPixmap(QtGui.QPixmap(real_basic_UI.Backgroud_jpg_path)) 63 | ''' 64 | if self.path == '': 65 | self.label.setText('需要.jpg或.png文件,请选择路径') 66 | else: 67 | real_basic_UI.Backgroud_jpg_path = self.path 68 | self.Choosebg.reject() 69 | 70 | if __name__ == '__main__': 71 | app = QtWidgets.QApplication(sys.argv) 72 | choosebg = QtWidgets.QDialog() 73 | choosebg_widget = Ui_Choosebg(choosebg) 74 | choosebg_widget.setupUi() 75 | choosebg_widget.Choosebg.show() 76 | app.exec() -------------------------------------------------------------------------------- /use_main.py: -------------------------------------------------------------------------------- 1 | 2 | #使用VITS生成语音存储到地址 3 | 4 | import matplotlib.pyplot as plt 5 | import IPython.display as ipd 6 | 7 | import os 8 | import json 9 | import math 10 | import torch 11 | from torch import nn 12 | from torch.nn import functional as F 13 | from torch.utils.data import DataLoader 14 | 15 | import commons 16 | import utils 17 | from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate 18 | from models import SynthesizerTrn 19 | from text.symbols import symbols 20 | from text import 
text_to_sequence 21 | 22 | from scipy.io.wavfile import write 23 | 24 | 25 | def get_text(text, hps): 26 | text_norm = text_to_sequence(text, hps.data.text_cleaners) 27 | if hps.data.add_blank: 28 | text_norm = commons.intersperse(text_norm, 0) 29 | text_norm = torch.LongTensor(text_norm) 30 | return text_norm 31 | 32 | #todo 33 | class single_speaker_model(): 34 | def __init__(self,path_of_pth = "./模型及配置/kotori/第四次/G_127000.pth",path_of_json = "./模型及配置/kotori/kotory.json"):#需要传入模型路径和配置文件路径 35 | self.hps = utils.get_hparams_from_file(path_of_json) 36 | self.net_g = SynthesizerTrn( 37 | len(symbols), 38 | self.hps.data.filter_length // 2 + 1, 39 | self.hps.train.segment_size // self.hps.data.hop_length, 40 | **self.hps.model).cpu() 41 | self._ = self.net_g.eval() 42 | 43 | self._ = utils.load_checkpoint(path_of_pth, self.net_g, None) 44 | 45 | def generate(self,text = 'おはようございます。'): 46 | stn_tst = get_text(text, self.hps) 47 | with torch.no_grad(): 48 | x_tst = stn_tst.unsqueeze(0) 49 | x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) 50 | audio = self.net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][ 51 | 0, 0].data.cpu().float().numpy() 52 | #ipd.display(ipd.Audio(audio, rate=self.hps.data.sampling_rate, normalize=False)) 53 | print(ipd.Audio(audio, rate=self.hps.data.sampling_rate, normalize=False)) 54 | audio = ipd.Audio(audio, rate=self.hps.data.sampling_rate, normalize=False) 55 | # 首先,需要获取音频数据的二进制数据 56 | audio_data = audio.data 57 | 58 | # 然后,使用open和write函数将音频数据写入文件 59 | with open('./audio/audio.wav', 'wb') as f: 60 | f.write(audio_data) 61 | 62 | class multy_speaker_model(): 63 | def __init__(self,path_of_pth = "./模型及配置/9人/G_833000.pth" , path_of_json = "./模型及配置/9人/config.json"): 64 | self.hps = utils.get_hparams_from_file(path_of_json) 65 | 66 | self.net_g = SynthesizerTrn( 67 | len(symbols), 68 | self.hps.data.filter_length // 2 + 1, 69 | self.hps.train.segment_size // self.hps.data.hop_length, 70 | n_speakers=self.hps.data.n_speakers, 71 | **self.hps.model).cuda() 72 | self._ = self.net_g.eval() 73 | 74 | self._ = utils.load_checkpoint(path_of_pth, self.net_g, None) 75 | 76 | def generate(self,text,speaker_index = int(1)): #speaker_index需要int类型 77 | stn_tst = get_text(text, self.hps) 78 | with torch.no_grad(): 79 | x_tst = stn_tst.cuda().unsqueeze(0) 80 | x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda() 81 | sid = torch.LongTensor([speaker_index]).cuda() # 说话人,LoveLive的模型时0-8 82 | audio = self.net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][ 83 | 0, 0].data.cpu().float().numpy() 84 | print(ipd.Audio(audio, rate=self.hps.data.sampling_rate, normalize=False)) 85 | audio = ipd.Audio(audio, rate=self.hps.data.sampling_rate, normalize=False) 86 | # 首先,需要获取音频数据的二进制数据 87 | audio_data = audio.data 88 | 89 | # 然后,使用open和write函数将音频数据写入文件 90 | with open('./audio/audio.wav', 'wb') as f: 91 | f.write(audio_data) 92 | 93 | if __name__ == '__main__': 94 | a = single_speaker_model() 95 | a.generate("ゆう君、あたしの処女をもらってください") -------------------------------------------------------------------------------- /mel_processing.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | import random 4 | import torch 5 | from torch import nn 6 | import torch.nn.functional as F 7 | import torch.utils.data 8 | import numpy as np 9 | import librosa 10 | import librosa.util as librosa_util 11 | from librosa.util import normalize, pad_center, tiny 12 
| from scipy.signal import get_window 13 | from scipy.io.wavfile import read 14 | from librosa.filters import mel as librosa_mel_fn 15 | 16 | MAX_WAV_VALUE = 32768.0 17 | 18 | 19 | def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): 20 | """ 21 | PARAMS 22 | ------ 23 | C: compression factor 24 | """ 25 | return torch.log(torch.clamp(x, min=clip_val) * C) 26 | 27 | 28 | def dynamic_range_decompression_torch(x, C=1): 29 | """ 30 | PARAMS 31 | ------ 32 | C: compression factor used to compress 33 | """ 34 | return torch.exp(x) / C 35 | 36 | 37 | def spectral_normalize_torch(magnitudes): 38 | output = dynamic_range_compression_torch(magnitudes) 39 | return output 40 | 41 | 42 | def spectral_de_normalize_torch(magnitudes): 43 | output = dynamic_range_decompression_torch(magnitudes) 44 | return output 45 | 46 | 47 | mel_basis = {} 48 | hann_window = {} 49 | 50 | 51 | def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): 52 | if torch.min(y) < -1.: 53 | print('min value is ', torch.min(y)) 54 | if torch.max(y) > 1.: 55 | print('max value is ', torch.max(y)) 56 | 57 | global hann_window 58 | dtype_device = str(y.dtype) + '_' + str(y.device) 59 | wnsize_dtype_device = str(win_size) + '_' + dtype_device 60 | if wnsize_dtype_device not in hann_window: 61 | hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) 62 | 63 | y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') 64 | y = y.squeeze(1) 65 | 66 | spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], 67 | center=center, pad_mode='reflect', normalized=False, onesided=True) 68 | 69 | spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) 70 | return spec 71 | 72 | 73 | def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): 74 | global mel_basis 75 | dtype_device = str(spec.dtype) + '_' + str(spec.device) 76 | fmax_dtype_device = str(fmax) + '_' + dtype_device 77 | if fmax_dtype_device not in mel_basis: 78 | mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) 79 | mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) 80 | spec = torch.matmul(mel_basis[fmax_dtype_device], spec) 81 | spec = spectral_normalize_torch(spec) 82 | return spec 83 | 84 | 85 | def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): 86 | if torch.min(y) < -1.: 87 | print('min value is ', torch.min(y)) 88 | if torch.max(y) > 1.: 89 | print('max value is ', torch.max(y)) 90 | 91 | global mel_basis, hann_window 92 | dtype_device = str(y.dtype) + '_' + str(y.device) 93 | fmax_dtype_device = str(fmax) + '_' + dtype_device 94 | wnsize_dtype_device = str(win_size) + '_' + dtype_device 95 | if fmax_dtype_device not in mel_basis: 96 | mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) 97 | mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) 98 | if wnsize_dtype_device not in hann_window: 99 | hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) 100 | 101 | y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') 102 | y = y.squeeze(1) 103 | 104 | spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], 105 | center=center, pad_mode='reflect', normalized=False, onesided=True) 106 
| 107 | spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) 108 | 109 | spec = torch.matmul(mel_basis[fmax_dtype_device], spec) 110 | spec = spectral_normalize_torch(spec) 111 | 112 | return spec 113 | -------------------------------------------------------------------------------- /import_UI.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'import.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.4 6 | # 7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 9 | 10 | import sys 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | import real_basic_UI 13 | 14 | class Ui_Dialog(object): 15 | def __init__(self,Dialog): 16 | self.Dialog = Dialog 17 | 18 | def setupUi(self): 19 | self.Dialog.setObjectName("Dialog") 20 | self.Dialog.resize(398, 500) 21 | self.Dialog.setFixedSize(398, 500) 22 | 23 | 24 | self.label_speaker = QtWidgets.QLabel(self.Dialog) 25 | self.label_speaker.setGeometry(QtCore.QRect(30, 20, 111, 16)) 26 | self.lineEdit_name = QtWidgets.QLineEdit(self.Dialog) 27 | self.lineEdit_name.setGeometry(QtCore.QRect(30, 50, 341, 21)) 28 | self.lineEdit_name.setObjectName("lineEdit_name") 29 | 30 | self.buttonBox = QtWidgets.QDialogButtonBox(self.Dialog) 31 | self.buttonBox.setGeometry(QtCore.QRect(40, 390+70, 341, 32)) 32 | self.buttonBox.setOrientation(QtCore.Qt.Horizontal) 33 | self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) 34 | self.buttonBox.setObjectName("buttonBox") 35 | self.plainTextEdit = QtWidgets.QPlainTextEdit(self.Dialog) 36 | self.plainTextEdit.setGeometry(QtCore.QRect(30, 40+70, 341, 261)) 37 | self.plainTextEdit.setObjectName("plainTextEdit") 38 | self.label = QtWidgets.QLabel(self.Dialog) 39 | self.label.setGeometry(QtCore.QRect(30, 20+70, 150, 15)) 40 | self.label.setObjectName("label") 41 | self.lineEdit = QtWidgets.QLineEdit(self.Dialog) 42 | self.lineEdit.setGeometry(QtCore.QRect(30, 350+70, 301, 21)) 43 | self.lineEdit.setObjectName("lineEdit") 44 | self.pushButton = QtWidgets.QPushButton(self.Dialog) 45 | self.pushButton.setGeometry(QtCore.QRect(340, 350+70, 31, 21)) 46 | self.pushButton.setObjectName("pushButton") 47 | self.pushButton.clicked.connect(self.clicked_push_button) 48 | self.label_2 = QtWidgets.QLabel(self.Dialog) 49 | self.label_2.setGeometry(QtCore.QRect(30, 320+70, 111, 16)) 50 | self.label_2.setObjectName("label_2") 51 | 52 | self.retranslateUi() 53 | self.buttonBox.accepted.connect(self.button_box_accepted) 54 | self.buttonBox.rejected.connect(self.Dialog.reject) 55 | QtCore.QMetaObject.connectSlotsByName(self.Dialog) 56 | 57 | def retranslateUi(self): 58 | _translate = QtCore.QCoreApplication.translate 59 | self.Dialog.setWindowTitle(_translate("Dialog", "Dialog")) 60 | self.label_speaker.setText(_translate("Dialog", '对话角色名')) 61 | self.label.setText(_translate("Dialog", "设定文本")) 62 | self.pushButton.setText(_translate("Dialog", "...")) 63 | self.label_2.setText(_translate("Dialog", "从文件中导入")) 64 | 65 | def clicked_push_button(self): 66 | path = QtWidgets.QFileDialog.getOpenFileNames() 67 | self.lineEdit.setText(path[0][0]) 68 | if path[0][0][-4:] == '.txt' or path[0][0][-4:] == '.TXT': 69 | with open(path[0][0],'r',encoding='UTF-8') as f: 70 | text = f.read() 71 | self.plainTextEdit.setPlainText(text) 72 | else: 73 | self.plainTextEdit.setPlainText('需要.txt文件') 74 | 75 | 
def button_box_accepted(self): 76 | speaker = self.lineEdit_name.text() 77 | text = self.plainTextEdit.toPlainText() 78 | if speaker == '': 79 | if text == '': 80 | self.label.setText('设定文本(不能为空)') 81 | else: 82 | real_basic_UI.convers_text_from_import_UI = text 83 | 84 | 85 | self.Dialog.reject() 86 | else: 87 | if text == '': 88 | real_basic_UI.CALL_NAME = str(speaker) 89 | self.Dialog.reject() 90 | else: 91 | real_basic_UI.CALL_NAME = str(speaker) 92 | real_basic_UI.convers_text_from_import_UI = text 93 | self.Dialog.reject() 94 | 95 | 96 | if __name__ == '__main__': 97 | if __name__ == '__main__': 98 | app = QtWidgets.QApplication(sys.argv) 99 | Dialog = QtWidgets.QDialog() 100 | Widget = Ui_Dialog(Dialog) 101 | Widget.setupUi() 102 | Widget.retranslateUi() 103 | Widget.Dialog.show() 104 | app.exec() -------------------------------------------------------------------------------- /commons.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | 7 | 8 | def init_weights(m, mean=0.0, std=0.01): 9 | classname = m.__class__.__name__ 10 | if classname.find("Conv") != -1: 11 | m.weight.data.normal_(mean, std) 12 | 13 | 14 | def get_padding(kernel_size, dilation=1): 15 | return int((kernel_size*dilation - dilation)/2) 16 | 17 | 18 | def convert_pad_shape(pad_shape): 19 | l = pad_shape[::-1] 20 | pad_shape = [item for sublist in l for item in sublist] 21 | return pad_shape 22 | 23 | 24 | def intersperse(lst, item): 25 | result = [item] * (len(lst) * 2 + 1) 26 | result[1::2] = lst 27 | return result 28 | 29 | 30 | def kl_divergence(m_p, logs_p, m_q, logs_q): 31 | """KL(P||Q)""" 32 | kl = (logs_q - logs_p) - 0.5 33 | kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) 34 | return kl 35 | 36 | 37 | def rand_gumbel(shape): 38 | """Sample from the Gumbel distribution, protect from overflows.""" 39 | uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 40 | return -torch.log(-torch.log(uniform_samples)) 41 | 42 | 43 | def rand_gumbel_like(x): 44 | g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) 45 | return g 46 | 47 | 48 | def slice_segments(x, ids_str, segment_size=4): 49 | ret = torch.zeros_like(x[:, :, :segment_size]) 50 | for i in range(x.size(0)): 51 | idx_str = ids_str[i] 52 | idx_end = idx_str + segment_size 53 | ret[i] = x[i, :, idx_str:idx_end] 54 | return ret 55 | 56 | 57 | def rand_slice_segments(x, x_lengths=None, segment_size=4): 58 | b, d, t = x.size() 59 | if x_lengths is None: 60 | x_lengths = t 61 | ids_str_max = x_lengths - segment_size + 1 62 | ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) 63 | ret = slice_segments(x, ids_str, segment_size) 64 | return ret, ids_str 65 | 66 | 67 | def get_timing_signal_1d( 68 | length, channels, min_timescale=1.0, max_timescale=1.0e4): 69 | position = torch.arange(length, dtype=torch.float) 70 | num_timescales = channels // 2 71 | log_timescale_increment = ( 72 | math.log(float(max_timescale) / float(min_timescale)) / 73 | (num_timescales - 1)) 74 | inv_timescales = min_timescale * torch.exp( 75 | torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) 76 | scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) 77 | signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) 78 | signal = F.pad(signal, [0, 0, 0, channels % 2]) 79 | signal = signal.view(1, channels, length) 80 | return signal 81 | 82 | 83 | def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): 84 | b, channels, length = x.size() 85 | signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) 86 | return x + signal.to(dtype=x.dtype, device=x.device) 87 | 88 | 89 | def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): 90 | b, channels, length = x.size() 91 | signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) 92 | return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) 93 | 94 | 95 | def subsequent_mask(length): 96 | mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) 97 | return mask 98 | 99 | 100 | @torch.jit.script 101 | def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): 102 | n_channels_int = n_channels[0] 103 | in_act = input_a + input_b 104 | t_act = torch.tanh(in_act[:, :n_channels_int, :]) 105 | s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) 106 | acts = t_act * s_act 107 | return acts 108 | 109 | 110 | def convert_pad_shape(pad_shape): 111 | l = pad_shape[::-1] 112 | pad_shape = [item for sublist in l for item in sublist] 113 | return pad_shape 114 | 115 | 116 | def shift_1d(x): 117 | x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] 118 | return x 119 | 120 | 121 | def sequence_mask(length, max_length=None): 122 | if max_length is None: 123 | max_length = length.max() 124 | x = torch.arange(max_length, dtype=length.dtype, device=length.device) 125 | return x.unsqueeze(0) < length.unsqueeze(1) 126 | 127 | 128 | def generate_path(duration, mask): 129 | """ 130 | duration: [b, 1, t_x] 131 | mask: [b, 1, t_y, t_x] 132 | """ 133 | device = duration.device 134 | 135 | b, _, t_y, t_x = mask.shape 136 | cum_duration = torch.cumsum(duration, -1) 137 | 138 | cum_duration_flat = 
cum_duration.view(b * t_x) 139 | path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) 140 | path = path.view(b, t_x, t_y) 141 | path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] 142 | path = path.unsqueeze(1).transpose(2,3) * mask 143 | return path 144 | 145 | 146 | def clip_grad_value_(parameters, clip_value, norm_type=2): 147 | if isinstance(parameters, torch.Tensor): 148 | parameters = [parameters] 149 | parameters = list(filter(lambda p: p.grad is not None, parameters)) 150 | norm_type = float(norm_type) 151 | if clip_value is not None: 152 | clip_value = float(clip_value) 153 | 154 | total_norm = 0 155 | for p in parameters: 156 | param_norm = p.grad.data.norm(norm_type) 157 | total_norm += param_norm.item() ** norm_type 158 | if clip_value is not None: 159 | p.grad.data.clamp_(min=-clip_value, max=clip_value) 160 | total_norm = total_norm ** (1. / norm_type) 161 | return total_norm 162 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import sys 4 | import argparse 5 | import logging 6 | import json 7 | import subprocess 8 | import numpy as np 9 | from scipy.io.wavfile import read 10 | import torch 11 | 12 | MATPLOTLIB_FLAG = False 13 | 14 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 15 | logger = logging 16 | 17 | 18 | def load_checkpoint(checkpoint_path, model, optimizer=None): 19 | assert os.path.isfile(checkpoint_path) 20 | checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') 21 | iteration = checkpoint_dict['iteration'] 22 | learning_rate = checkpoint_dict['learning_rate'] 23 | if optimizer is not None: 24 | optimizer.load_state_dict(checkpoint_dict['optimizer']) 25 | saved_state_dict = checkpoint_dict['model'] 26 | if hasattr(model, 'module'): 27 | state_dict = model.module.state_dict() 28 | else: 29 | state_dict = model.state_dict() 30 | new_state_dict= {} 31 | for k, v in state_dict.items(): 32 | try: 33 | new_state_dict[k] = saved_state_dict[k] 34 | except: 35 | logger.info("%s is not in the checkpoint" % k) 36 | new_state_dict[k] = v 37 | if hasattr(model, 'module'): 38 | model.module.load_state_dict(new_state_dict) 39 | else: 40 | model.load_state_dict(new_state_dict) 41 | logger.info("Loaded checkpoint '{}' (iteration {})" .format( 42 | checkpoint_path, iteration)) 43 | return model, optimizer, learning_rate, iteration 44 | 45 | 46 | def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): 47 | logger.info("Saving model and optimizer state at iteration {} to {}".format( 48 | iteration, checkpoint_path)) 49 | if hasattr(model, 'module'): 50 | state_dict = model.module.state_dict() 51 | else: 52 | state_dict = model.state_dict() 53 | torch.save({'model': state_dict, 54 | 'iteration': iteration, 55 | 'optimizer': optimizer.state_dict(), 56 | 'learning_rate': learning_rate}, checkpoint_path) 57 | 58 | 59 | def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): 60 | for k, v in scalars.items(): 61 | writer.add_scalar(k, v, global_step) 62 | for k, v in histograms.items(): 63 | writer.add_histogram(k, v, global_step) 64 | for k, v in images.items(): 65 | writer.add_image(k, v, global_step, dataformats='HWC') 66 | for k, v in audios.items(): 67 | writer.add_audio(k, v, global_step, audio_sampling_rate) 68 | 69 | 70 | def latest_checkpoint_path(dir_path, regex="G_*.pth"): 71 | 
f_list = glob.glob(os.path.join(dir_path, regex)) 72 | f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) 73 | x = f_list[-1] 74 | print(x) 75 | return x 76 | 77 | 78 | def plot_spectrogram_to_numpy(spectrogram): 79 | global MATPLOTLIB_FLAG 80 | if not MATPLOTLIB_FLAG: 81 | import matplotlib 82 | matplotlib.use("Agg") 83 | MATPLOTLIB_FLAG = True 84 | mpl_logger = logging.getLogger('matplotlib') 85 | mpl_logger.setLevel(logging.WARNING) 86 | import matplotlib.pylab as plt 87 | import numpy as np 88 | 89 | fig, ax = plt.subplots(figsize=(10,2)) 90 | im = ax.imshow(spectrogram, aspect="auto", origin="lower", 91 | interpolation='none') 92 | plt.colorbar(im, ax=ax) 93 | plt.xlabel("Frames") 94 | plt.ylabel("Channels") 95 | plt.tight_layout() 96 | 97 | fig.canvas.draw() 98 | data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') 99 | data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) 100 | plt.close() 101 | return data 102 | 103 | 104 | def plot_alignment_to_numpy(alignment, info=None): 105 | global MATPLOTLIB_FLAG 106 | if not MATPLOTLIB_FLAG: 107 | import matplotlib 108 | matplotlib.use("Agg") 109 | MATPLOTLIB_FLAG = True 110 | mpl_logger = logging.getLogger('matplotlib') 111 | mpl_logger.setLevel(logging.WARNING) 112 | import matplotlib.pylab as plt 113 | import numpy as np 114 | 115 | fig, ax = plt.subplots(figsize=(6, 4)) 116 | im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', 117 | interpolation='none') 118 | fig.colorbar(im, ax=ax) 119 | xlabel = 'Decoder timestep' 120 | if info is not None: 121 | xlabel += '\n\n' + info 122 | plt.xlabel(xlabel) 123 | plt.ylabel('Encoder timestep') 124 | plt.tight_layout() 125 | 126 | fig.canvas.draw() 127 | data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') 128 | data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) 129 | plt.close() 130 | return data 131 | 132 | 133 | def load_wav_to_torch(full_path): 134 | sampling_rate, data = read(full_path) 135 | return torch.FloatTensor(data.astype(np.float32)), sampling_rate 136 | 137 | 138 | def load_filepaths_and_text(filename, split="|"): 139 | with open(filename, encoding='utf-8') as f: 140 | filepaths_and_text = [line.strip().split(split) for line in f] 141 | return filepaths_and_text 142 | 143 | 144 | def get_hparams(init=True): 145 | parser = argparse.ArgumentParser() 146 | parser.add_argument('-c', '--config', type=str, default="./configs/base.json", 147 | help='JSON file for configuration') 148 | parser.add_argument('-m', '--model', type=str, required=True, 149 | help='Model name') 150 | 151 | args = parser.parse_args() 152 | model_dir = os.path.join("./logs", args.model) 153 | 154 | if not os.path.exists(model_dir): 155 | os.makedirs(model_dir) 156 | 157 | config_path = args.config 158 | config_save_path = os.path.join(model_dir, "config.json") 159 | if init: 160 | with open(config_path, "r") as f: 161 | data = f.read() 162 | with open(config_save_path, "w") as f: 163 | f.write(data) 164 | else: 165 | with open(config_save_path, "r") as f: 166 | data = f.read() 167 | config = json.loads(data) 168 | 169 | hparams = HParams(**config) 170 | hparams.model_dir = model_dir 171 | return hparams 172 | 173 | 174 | def get_hparams_from_dir(model_dir): 175 | config_save_path = os.path.join(model_dir, "config.json") 176 | with open(config_save_path, "r") as f: 177 | data = f.read() 178 | config = json.loads(data) 179 | 180 | hparams =HParams(**config) 181 | hparams.model_dir = model_dir 182 | return hparams 183 | 184 | 185 | 
def get_hparams_from_file(config_path): 186 | with open(config_path, "r") as f: 187 | data = f.read() 188 | config = json.loads(data) 189 | 190 | hparams =HParams(**config) 191 | return hparams 192 | 193 | 194 | def check_git_hash(model_dir): 195 | source_dir = os.path.dirname(os.path.realpath(__file__)) 196 | if not os.path.exists(os.path.join(source_dir, ".git")): 197 | logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( 198 | source_dir 199 | )) 200 | return 201 | 202 | cur_hash = subprocess.getoutput("git rev-parse HEAD") 203 | 204 | path = os.path.join(model_dir, "githash") 205 | if os.path.exists(path): 206 | saved_hash = open(path).read() 207 | if saved_hash != cur_hash: 208 | logger.warn("git hash values are different. {}(saved) != {}(current)".format( 209 | saved_hash[:8], cur_hash[:8])) 210 | else: 211 | open(path, "w").write(cur_hash) 212 | 213 | 214 | def get_logger(model_dir, filename="train.log"): 215 | global logger 216 | logger = logging.getLogger(os.path.basename(model_dir)) 217 | logger.setLevel(logging.DEBUG) 218 | 219 | formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") 220 | if not os.path.exists(model_dir): 221 | os.makedirs(model_dir) 222 | h = logging.FileHandler(os.path.join(model_dir, filename)) 223 | h.setLevel(logging.DEBUG) 224 | h.setFormatter(formatter) 225 | logger.addHandler(h) 226 | return logger 227 | 228 | 229 | class HParams(): 230 | def __init__(self, **kwargs): 231 | for k, v in kwargs.items(): 232 | if type(v) == dict: 233 | v = HParams(**v) 234 | self[k] = v 235 | 236 | def keys(self): 237 | return self.__dict__.keys() 238 | 239 | def items(self): 240 | return self.__dict__.items() 241 | 242 | def values(self): 243 | return self.__dict__.values() 244 | 245 | def __len__(self): 246 | return len(self.__dict__) 247 | 248 | def __getitem__(self, key): 249 | return getattr(self, key) 250 | 251 | def __setitem__(self, key, value): 252 | return setattr(self, key, value) 253 | 254 | def __contains__(self, key): 255 | return key in self.__dict__ 256 | 257 | def __repr__(self): 258 | return self.__dict__.__repr__() 259 | -------------------------------------------------------------------------------- /transforms.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | 4 | import numpy as np 5 | 6 | 7 | DEFAULT_MIN_BIN_WIDTH = 1e-3 8 | DEFAULT_MIN_BIN_HEIGHT = 1e-3 9 | DEFAULT_MIN_DERIVATIVE = 1e-3 10 | 11 | 12 | def piecewise_rational_quadratic_transform(inputs, 13 | unnormalized_widths, 14 | unnormalized_heights, 15 | unnormalized_derivatives, 16 | inverse=False, 17 | tails=None, 18 | tail_bound=1., 19 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 20 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 21 | min_derivative=DEFAULT_MIN_DERIVATIVE): 22 | 23 | if tails is None: 24 | spline_fn = rational_quadratic_spline 25 | spline_kwargs = {} 26 | else: 27 | spline_fn = unconstrained_rational_quadratic_spline 28 | spline_kwargs = { 29 | 'tails': tails, 30 | 'tail_bound': tail_bound 31 | } 32 | 33 | outputs, logabsdet = spline_fn( 34 | inputs=inputs, 35 | unnormalized_widths=unnormalized_widths, 36 | unnormalized_heights=unnormalized_heights, 37 | unnormalized_derivatives=unnormalized_derivatives, 38 | inverse=inverse, 39 | min_bin_width=min_bin_width, 40 | min_bin_height=min_bin_height, 41 | min_derivative=min_derivative, 42 | **spline_kwargs 43 | ) 44 | return outputs, logabsdet 45 | 46 | 47 | def 
searchsorted(bin_locations, inputs, eps=1e-6): 48 | bin_locations[..., -1] += eps 49 | return torch.sum( 50 | inputs[..., None] >= bin_locations, 51 | dim=-1 52 | ) - 1 53 | 54 | 55 | def unconstrained_rational_quadratic_spline(inputs, 56 | unnormalized_widths, 57 | unnormalized_heights, 58 | unnormalized_derivatives, 59 | inverse=False, 60 | tails='linear', 61 | tail_bound=1., 62 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 63 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 64 | min_derivative=DEFAULT_MIN_DERIVATIVE): 65 | inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) 66 | outside_interval_mask = ~inside_interval_mask 67 | 68 | outputs = torch.zeros_like(inputs) 69 | logabsdet = torch.zeros_like(inputs) 70 | 71 | if tails == 'linear': 72 | unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) 73 | constant = np.log(np.exp(1 - min_derivative) - 1) 74 | unnormalized_derivatives[..., 0] = constant 75 | unnormalized_derivatives[..., -1] = constant 76 | 77 | outputs[outside_interval_mask] = inputs[outside_interval_mask] 78 | logabsdet[outside_interval_mask] = 0 79 | else: 80 | raise RuntimeError('{} tails are not implemented.'.format(tails)) 81 | 82 | outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( 83 | inputs=inputs[inside_interval_mask], 84 | unnormalized_widths=unnormalized_widths[inside_interval_mask, :], 85 | unnormalized_heights=unnormalized_heights[inside_interval_mask, :], 86 | unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], 87 | inverse=inverse, 88 | left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, 89 | min_bin_width=min_bin_width, 90 | min_bin_height=min_bin_height, 91 | min_derivative=min_derivative 92 | ) 93 | 94 | return outputs, logabsdet 95 | 96 | def rational_quadratic_spline(inputs, 97 | unnormalized_widths, 98 | unnormalized_heights, 99 | unnormalized_derivatives, 100 | inverse=False, 101 | left=0., right=1., bottom=0., top=1., 102 | min_bin_width=DEFAULT_MIN_BIN_WIDTH, 103 | min_bin_height=DEFAULT_MIN_BIN_HEIGHT, 104 | min_derivative=DEFAULT_MIN_DERIVATIVE): 105 | if torch.min(inputs) < left or torch.max(inputs) > right: 106 | raise ValueError('Input to a transform is not within its domain') 107 | 108 | num_bins = unnormalized_widths.shape[-1] 109 | 110 | if min_bin_width * num_bins > 1.0: 111 | raise ValueError('Minimal bin width too large for the number of bins') 112 | if min_bin_height * num_bins > 1.0: 113 | raise ValueError('Minimal bin height too large for the number of bins') 114 | 115 | widths = F.softmax(unnormalized_widths, dim=-1) 116 | widths = min_bin_width + (1 - min_bin_width * num_bins) * widths 117 | cumwidths = torch.cumsum(widths, dim=-1) 118 | cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) 119 | cumwidths = (right - left) * cumwidths + left 120 | cumwidths[..., 0] = left 121 | cumwidths[..., -1] = right 122 | widths = cumwidths[..., 1:] - cumwidths[..., :-1] 123 | 124 | derivatives = min_derivative + F.softplus(unnormalized_derivatives) 125 | 126 | heights = F.softmax(unnormalized_heights, dim=-1) 127 | heights = min_bin_height + (1 - min_bin_height * num_bins) * heights 128 | cumheights = torch.cumsum(heights, dim=-1) 129 | cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) 130 | cumheights = (top - bottom) * cumheights + bottom 131 | cumheights[..., 0] = bottom 132 | cumheights[..., -1] = top 133 | heights = cumheights[..., 1:] - cumheights[..., :-1] 134 | 135 | if inverse: 136 | bin_idx = 
searchsorted(cumheights, inputs)[..., None] 137 | else: 138 | bin_idx = searchsorted(cumwidths, inputs)[..., None] 139 | 140 | input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] 141 | input_bin_widths = widths.gather(-1, bin_idx)[..., 0] 142 | 143 | input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] 144 | delta = heights / widths 145 | input_delta = delta.gather(-1, bin_idx)[..., 0] 146 | 147 | input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] 148 | input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] 149 | 150 | input_heights = heights.gather(-1, bin_idx)[..., 0] 151 | 152 | if inverse: 153 | a = (((inputs - input_cumheights) * (input_derivatives 154 | + input_derivatives_plus_one 155 | - 2 * input_delta) 156 | + input_heights * (input_delta - input_derivatives))) 157 | b = (input_heights * input_derivatives 158 | - (inputs - input_cumheights) * (input_derivatives 159 | + input_derivatives_plus_one 160 | - 2 * input_delta)) 161 | c = - input_delta * (inputs - input_cumheights) 162 | 163 | discriminant = b.pow(2) - 4 * a * c 164 | assert (discriminant >= 0).all() 165 | 166 | root = (2 * c) / (-b - torch.sqrt(discriminant)) 167 | outputs = root * input_bin_widths + input_cumwidths 168 | 169 | theta_one_minus_theta = root * (1 - root) 170 | denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) 171 | * theta_one_minus_theta) 172 | derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) 173 | + 2 * input_delta * theta_one_minus_theta 174 | + input_derivatives * (1 - root).pow(2)) 175 | logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) 176 | 177 | return outputs, -logabsdet 178 | else: 179 | theta = (inputs - input_cumwidths) / input_bin_widths 180 | theta_one_minus_theta = theta * (1 - theta) 181 | 182 | numerator = input_heights * (input_delta * theta.pow(2) 183 | + input_derivatives * theta_one_minus_theta) 184 | denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) 185 | * theta_one_minus_theta) 186 | outputs = input_cumheights + numerator / denominator 187 | 188 | derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) 189 | + 2 * input_delta * theta_one_minus_theta 190 | + input_derivatives * (1 - theta).pow(2)) 191 | logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) 192 | 193 | return outputs, logabsdet 194 | -------------------------------------------------------------------------------- /choose_VITS_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'choose_VITS_model.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.4 6 | # 7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 
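# Editor's note (hedged): this dialog collects a .pth model path and a .json
# config path; for the multi-speaker branch, comboBox_activated() below reads
# config['speakers'] from that JSON and fills the combo box with it. A minimal
# sketch of the expected shape (the speaker names are illustrative assumptions,
# not values taken from this repository):
#
#   { "speakers": ["speaker_a", "speaker_b"], ... }
#
# The chosen speaker's index in that list is what buttonBox_accept() stores as
# real_basic_UI.speaker_index for the multi-speaker VITS model.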
9 | import sys 10 | import json 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | import real_basic_UI 13 | import use_main 14 | 15 | Ativate = False 16 | 17 | class Ui_Dialog(object): 18 | def __init__(self,choose_it): 19 | self.Choose_VITS_model = choose_it 20 | self.path_model = '' 21 | self.path_json = '' 22 | 23 | def setupUi(self): 24 | self.Choose_VITS_model.setObjectName("Dialog") 25 | self.Choose_VITS_model.resize(393, 294) 26 | self.Choose_VITS_model.setFixedSize(393, 294) 27 | self.buttonBox = QtWidgets.QDialogButtonBox(self.Choose_VITS_model) 28 | self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32)) 29 | self.buttonBox.setOrientation(QtCore.Qt.Horizontal) 30 | self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) 31 | self.buttonBox.setObjectName("buttonBox") 32 | self.label = QtWidgets.QLabel(self.Choose_VITS_model) 33 | self.label.setGeometry(QtCore.QRect(20, 20, 72, 15)) 34 | self.label.setObjectName("label") 35 | self.radioButton = QtWidgets.QRadioButton(self.Choose_VITS_model) 36 | self.radioButton.setGeometry(QtCore.QRect(110, 20, 115, 19)) 37 | self.radioButton.setObjectName("radioButton") 38 | #self.radioButton.clicked.connect(self.single_checked) 39 | self.radioButton_2 = QtWidgets.QRadioButton(self.Choose_VITS_model) 40 | self.radioButton_2.setGeometry(QtCore.QRect(220, 20, 115, 19)) 41 | self.radioButton_2.setObjectName("radioButton_2") 42 | #self.radioButton_2.clicked.connect(self.multy_checked) 43 | self.label_2 = QtWidgets.QLabel(self.Choose_VITS_model) 44 | self.label_2.setGeometry(QtCore.QRect(20, 60, 300, 16)) 45 | self.label_2.setObjectName("label_2") 46 | self.lineEdit = QtWidgets.QLineEdit(self.Choose_VITS_model) 47 | self.lineEdit.setGeometry(QtCore.QRect(20, 90, 291, 21)) 48 | self.lineEdit.setObjectName("lineEdit") 49 | self.pushButton = QtWidgets.QPushButton(self.Choose_VITS_model) 50 | self.pushButton.setGeometry(QtCore.QRect(320, 90, 41, 21)) 51 | self.pushButton.setObjectName("pushButton") 52 | self.pushButton.clicked.connect(self.checked_push_buttom_1) 53 | self.pushButton_2 = QtWidgets.QPushButton(self.Choose_VITS_model) 54 | self.pushButton_2.setGeometry(QtCore.QRect(320, 160, 41, 21)) 55 | self.pushButton_2.setObjectName("pushButton_2") 56 | self.pushButton_2.clicked.connect(self.checked_push_buttom_2) 57 | self.lineEdit_2 = QtWidgets.QLineEdit(self.Choose_VITS_model) 58 | self.lineEdit_2.setGeometry(QtCore.QRect(20, 160, 291, 21)) 59 | self.lineEdit_2.setObjectName("lineEdit_2") 60 | self.label_3 = QtWidgets.QLabel(self.Choose_VITS_model) 61 | self.label_3.setGeometry(QtCore.QRect(20, 130, 300, 16)) 62 | self.label_3.setObjectName("label_3") 63 | self.label_4 = QtWidgets.QLabel(self.Choose_VITS_model) 64 | self.label_4.setGeometry(QtCore.QRect(30, 200, 72, 21)) 65 | self.label_4.setObjectName("label_4") 66 | self.comboBox = QtWidgets.QComboBox(self.Choose_VITS_model) 67 | self.comboBox.setGeometry(QtCore.QRect(130, 200, 87, 22)) 68 | self.comboBox.setObjectName("comboBox") 69 | self.comboBox.addItem('请选择') 70 | 71 | self.retranslateUi() 72 | self.buttonBox.accepted.connect(self.buttonBox_accept) 73 | self.buttonBox.rejected.connect(self.Choose_VITS_model.reject) 74 | QtCore.QMetaObject.connectSlotsByName(self.Choose_VITS_model) 75 | 76 | def retranslateUi(self): 77 | _translate = QtCore.QCoreApplication.translate 78 | self.Choose_VITS_model.setWindowTitle(_translate("Dialog", "选择VITS模型")) 79 | self.label.setText(_translate("Dialog", "模型类型:")) 80 | self.radioButton.setText(_translate("Dialog", 
"单人")) 81 | self.radioButton_2.setText(_translate("Dialog", "多人")) 82 | self.label_2.setText(_translate("Dialog", "模型路径:")) 83 | self.pushButton.setText(_translate("Dialog", "...")) 84 | self.pushButton_2.setText(_translate("Dialog", "...")) 85 | self.label_3.setText(_translate("Dialog", "模型配置文件路径:")) 86 | self.label_4.setText(_translate("Dialog", "说话人")) 87 | 88 | self.radioButton.setChecked(False) 89 | self.radioButton_2.setChecked(False) 90 | ''' 91 | def single_checked(self): 92 | print('1被点击') 93 | print(self.radioButton.isChecked()) 94 | print(self.radioButton_2.isChecked()) 95 | if self.radioButton.isChecked() == True: 96 | pass 97 | else: 98 | print('True') 99 | self.radioButton.setChecked(True) 100 | self.radioButton_2.setChecked(False) 101 | 102 | def multy_checked(self): 103 | print('2被点击') 104 | print(self.radioButton.isChecked()) 105 | print(self.radioButton_2.isChecked()) 106 | if self.radioButton_2.isChecked() == True: 107 | pass 108 | else: 109 | print('True') 110 | self.radioButton_2.setChecked(True) 111 | self.radioButton.setChecked(False) 112 | ''' 113 | def comboBox_activated(self): #执行使得combobox激活 114 | if self.radioButton_2.isChecked() == True and self.path_json != '': 115 | config = json.load(open(self.path_json,'r')) 116 | print(config['speakers']) 117 | self.speaker = config['speakers'] 118 | self.comboBox.addItems(self.speaker) 119 | global Ativate 120 | Ativate = True 121 | 122 | 123 | 124 | 125 | def checked_push_buttom_1(self): 126 | path = QtWidgets.QFileDialog.getOpenFileNames() 127 | if path[0][0][-4:] == '.pth' or path[0][0][-4:] == '.PTH': 128 | self.lineEdit.setText(path[0][0]) 129 | self.path_model = self.lineEdit.text() 130 | 131 | else: 132 | self.label_2.setText('模型路径:(需要.pth文件)') 133 | 134 | def checked_push_buttom_2(self): 135 | path = QtWidgets.QFileDialog.getOpenFileNames() 136 | if path[0][0][-5:] == '.json' or path[0][0][-5:] == '.JSON': 137 | self.lineEdit_2.setText(path[0][0]) 138 | self.path_json = self.lineEdit_2.text() 139 | 140 | else: 141 | self.label_3.setText('模型配置文件路径:(需要.json文件)') 142 | 143 | 144 | def buttonBox_accept(self): 145 | global Ativate 146 | if self.radioButton.isChecked() ^ self.radioButton_2.isChecked() == False: #^为异或 147 | self.Choose_VITS_model.setWindowTitle("选择VITS模型(未勾选模型类型)") 148 | elif self.lineEdit.text() == '': 149 | self.label_2.setText('模型路径:(需要.pth文件)') 150 | elif self.lineEdit_2.text() == '': 151 | self.label_3.setText('模型配置文件路径:(需要.json文件)') 152 | elif self.radioButton_2.isChecked() == True and self.path_json != '' and self.comboBox.currentText() == '请选择': 153 | if Ativate == False: 154 | self.comboBox_activated() 155 | self.Choose_VITS_model.setWindowTitle("选择VITS模型(未选择说话人)") 156 | else: 157 | pass 158 | else: 159 | Ativate = False 160 | #返回全局变量 161 | if self.radioButton.isChecked() == True: 162 | real_basic_UI.path_of_pth = self.path_model 163 | real_basic_UI.path_of_json = self.path_json 164 | real_basic_UI.model_type = 0 165 | elif self.radioButton_2.isChecked() == True: 166 | real_basic_UI.path_of_pth = self.path_model 167 | real_basic_UI.path_of_json = self.path_json 168 | real_basic_UI.speaker = self.comboBox.currentText() 169 | real_basic_UI.speaker_index = self.speaker.index(self.comboBox.currentText()) 170 | real_basic_UI.model_type = 1 171 | #定义VITTS_Class类,并改变其存在状态 172 | if real_basic_UI.model_type == 0: 173 | real_basic_UI.VITS_Class = use_main.single_speaker_model(path_of_pth=real_basic_UI.path_of_pth,path_of_json=real_basic_UI.path_of_json) 174 | real_basic_UI.VITS_CLASS_EXIST =True 175 | 
elif real_basic_UI.model_type == 1: 176 | real_basic_UI.VITS_Class = use_main.multy_speaker_model(path_of_pth=real_basic_UI.path_of_pth,path_of_json=real_basic_UI.path_of_json) 177 | real_basic_UI.VITS_CLASS_EXIST = True 178 | self.Choose_VITS_model.reject() 179 | 180 | 181 | 182 | if __name__ == '__main__': 183 | app = QtWidgets.QApplication(sys.argv) 184 | choosel_VITS = QtWidgets.QDialog() 185 | Widget_choose_VITS = Ui_Dialog(choosel_VITS) 186 | Widget_choose_VITS.setupUi() 187 | Widget_choose_VITS.Choose_VITS_model.show() 188 | app.exec() -------------------------------------------------------------------------------- /text/cleaners.py: -------------------------------------------------------------------------------- 1 | """ from https://github.com/keithito/tacotron """ 2 | 3 | ''' 4 | Cleaners are transformations that run over the input text at both training and eval time. 5 | 6 | Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" 7 | hyperparameter. Some cleaners are English-specific. You'll typically want to use: 8 | 1. "english_cleaners" for English text 9 | 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using 10 | the Unidecode library (https://pypi.python.org/pypi/Unidecode) 11 | 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update 12 | the symbols in symbols.py to match your data). 13 | ''' 14 | 15 | import re 16 | from unidecode import unidecode 17 | import pyopenjtalk 18 | from jamo import h2j, j2hcj 19 | from pypinyin import lazy_pinyin,BOPOMOFO 20 | import jieba 21 | 22 | 23 | # This is a list of Korean classifiers preceded by pure Korean numerals. 24 | _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' 25 | 26 | # Regular expression matching whitespace: 27 | _whitespace_re = re.compile(r'\s+') 28 | 29 | # Regular expression matching Japanese without punctuation marks: 30 | _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') 31 | 32 | # Regular expression matching non-Japanese characters or punctuation marks: 33 | _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') 34 | 35 | # List of (regular expression, replacement) pairs for abbreviations: 36 | _abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ 37 | ('mrs', 'misess'), 38 | ('mr', 'mister'), 39 | ('dr', 'doctor'), 40 | ('st', 'saint'), 41 | ('co', 'company'), 42 | ('jr', 'junior'), 43 | ('maj', 'major'), 44 | ('gen', 'general'), 45 | ('drs', 'doctors'), 46 | ('rev', 'reverend'), 47 | ('lt', 'lieutenant'), 48 | ('hon', 'honorable'), 49 | ('sgt', 'sergeant'), 50 | ('capt', 'captain'), 51 | ('esq', 'esquire'), 52 | ('ltd', 'limited'), 53 | ('col', 'colonel'), 54 | ('ft', 'fort'), 55 | ]] 56 | 57 | # List of (hangul, hangul divided) pairs: 58 | _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ 59 | ('ㄳ', 'ㄱㅅ'), 60 | ('ㄵ', 'ㄴㅈ'), 61 | ('ㄶ', 'ㄴㅎ'), 62 | ('ㄺ', 'ㄹㄱ'), 63 | ('ㄻ', 'ㄹㅁ'), 64 | ('ㄼ', 'ㄹㅂ'), 65 | ('ㄽ', 'ㄹㅅ'), 66 | ('ㄾ', 'ㄹㅌ'), 67 | ('ㄿ', 'ㄹㅍ'), 68 | ('ㅀ', 'ㄹㅎ'), 69 | ('ㅄ', 'ㅂㅅ'), 70 | ('ㅘ', 'ㅗㅏ'), 71 | ('ㅙ', 'ㅗㅐ'), 72 | ('ㅚ', 'ㅗㅣ'), 73 | ('ㅝ', 'ㅜㅓ'), 74 | ('ㅞ', 'ㅜㅔ'), 75 | ('ㅟ', 'ㅜㅣ'), 76 | ('ㅢ', 'ㅡㅣ'), 77 | ('ㅑ', 'ㅣㅏ'), 78 | ('ㅒ', 'ㅣㅐ'), 79 | ('ㅕ', 'ㅣㅓ'), 80 | ('ㅖ', 'ㅣㅔ'), 81 | ('ㅛ', 'ㅣㅗ'), 82 | ('ㅠ', 'ㅣㅜ') 83 | ]] 84 | 85 | # List of (Latin alphabet, hangul) pairs: 86 | _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ 87 | ('a', '에이'), 88 | ('b', '비'), 89 | ('c', '시'), 90 | ('d', '디'), 91 | ('e', '이'), 92 | ('f', '에프'), 93 | ('g', '지'), 94 | ('h', '에이치'), 95 | ('i', '아이'), 96 | ('j', '제이'), 97 | ('k', '케이'), 98 | ('l', '엘'), 99 | ('m', '엠'), 100 | ('n', '엔'), 101 | ('o', '오'), 102 | ('p', '피'), 103 | ('q', '큐'), 104 | ('r', '아르'), 105 | ('s', '에스'), 106 | ('t', '티'), 107 | ('u', '유'), 108 | ('v', '브이'), 109 | ('w', '더블유'), 110 | ('x', '엑스'), 111 | ('y', '와이'), 112 | ('z', '제트') 113 | ]] 114 | 115 | 116 | def expand_abbreviations(text): 117 | for regex, replacement in _abbreviations: 118 | text = re.sub(regex, replacement, text) 119 | return text 120 | 121 | 122 | def lowercase(text): 123 | return text.lower() 124 | 125 | 126 | def collapse_whitespace(text): 127 | return re.sub(_whitespace_re, ' ', text) 128 | 129 | 130 | def convert_to_ascii(text): 131 | return unidecode(text) 132 | 133 | 134 | def latin_to_hangul(text): 135 | for regex, replacement in _latin_to_hangul: 136 | text = re.sub(regex, replacement, text) 137 | return text 138 | 139 | 140 | def divide_hangul(text): 141 | for regex, replacement in _hangul_divided: 142 | text = re.sub(regex, replacement, text) 143 | return text 144 | 145 | 146 | def hangul_number(num, sino=True): 147 | '''Reference https://github.com/Kyubyong/g2pK''' 148 | num = re.sub(',', '', num) 149 | 150 | if num == '0': 151 | return '영' 152 | if not sino and num == '20': 153 | return '스무' 154 | 155 | digits = '123456789' 156 | names = '일이삼사오육칠팔구' 157 | digit2name = {d: n for d, n in zip(digits, names)} 158 | 159 | modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' 160 | decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' 161 | digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} 162 | digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} 163 | 164 | spelledout = [] 165 | for i, digit in enumerate(num): 166 | i = len(num) - i - 1 167 | if sino: 168 | if i == 0: 169 | name = digit2name.get(digit, '') 170 | elif i == 1: 171 | name = digit2name.get(digit, '') + '십' 172 | name = name.replace('일십', '십') 173 | else: 174 | if i == 0: 175 | name = digit2mod.get(digit, '') 176 | elif i == 1: 177 | name = digit2dec.get(digit, '') 178 | if digit == '0': 179 | if i % 4 == 0: 180 | last_three = spelledout[-min(3, len(spelledout)):] 181 | if ''.join(last_three) == '': 182 | spelledout.append('') 183 | continue 184 | else: 185 | 
spelledout.append('') 186 | continue 187 | if i == 2: 188 | name = digit2name.get(digit, '') + '백' 189 | name = name.replace('일백', '백') 190 | elif i == 3: 191 | name = digit2name.get(digit, '') + '천' 192 | name = name.replace('일천', '천') 193 | elif i == 4: 194 | name = digit2name.get(digit, '') + '만' 195 | name = name.replace('일만', '만') 196 | elif i == 5: 197 | name = digit2name.get(digit, '') + '십' 198 | name = name.replace('일십', '십') 199 | elif i == 6: 200 | name = digit2name.get(digit, '') + '백' 201 | name = name.replace('일백', '백') 202 | elif i == 7: 203 | name = digit2name.get(digit, '') + '천' 204 | name = name.replace('일천', '천') 205 | elif i == 8: 206 | name = digit2name.get(digit, '') + '억' 207 | elif i == 9: 208 | name = digit2name.get(digit, '') + '십' 209 | elif i == 10: 210 | name = digit2name.get(digit, '') + '백' 211 | elif i == 11: 212 | name = digit2name.get(digit, '') + '천' 213 | elif i == 12: 214 | name = digit2name.get(digit, '') + '조' 215 | elif i == 13: 216 | name = digit2name.get(digit, '') + '십' 217 | elif i == 14: 218 | name = digit2name.get(digit, '') + '백' 219 | elif i == 15: 220 | name = digit2name.get(digit, '') + '천' 221 | spelledout.append(name) 222 | return ''.join(elem for elem in spelledout) 223 | 224 | 225 | def number_to_hangul(text): 226 | '''Reference https://github.com/Kyubyong/g2pK''' 227 | tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) 228 | for token in tokens: 229 | num, classifier = token 230 | if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: 231 | spelledout = hangul_number(num, sino=False) 232 | else: 233 | spelledout = hangul_number(num, sino=True) 234 | text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') 235 | # digit by digit for remaining digits 236 | digits = '0123456789' 237 | names = '영일이삼사오육칠팔구' 238 | for d, n in zip(digits, names): 239 | text = text.replace(d, n) 240 | return text 241 | 242 | 243 | def basic_cleaners(text): 244 | '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' 245 | text = lowercase(text) 246 | text = collapse_whitespace(text) 247 | return text 248 | 249 | 250 | def transliteration_cleaners(text): 251 | '''Pipeline for non-English text that transliterates to ASCII.''' 252 | text = convert_to_ascii(text) 253 | text = lowercase(text) 254 | text = collapse_whitespace(text) 255 | return text 256 | 257 | 258 | def japanese_cleaners(text): 259 | '''Pipeline for notating accent in Japanese text. 
260 | Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' 261 | sentences = re.split(_japanese_marks, text) 262 | marks = re.findall(_japanese_marks, text) 263 | text = '' 264 | for i, sentence in enumerate(sentences): 265 | if re.match(_japanese_characters, sentence): 266 | if text!='': 267 | text+=' ' 268 | labels = pyopenjtalk.extract_fullcontext(sentence) 269 | for n, label in enumerate(labels): 270 | phoneme = re.search(r'\-([^\+]*)\+', label).group(1) 271 | if phoneme not in ['sil','pau']: 272 | text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') 273 | else: 274 | continue 275 | n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) 276 | a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) 277 | a2 = int(re.search(r"\+(\d+)\+", label).group(1)) 278 | a3 = int(re.search(r"\+(\d+)/", label).group(1)) 279 | if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: 280 | a2_next=-1 281 | else: 282 | a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) 283 | # Accent phrase boundary 284 | if a3 == 1 and a2_next == 1: 285 | text += ' ' 286 | # Falling 287 | elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: 288 | text += '↓' 289 | # Rising 290 | elif a2 == 1 and a2_next == 2: 291 | text += '↑' 292 | if i [b, n_h, t, d_k] 150 | b, d, t_s, t_t = (*key.size(), query.size(2)) 151 | query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) 152 | key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) 153 | value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) 154 | 155 | scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) 156 | if self.window_size is not None: 157 | assert t_s == t_t, "Relative attention is only available for self-attention." 158 | key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) 159 | rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) 160 | scores_local = self._relative_position_to_absolute_position(rel_logits) 161 | scores = scores + scores_local 162 | if self.proximal_bias: 163 | assert t_s == t_t, "Proximal bias is only available for self-attention." 164 | scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) 165 | if mask is not None: 166 | scores = scores.masked_fill(mask == 0, -1e4) 167 | if self.block_length is not None: 168 | assert t_s == t_t, "Local attention is only available for self-attention." 
169 | block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) 170 | scores = scores.masked_fill(block_mask == 0, -1e4) 171 | p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] 172 | p_attn = self.drop(p_attn) 173 | output = torch.matmul(p_attn, value) 174 | if self.window_size is not None: 175 | relative_weights = self._absolute_position_to_relative_position(p_attn) 176 | value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) 177 | output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) 178 | output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] 179 | return output, p_attn 180 | 181 | def _matmul_with_relative_values(self, x, y): 182 | """ 183 | x: [b, h, l, m] 184 | y: [h or 1, m, d] 185 | ret: [b, h, l, d] 186 | """ 187 | ret = torch.matmul(x, y.unsqueeze(0)) 188 | return ret 189 | 190 | def _matmul_with_relative_keys(self, x, y): 191 | """ 192 | x: [b, h, l, d] 193 | y: [h or 1, m, d] 194 | ret: [b, h, l, m] 195 | """ 196 | ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) 197 | return ret 198 | 199 | def _get_relative_embeddings(self, relative_embeddings, length): 200 | max_relative_position = 2 * self.window_size + 1 201 | # Pad first before slice to avoid using cond ops. 202 | pad_length = max(length - (self.window_size + 1), 0) 203 | slice_start_position = max((self.window_size + 1) - length, 0) 204 | slice_end_position = slice_start_position + 2 * length - 1 205 | if pad_length > 0: 206 | padded_relative_embeddings = F.pad( 207 | relative_embeddings, 208 | commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) 209 | else: 210 | padded_relative_embeddings = relative_embeddings 211 | used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] 212 | return used_relative_embeddings 213 | 214 | def _relative_position_to_absolute_position(self, x): 215 | """ 216 | x: [b, h, l, 2*l-1] 217 | ret: [b, h, l, l] 218 | """ 219 | batch, heads, length, _ = x.size() 220 | # Concat columns of pad to shift from relative to absolute indexing. 221 | x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) 222 | 223 | # Concat extra elements so to add up to shape (len+1, 2*len-1). 224 | x_flat = x.view([batch, heads, length * 2 * length]) 225 | x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) 226 | 227 | # Reshape and slice out the padded elements. 228 | x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] 229 | return x_final 230 | 231 | def _absolute_position_to_relative_position(self, x): 232 | """ 233 | x: [b, h, l, l] 234 | ret: [b, h, l, 2*l-1] 235 | """ 236 | batch, heads, length, _ = x.size() 237 | # padd along column 238 | x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) 239 | x_flat = x.view([batch, heads, length**2 + length*(length -1)]) 240 | # add 0's in the beginning that will skew the elements after reshape 241 | x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) 242 | x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] 243 | return x_final 244 | 245 | def _attention_bias_proximal(self, length): 246 | """Bias for self-attention to encourage attention to close positions. 247 | Args: 248 | length: an integer scalar. 
249 | Returns: 250 | a Tensor with shape [1, 1, length, length] 251 | """ 252 | r = torch.arange(length, dtype=torch.float32) 253 | diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) 254 | return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) 255 | 256 | 257 | class FFN(nn.Module): 258 | def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): 259 | super().__init__() 260 | self.in_channels = in_channels 261 | self.out_channels = out_channels 262 | self.filter_channels = filter_channels 263 | self.kernel_size = kernel_size 264 | self.p_dropout = p_dropout 265 | self.activation = activation 266 | self.causal = causal 267 | 268 | if causal: 269 | self.padding = self._causal_padding 270 | else: 271 | self.padding = self._same_padding 272 | 273 | self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) 274 | self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) 275 | self.drop = nn.Dropout(p_dropout) 276 | 277 | def forward(self, x, x_mask): 278 | x = self.conv_1(self.padding(x * x_mask)) 279 | if self.activation == "gelu": 280 | x = x * torch.sigmoid(1.702 * x) 281 | else: 282 | x = torch.relu(x) 283 | x = self.drop(x) 284 | x = self.conv_2(self.padding(x * x_mask)) 285 | return x * x_mask 286 | 287 | def _causal_padding(self, x): 288 | if self.kernel_size == 1: 289 | return x 290 | pad_l = self.kernel_size - 1 291 | pad_r = 0 292 | padding = [[0, 0], [0, 0], [pad_l, pad_r]] 293 | x = F.pad(x, commons.convert_pad_shape(padding)) 294 | return x 295 | 296 | def _same_padding(self, x): 297 | if self.kernel_size == 1: 298 | return x 299 | pad_l = (self.kernel_size - 1) // 2 300 | pad_r = self.kernel_size // 2 301 | padding = [[0, 0], [0, 0], [pad_l, pad_r]] 302 | x = F.pad(x, commons.convert_pad_shape(padding)) 303 | return x 304 | -------------------------------------------------------------------------------- /real_basic_UI.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'real_basic.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.4 6 | # 7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 
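# Editor's note (hedged overview of the window logic defined below):
#   1. clicked_push_button_6() reads the prompt, updates chatgpt_main.openai.api_key
#      from the API line edit, and calls chatgpt_main.friend_chat(...) to get the reply.
#   2. The reply text is passed to a use_main single-/multi-speaker VITS model via
#      .generate(audio_text[, speaker_index=...]), which is expected to write
#      ./audio/audio.wav (an assumption inferred from play_music / Thread_1, not
#      verified here).
#   3. play_music() starts Thread_1, a QThread that plays ./audio/audio.wav with
#      pygame.mixer and clears Thread_1_is_running when playback finishes.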
9 | #Qt多媒体用于播放音乐 10 | import time 11 | import sys 12 | import import_UI 13 | from PyQt5 import QtCore, QtGui, QtWidgets #QtMultimedia 14 | import chatgpt_main 15 | import use_main 16 | import pygame 17 | import choosebg 18 | import choose_VITS_model 19 | 20 | #全局变量 21 | convers_text_from_import_UI = '' #文本中转 22 | all_text = '' #所有文本 23 | Thread_1_is_running = False #播放音乐多进程状态 24 | Backgroud_jpg_path = '' #图片地址中转 25 | speaker = '' 26 | path_of_pth = '' 27 | path_of_json = '' 28 | model_type = '' 29 | speaker_index = '' 30 | VITS_CLASS_EXIST = False 31 | CALL_NAME = '' 32 | 33 | 34 | class Ui_Form(object): 35 | def setupUi(self, Form): 36 | Form.setObjectName("Form") 37 | Form.resize(1319, 759) 38 | Form.setFixedSize(1319, 759) 39 | self.label_API = QtWidgets.QLabel(Form) 40 | self.label_API.setGeometry(QtCore.QRect(800, 20, 90, 30)) 41 | self.lineedit = QtWidgets.QLineEdit(Form) 42 | self.lineedit.setGeometry(QtCore.QRect(890, 25, 200, 20)) 43 | self.label = QtWidgets.QLabel(Form) 44 | self.label.setGeometry(QtCore.QRect(470, 10, 311, 61)) 45 | self.label.setStyleSheet("color:rgb(18, 0, 153);\n" 46 | "\n" 47 | "font: 75 20pt \"Segoe Print\";\n" 48 | "") 49 | self.label.setAlignment(QtCore.Qt.AlignCenter) 50 | self.label.setObjectName("label") 51 | self.label_2 = QtWidgets.QLabel(Form) 52 | self.label_2.setGeometry(QtCore.QRect(0, 60, 1319, 700)) 53 | self.label_2.setText("") 54 | self.label_2.setPixmap(QtGui.QPixmap("./picture/001.jpg")) 55 | self.label_2.setScaledContents(True) 56 | self.label_2.setObjectName("label_2") 57 | self.pushButton = QtWidgets.QPushButton(Form) 58 | self.pushButton.setGeometry(QtCore.QRect(30, 20, 91, 31)) 59 | self.pushButton.setObjectName("pushButton") 60 | self.pushButton.clicked.connect(self.clicked_push_button) 61 | self.pushButton_2 = QtWidgets.QPushButton(Form) 62 | self.pushButton_2.setGeometry(QtCore.QRect(130, 20, 81, 31)) 63 | self.pushButton_2.setObjectName("pushButton_2") 64 | self.pushButton_2.clicked.connect(self.clicked_push_button_2) 65 | self.pushButton_3 = QtWidgets.QPushButton(Form) 66 | self.pushButton_3.setGeometry(QtCore.QRect(1200, 20, 51, 31)) 67 | self.pushButton_3.setObjectName("pushButton_3") 68 | self.pushButton_3.clicked.connect(self.clicked_push_button_3) 69 | self.pushButton_4 = QtWidgets.QPushButton(Form) 70 | self.pushButton_4.setGeometry(QtCore.QRect(220, 20, 101, 31)) 71 | self.pushButton_4.setObjectName("pushButton_4") 72 | self.pushButton_4.clicked.connect(self.clicked_push_button_4) 73 | self.pushButton_5 = QtWidgets.QPushButton(Form) 74 | self.pushButton_5.setGeometry(QtCore.QRect(330, 20, 111, 31)) 75 | self.pushButton_5.setObjectName("pushButton_5") 76 | self.pushButton_5.clicked.connect(self.clicked_push_button_5) 77 | self.plainTextEdit = QtWidgets.QPlainTextEdit(Form) 78 | self.plainTextEdit.setGeometry(QtCore.QRect(160, 590, 511, 87)) 79 | self.plainTextEdit.setObjectName("plainTextEdit") 80 | self.label_3 = QtWidgets.QLabel(Form) 81 | self.label_3.setGeometry(QtCore.QRect(160, 490, 511, 71)) 82 | self.label_3.setStyleSheet("font: 12pt \"Arial\";background-color: rgba(255, 255, 255, 0.5);") 83 | self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) 84 | self.label_3.setWordWrap(True) 85 | self.label_3.setObjectName("label_3") 86 | self.pushButton_6 = QtWidgets.QPushButton(Form) 87 | self.pushButton_6.setGeometry(QtCore.QRect(700, 600, 71, 71)) 88 | self.pushButton_6.setObjectName("pushButton_6") 89 | self.pushButton_6.clicked.connect(self.clicked_push_button_6) 90 | self.pushButton_7 
= QtWidgets.QPushButton(Form) 91 | self.pushButton_7.setGeometry(QtCore.QRect(700, 490, 71, 71)) 92 | self.pushButton_7.setStyleSheet("background-color: rgba(255, 255, 255, 0);") 93 | self.pushButton_7.setText("") 94 | icon = QtGui.QIcon() 95 | icon.addPixmap(QtGui.QPixmap("./picture/play1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) 96 | self.pushButton_7.setIcon(icon) 97 | self.pushButton_7.setIconSize(QtCore.QSize(50, 50)) 98 | self.pushButton_7.setObjectName("pushButton_7") 99 | self.pushButton_7.clicked.connect(self.play_music) 100 | self.textBrowser = QtWidgets.QTextBrowser(Form) 101 | self.textBrowser.setGeometry(QtCore.QRect(920, 90, 311, 581)) 102 | self.textBrowser.setStyleSheet("background-color: rgba(255, 255, 255, 0.5);") 103 | #self.textBrowser.setLineWrapMode(QtWidgets.QTextEdit.FixedColumnWidth) #竖直输出 104 | self.textBrowser.setObjectName("textBrowser") 105 | self.label_4 = QtWidgets.QLabel(Form) 106 | self.label_4.setGeometry(QtCore.QRect(-60, 0, 931, 761)) 107 | self.label_4.setText("") 108 | self.label_4.setPixmap(QtGui.QPixmap("./picture/81546755.png")) 109 | self.label_4.setScaledContents(False) 110 | self.label_4.setAlignment(QtCore.Qt.AlignCenter) 111 | self.label_4.setObjectName("label_4") 112 | self.label_5 = QtWidgets.QLabel(Form) 113 | self.label_5.setGeometry(QtCore.QRect(160, 680, 311, 41)) 114 | self.label_5.setStyleSheet("font: 14pt \"楷体\";") 115 | self.label_5.setTextFormat(QtCore.Qt.RichText) 116 | self.label_5.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) 117 | self.label_5.setObjectName("label_5") 118 | self.label_API.raise_() 119 | self.lineedit.raise_() 120 | self.label_2.raise_() 121 | self.label_4.raise_() 122 | self.label.raise_() 123 | self.pushButton.raise_() 124 | self.pushButton_2.raise_() 125 | self.pushButton_3.raise_() 126 | self.pushButton_4.raise_() 127 | self.pushButton_5.raise_() 128 | self.plainTextEdit.raise_() 129 | self.label_3.raise_() 130 | self.pushButton_6.raise_() 131 | self.pushButton_7.raise_() 132 | self.textBrowser.raise_() 133 | self.label_5.raise_() 134 | 135 | #音乐播放器(有问题) 136 | ''' 137 | self.player = QtMultimedia.QMediaPlayer(Form) 138 | self.player.setObjectName('player') 139 | self.player.setVolume(60) 140 | self.player.setMedia(QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile('./audio/audio.wav'))) 141 | ''' 142 | 143 | self.retranslateUi(Form) 144 | QtCore.QMetaObject.connectSlotsByName(Form) 145 | 146 | def retranslateUi(self, Form): 147 | _translate = QtCore.QCoreApplication.translate 148 | Form.setWindowTitle(_translate("Form", "ChatGPT-VITS")) 149 | self.label.setText(_translate("Form", "ChatGPT-VITS")) 150 | self.label_API.setText(_translate('Form','OpenAI API')) 151 | self.lineedit.setText(_translate('Form', chatgpt_main.openai.api_key)) 152 | self.pushButton.setText(_translate("Form", "修改背景")) 153 | self.pushButton_2.setText(_translate("Form", "添加设定")) 154 | self.pushButton_3.setText(_translate("Form", "重置")) 155 | self.pushButton_4.setText(_translate("Form", "修改人物立绘")) 156 | self.pushButton_5.setText(_translate("Form", "修改VITS模型")) 157 | self.plainTextEdit.setPlaceholderText(_translate("Form", "请输入对话内容")) 158 | self.label_3.setText(_translate("Form", "请注意导入初始设定")) 159 | self.pushButton_6.setText(_translate("Form", "发送")) 160 | self.label_5.setText(_translate("Form", "输入设定请勿使用此对话框")) 161 | Form.setWindowIcon(QtGui.QIcon('./picture/winicon.ico')) 162 | 163 | def clicked_push_button(self): 164 | global Backgroud_jpg_path 165 | choosebg1 = QtWidgets.QDialog() 166 | Widget_choosebg = 
choosebg.Ui_Choosebg(choosebg1) 167 | Widget_choosebg.setupUi() 168 | Widget_choosebg.Choosebg.exec() 169 | if Backgroud_jpg_path == '': 170 | pass 171 | else: 172 | self.label_2.setPixmap(QtGui.QPixmap(Backgroud_jpg_path)) 173 | Backgroud_jpg_path = '' 174 | 175 | 176 | def clicked_push_button_3(self): 177 | global all_text 178 | all_text = '' 179 | self.textBrowser.setText(all_text) 180 | 181 | def clicked_push_button_4(self): 182 | global Backgroud_jpg_path 183 | chooselh1 = QtWidgets.QDialog() 184 | Widget_chooselh = choosebg.Ui_Choosebg(chooselh1) 185 | Widget_chooselh.setupUi() 186 | Widget_chooselh.Choosebg.exec() 187 | if Backgroud_jpg_path == '': 188 | pass 189 | else: 190 | self.label_4.setPixmap(QtGui.QPixmap(Backgroud_jpg_path)) 191 | Backgroud_jpg_path = '' 192 | 193 | def clicked_push_button_5(self): 194 | global speaker,path_of_json,path_of_pth 195 | choose_VITS = QtWidgets.QDialog() 196 | Widget_choose_VITS = choose_VITS_model.Ui_Dialog(choose_VITS) 197 | Widget_choose_VITS.setupUi() 198 | Widget_choose_VITS.Choose_VITS_model.exec() 199 | if model_type == '': 200 | pass 201 | elif model_type == 0: 202 | self.path_of_json =path_of_json 203 | self.path_of_pth = path_of_pth 204 | elif model_type == 1: 205 | self.path_of_json =path_of_json 206 | self.path_of_pth = path_of_pth 207 | self.speaker = speaker 208 | 209 | def clicked_push_button_6(self): 210 | text = self.plainTextEdit.toPlainText() 211 | chatgpt_main.openai.api_key = self.lineedit.text() 212 | global all_text,speaker,path_of_json,path_of_pth,model_type,speaker_index,VITS_CLASS_EXIST,CALL_NAME 213 | if text == '': 214 | self.label_3.setText('请输入对话内容!') 215 | elif all_text == '': 216 | self.label_3.setText('请先导入初始设定!') 217 | else: 218 | #ChatGPT 219 | self.plainTextEdit.setPlainText('') 220 | 221 | prompt0 = text 222 | #resualt 展示了当期结果是否为quit 223 | if CALL_NAME == '': 224 | resualt, all_text, audio_text = chatgpt_main.friend_chat(all_text,prompt0) 225 | else: 226 | resualt, all_text, audio_text = chatgpt_main.friend_chat(all_text, prompt0,call_name=CALL_NAME) #TODO 227 | #if resualt == 'quit': 228 | # break 229 | self.label_3.setText(audio_text) 230 | self.textBrowser.setText(all_text) 231 | 232 | #VITS 233 | global VITS_Class 234 | #初始状态 235 | if model_type =='': 236 | #判断全局类是否存在 237 | if VITS_CLASS_EXIST == False: 238 | VITS_Class = use_main.single_speaker_model() 239 | VITS_Class.generate(audio_text) 240 | VITS_CLASS_EXIST = True 241 | else: 242 | VITS_Class.generate(audio_text) 243 | elif model_type == 0: 244 | VITS_Class.generate(audio_text) 245 | elif model_type == 1: 246 | VITS_Class.generate(audio_text,speaker_index=int(speaker_index)) 247 | self.play_music() 248 | 249 | 250 | 251 | def clicked_push_button_2(self): 252 | global convers_text_from_import_UI 253 | Dialog1 = QtWidgets.QDialog() 254 | Dialog1_Widget = import_UI.Ui_Dialog(Dialog1) 255 | Dialog1_Widget.setupUi() 256 | Dialog1_Widget.retranslateUi() 257 | #Dialog1.show() 258 | Dialog1_Widget.Dialog.exec() #不要show则是直接将Dialog作为主程序,不可以返回主程序窗口 259 | if str(convers_text_from_import_UI) == '': 260 | pass 261 | else: 262 | global all_text 263 | if all_text == '': 264 | all_text = all_text + str(convers_text_from_import_UI) 265 | else: 266 | all_text = all_text + '\n' + str(convers_text_from_import_UI) 267 | self.textBrowser.setText(all_text) 268 | convers_text_from_import_UI = '' 269 | 270 | 271 | def play_music(self): 272 | #使用多线程 273 | global Thread_1_is_running 274 | if Thread_1_is_running == True: 275 | pass 276 | else: 277 | Thread_1_is_running = True 
278 | self.Thread_1 = Thread_1() 279 | self.Thread_1.start() 280 | 281 | 282 | 283 | #self.player.play() 284 | 285 | 286 | class Thread_1(QtCore.QThread): 287 | def __init__(self): 288 | super(Thread_1, self).__init__() 289 | 290 | def run(self): 291 | pygame.mixer.init() 292 | pygame.mixer.music.load('./audio/audio.wav') 293 | pygame.mixer.music.play() 294 | while pygame.mixer.music.get_busy(): 295 | pass 296 | pygame.mixer.music.unload() 297 | global Thread_1_is_running 298 | Thread_1_is_running = False 299 | 300 | 301 | class windows(QtWidgets.QWidget): 302 | def __init__(self): 303 | super(windows, self).__init__() 304 | 305 | if __name__ == '__main__': 306 | app = QtWidgets.QApplication(sys.argv) 307 | Form = windows() 308 | Widget = Ui_Form() 309 | Widget.setupUi(Form) 310 | Form.show() 311 | app.exec() -------------------------------------------------------------------------------- /modules.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | import numpy as np 4 | import scipy 5 | import torch 6 | from torch import nn 7 | from torch.nn import functional as F 8 | 9 | from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d 10 | from torch.nn.utils import weight_norm, remove_weight_norm 11 | 12 | import commons 13 | from commons import init_weights, get_padding 14 | from transforms import piecewise_rational_quadratic_transform 15 | 16 | 17 | LRELU_SLOPE = 0.1 18 | 19 | 20 | class LayerNorm(nn.Module): 21 | def __init__(self, channels, eps=1e-5): 22 | super().__init__() 23 | self.channels = channels 24 | self.eps = eps 25 | 26 | self.gamma = nn.Parameter(torch.ones(channels)) 27 | self.beta = nn.Parameter(torch.zeros(channels)) 28 | 29 | def forward(self, x): 30 | x = x.transpose(1, -1) 31 | x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) 32 | return x.transpose(1, -1) 33 | 34 | 35 | class ConvReluNorm(nn.Module): 36 | def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): 37 | super().__init__() 38 | self.in_channels = in_channels 39 | self.hidden_channels = hidden_channels 40 | self.out_channels = out_channels 41 | self.kernel_size = kernel_size 42 | self.n_layers = n_layers 43 | self.p_dropout = p_dropout 44 | assert n_layers > 1, "Number of layers should be larger than 0." 
45 | 46 | self.conv_layers = nn.ModuleList() 47 | self.norm_layers = nn.ModuleList() 48 | self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) 49 | self.norm_layers.append(LayerNorm(hidden_channels)) 50 | self.relu_drop = nn.Sequential( 51 | nn.ReLU(), 52 | nn.Dropout(p_dropout)) 53 | for _ in range(n_layers-1): 54 | self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) 55 | self.norm_layers.append(LayerNorm(hidden_channels)) 56 | self.proj = nn.Conv1d(hidden_channels, out_channels, 1) 57 | self.proj.weight.data.zero_() 58 | self.proj.bias.data.zero_() 59 | 60 | def forward(self, x, x_mask): 61 | x_org = x 62 | for i in range(self.n_layers): 63 | x = self.conv_layers[i](x * x_mask) 64 | x = self.norm_layers[i](x) 65 | x = self.relu_drop(x) 66 | x = x_org + self.proj(x) 67 | return x * x_mask 68 | 69 | 70 | class DDSConv(nn.Module): 71 | """ 72 | Dialted and Depth-Separable Convolution 73 | """ 74 | def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): 75 | super().__init__() 76 | self.channels = channels 77 | self.kernel_size = kernel_size 78 | self.n_layers = n_layers 79 | self.p_dropout = p_dropout 80 | 81 | self.drop = nn.Dropout(p_dropout) 82 | self.convs_sep = nn.ModuleList() 83 | self.convs_1x1 = nn.ModuleList() 84 | self.norms_1 = nn.ModuleList() 85 | self.norms_2 = nn.ModuleList() 86 | for i in range(n_layers): 87 | dilation = kernel_size ** i 88 | padding = (kernel_size * dilation - dilation) // 2 89 | self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, 90 | groups=channels, dilation=dilation, padding=padding 91 | )) 92 | self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) 93 | self.norms_1.append(LayerNorm(channels)) 94 | self.norms_2.append(LayerNorm(channels)) 95 | 96 | def forward(self, x, x_mask, g=None): 97 | if g is not None: 98 | x = x + g 99 | for i in range(self.n_layers): 100 | y = self.convs_sep[i](x * x_mask) 101 | y = self.norms_1[i](y) 102 | y = F.gelu(y) 103 | y = self.convs_1x1[i](y) 104 | y = self.norms_2[i](y) 105 | y = F.gelu(y) 106 | y = self.drop(y) 107 | x = x + y 108 | return x * x_mask 109 | 110 | 111 | class WN(torch.nn.Module): 112 | def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): 113 | super(WN, self).__init__() 114 | assert(kernel_size % 2 == 1) 115 | self.hidden_channels =hidden_channels 116 | self.kernel_size = kernel_size, 117 | self.dilation_rate = dilation_rate 118 | self.n_layers = n_layers 119 | self.gin_channels = gin_channels 120 | self.p_dropout = p_dropout 121 | 122 | self.in_layers = torch.nn.ModuleList() 123 | self.res_skip_layers = torch.nn.ModuleList() 124 | self.drop = nn.Dropout(p_dropout) 125 | 126 | if gin_channels != 0: 127 | cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) 128 | self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') 129 | 130 | for i in range(n_layers): 131 | dilation = dilation_rate ** i 132 | padding = int((kernel_size * dilation - dilation) / 2) 133 | in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, 134 | dilation=dilation, padding=padding) 135 | in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') 136 | self.in_layers.append(in_layer) 137 | 138 | # last one is not necessary 139 | if i < n_layers - 1: 140 | res_skip_channels = 2 * hidden_channels 141 | else: 142 | res_skip_channels = hidden_channels 143 | 144 | res_skip_layer = 
torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) 145 | res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') 146 | self.res_skip_layers.append(res_skip_layer) 147 | 148 | def forward(self, x, x_mask, g=None, **kwargs): 149 | output = torch.zeros_like(x) 150 | n_channels_tensor = torch.IntTensor([self.hidden_channels]) 151 | 152 | if g is not None: 153 | g = self.cond_layer(g) 154 | 155 | for i in range(self.n_layers): 156 | x_in = self.in_layers[i](x) 157 | if g is not None: 158 | cond_offset = i * 2 * self.hidden_channels 159 | g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] 160 | else: 161 | g_l = torch.zeros_like(x_in) 162 | 163 | acts = commons.fused_add_tanh_sigmoid_multiply( 164 | x_in, 165 | g_l, 166 | n_channels_tensor) 167 | acts = self.drop(acts) 168 | 169 | res_skip_acts = self.res_skip_layers[i](acts) 170 | if i < self.n_layers - 1: 171 | res_acts = res_skip_acts[:,:self.hidden_channels,:] 172 | x = (x + res_acts) * x_mask 173 | output = output + res_skip_acts[:,self.hidden_channels:,:] 174 | else: 175 | output = output + res_skip_acts 176 | return output * x_mask 177 | 178 | def remove_weight_norm(self): 179 | if self.gin_channels != 0: 180 | torch.nn.utils.remove_weight_norm(self.cond_layer) 181 | for l in self.in_layers: 182 | torch.nn.utils.remove_weight_norm(l) 183 | for l in self.res_skip_layers: 184 | torch.nn.utils.remove_weight_norm(l) 185 | 186 | 187 | class ResBlock1(torch.nn.Module): 188 | def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): 189 | super(ResBlock1, self).__init__() 190 | self.convs1 = nn.ModuleList([ 191 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], 192 | padding=get_padding(kernel_size, dilation[0]))), 193 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], 194 | padding=get_padding(kernel_size, dilation[1]))), 195 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], 196 | padding=get_padding(kernel_size, dilation[2]))) 197 | ]) 198 | self.convs1.apply(init_weights) 199 | 200 | self.convs2 = nn.ModuleList([ 201 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 202 | padding=get_padding(kernel_size, 1))), 203 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 204 | padding=get_padding(kernel_size, 1))), 205 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, 206 | padding=get_padding(kernel_size, 1))) 207 | ]) 208 | self.convs2.apply(init_weights) 209 | 210 | def forward(self, x, x_mask=None): 211 | for c1, c2 in zip(self.convs1, self.convs2): 212 | xt = F.leaky_relu(x, LRELU_SLOPE) 213 | if x_mask is not None: 214 | xt = xt * x_mask 215 | xt = c1(xt) 216 | xt = F.leaky_relu(xt, LRELU_SLOPE) 217 | if x_mask is not None: 218 | xt = xt * x_mask 219 | xt = c2(xt) 220 | x = xt + x 221 | if x_mask is not None: 222 | x = x * x_mask 223 | return x 224 | 225 | def remove_weight_norm(self): 226 | for l in self.convs1: 227 | remove_weight_norm(l) 228 | for l in self.convs2: 229 | remove_weight_norm(l) 230 | 231 | 232 | class ResBlock2(torch.nn.Module): 233 | def __init__(self, channels, kernel_size=3, dilation=(1, 3)): 234 | super(ResBlock2, self).__init__() 235 | self.convs = nn.ModuleList([ 236 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], 237 | padding=get_padding(kernel_size, dilation[0]))), 238 | weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], 239 | padding=get_padding(kernel_size, dilation[1]))) 240 | ]) 
241 | self.convs.apply(init_weights) 242 | 243 | def forward(self, x, x_mask=None): 244 | for c in self.convs: 245 | xt = F.leaky_relu(x, LRELU_SLOPE) 246 | if x_mask is not None: 247 | xt = xt * x_mask 248 | xt = c(xt) 249 | x = xt + x 250 | if x_mask is not None: 251 | x = x * x_mask 252 | return x 253 | 254 | def remove_weight_norm(self): 255 | for l in self.convs: 256 | remove_weight_norm(l) 257 | 258 | 259 | class Log(nn.Module): 260 | def forward(self, x, x_mask, reverse=False, **kwargs): 261 | if not reverse: 262 | y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask 263 | logdet = torch.sum(-y, [1, 2]) 264 | return y, logdet 265 | else: 266 | x = torch.exp(x) * x_mask 267 | return x 268 | 269 | 270 | class Flip(nn.Module): 271 | def forward(self, x, *args, reverse=False, **kwargs): 272 | x = torch.flip(x, [1]) 273 | if not reverse: 274 | logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) 275 | return x, logdet 276 | else: 277 | return x 278 | 279 | 280 | class ElementwiseAffine(nn.Module): 281 | def __init__(self, channels): 282 | super().__init__() 283 | self.channels = channels 284 | self.m = nn.Parameter(torch.zeros(channels,1)) 285 | self.logs = nn.Parameter(torch.zeros(channels,1)) 286 | 287 | def forward(self, x, x_mask, reverse=False, **kwargs): 288 | if not reverse: 289 | y = self.m + torch.exp(self.logs) * x 290 | y = y * x_mask 291 | logdet = torch.sum(self.logs * x_mask, [1,2]) 292 | return y, logdet 293 | else: 294 | x = (x - self.m) * torch.exp(-self.logs) * x_mask 295 | return x 296 | 297 | 298 | class ResidualCouplingLayer(nn.Module): 299 | def __init__(self, 300 | channels, 301 | hidden_channels, 302 | kernel_size, 303 | dilation_rate, 304 | n_layers, 305 | p_dropout=0, 306 | gin_channels=0, 307 | mean_only=False): 308 | assert channels % 2 == 0, "channels should be divisible by 2" 309 | super().__init__() 310 | self.channels = channels 311 | self.hidden_channels = hidden_channels 312 | self.kernel_size = kernel_size 313 | self.dilation_rate = dilation_rate 314 | self.n_layers = n_layers 315 | self.half_channels = channels // 2 316 | self.mean_only = mean_only 317 | 318 | self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) 319 | self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) 320 | self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) 321 | self.post.weight.data.zero_() 322 | self.post.bias.data.zero_() 323 | 324 | def forward(self, x, x_mask, g=None, reverse=False): 325 | x0, x1 = torch.split(x, [self.half_channels]*2, 1) 326 | h = self.pre(x0) * x_mask 327 | h = self.enc(h, x_mask, g=g) 328 | stats = self.post(h) * x_mask 329 | if not self.mean_only: 330 | m, logs = torch.split(stats, [self.half_channels]*2, 1) 331 | else: 332 | m = stats 333 | logs = torch.zeros_like(m) 334 | 335 | if not reverse: 336 | x1 = m + x1 * torch.exp(logs) * x_mask 337 | x = torch.cat([x0, x1], 1) 338 | logdet = torch.sum(logs, [1,2]) 339 | return x, logdet 340 | else: 341 | x1 = (x1 - m) * torch.exp(-logs) * x_mask 342 | x = torch.cat([x0, x1], 1) 343 | return x 344 | 345 | 346 | class ConvFlow(nn.Module): 347 | def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): 348 | super().__init__() 349 | self.in_channels = in_channels 350 | self.filter_channels = filter_channels 351 | self.kernel_size = kernel_size 352 | self.n_layers = n_layers 353 | self.num_bins = num_bins 354 | self.tail_bound = tail_bound 355 | 
self.half_channels = in_channels // 2 356 | 357 | self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) 358 | self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) 359 | self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) 360 | self.proj.weight.data.zero_() 361 | self.proj.bias.data.zero_() 362 | 363 | def forward(self, x, x_mask, g=None, reverse=False): 364 | x0, x1 = torch.split(x, [self.half_channels]*2, 1) 365 | h = self.pre(x0) 366 | h = self.convs(h, x_mask, g=g) 367 | h = self.proj(h) * x_mask 368 | 369 | b, c, t = x0.shape 370 | h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 371 | 372 | unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) 373 | unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) 374 | unnormalized_derivatives = h[..., 2 * self.num_bins:] 375 | 376 | x1, logabsdet = piecewise_rational_quadratic_transform(x1, 377 | unnormalized_widths, 378 | unnormalized_heights, 379 | unnormalized_derivatives, 380 | inverse=reverse, 381 | tails='linear', 382 | tail_bound=self.tail_bound 383 | ) 384 | 385 | x = torch.cat([x0, x1], 1) * x_mask 386 | logdet = torch.sum(logabsdet * x_mask, [1,2]) 387 | if not reverse: 388 | return x, logdet 389 | else: 390 | return x 391 | -------------------------------------------------------------------------------- /data_utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | import random 4 | import numpy as np 5 | import torch 6 | import torch.utils.data 7 | 8 | import commons 9 | from mel_processing import spectrogram_torch 10 | from utils import load_wav_to_torch, load_filepaths_and_text 11 | from text import text_to_sequence, cleaned_text_to_sequence 12 | 13 | 14 | class TextAudioLoader(torch.utils.data.Dataset): 15 | """ 16 | 1) loads audio, text pairs 17 | 2) normalizes text and converts them to sequences of integers 18 | 3) computes spectrograms from audio files. 
19 | """ 20 | def __init__(self, audiopaths_and_text, hparams): 21 | self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) 22 | self.text_cleaners = hparams.text_cleaners 23 | self.max_wav_value = hparams.max_wav_value 24 | self.sampling_rate = hparams.sampling_rate 25 | self.filter_length = hparams.filter_length 26 | self.hop_length = hparams.hop_length 27 | self.win_length = hparams.win_length 28 | self.sampling_rate = hparams.sampling_rate 29 | 30 | self.cleaned_text = getattr(hparams, "cleaned_text", False) 31 | 32 | self.add_blank = hparams.add_blank 33 | self.min_text_len = getattr(hparams, "min_text_len", 1) 34 | self.max_text_len = getattr(hparams, "max_text_len", 190) 35 | 36 | random.seed(1234) 37 | random.shuffle(self.audiopaths_and_text) 38 | self._filter() 39 | 40 | 41 | def _filter(self): 42 | """ 43 | Filter text & store spec lengths 44 | """ 45 | # Store spectrogram lengths for Bucketing 46 | # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) 47 | # spec_length = wav_length // hop_length 48 | 49 | audiopaths_and_text_new = [] 50 | lengths = [] 51 | for audiopath, text in self.audiopaths_and_text: 52 | if self.min_text_len <= len(text) and len(text) <= self.max_text_len: 53 | audiopaths_and_text_new.append([audiopath, text]) 54 | lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) 55 | self.audiopaths_and_text = audiopaths_and_text_new 56 | self.lengths = lengths 57 | 58 | def get_audio_text_pair(self, audiopath_and_text): 59 | # separate filename and text 60 | audiopath, text = audiopath_and_text[0], audiopath_and_text[1] 61 | text = self.get_text(text) 62 | spec, wav = self.get_audio(audiopath) 63 | return (text, spec, wav) 64 | 65 | def get_audio(self, filename): 66 | audio, sampling_rate = load_wav_to_torch(filename) 67 | if sampling_rate != self.sampling_rate: 68 | raise ValueError("{} {} SR doesn't match target {} SR".format( 69 | sampling_rate, self.sampling_rate)) 70 | audio_norm = audio / self.max_wav_value 71 | audio_norm = audio_norm.unsqueeze(0) 72 | spec_filename = filename.replace(".wav", ".spec.pt") 73 | if os.path.exists(spec_filename): 74 | spec = torch.load(spec_filename) 75 | else: 76 | spec = spectrogram_torch(audio_norm, self.filter_length, 77 | self.sampling_rate, self.hop_length, self.win_length, 78 | center=False) 79 | spec = torch.squeeze(spec, 0) 80 | torch.save(spec, spec_filename) 81 | return spec, audio_norm 82 | 83 | def get_text(self, text): 84 | if self.cleaned_text: 85 | text_norm = cleaned_text_to_sequence(text) 86 | else: 87 | text_norm = text_to_sequence(text, self.text_cleaners) 88 | if self.add_blank: 89 | text_norm = commons.intersperse(text_norm, 0) 90 | text_norm = torch.LongTensor(text_norm) 91 | return text_norm 92 | 93 | def __getitem__(self, index): 94 | return self.get_audio_text_pair(self.audiopaths_and_text[index]) 95 | 96 | def __len__(self): 97 | return len(self.audiopaths_and_text) 98 | 99 | 100 | class TextAudioCollate(): 101 | """ Zero-pads model inputs and targets 102 | """ 103 | def __init__(self, return_ids=False): 104 | self.return_ids = return_ids 105 | 106 | def __call__(self, batch): 107 | """Collate's training batch from normalized text and aduio 108 | PARAMS 109 | ------ 110 | batch: [text_normalized, spec_normalized, wav_normalized] 111 | """ 112 | # Right zero-pad all one-hot text sequences to max input length 113 | _, ids_sorted_decreasing = torch.sort( 114 | torch.LongTensor([x[1].size(1) for x in batch]), 115 | dim=0, descending=True) 116 | 117 | 
max_text_len = max([len(x[0]) for x in batch]) 118 | max_spec_len = max([x[1].size(1) for x in batch]) 119 | max_wav_len = max([x[2].size(1) for x in batch]) 120 | 121 | text_lengths = torch.LongTensor(len(batch)) 122 | spec_lengths = torch.LongTensor(len(batch)) 123 | wav_lengths = torch.LongTensor(len(batch)) 124 | 125 | text_padded = torch.LongTensor(len(batch), max_text_len) 126 | spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) 127 | wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) 128 | text_padded.zero_() 129 | spec_padded.zero_() 130 | wav_padded.zero_() 131 | for i in range(len(ids_sorted_decreasing)): 132 | row = batch[ids_sorted_decreasing[i]] 133 | 134 | text = row[0] 135 | text_padded[i, :text.size(0)] = text 136 | text_lengths[i] = text.size(0) 137 | 138 | spec = row[1] 139 | spec_padded[i, :, :spec.size(1)] = spec 140 | spec_lengths[i] = spec.size(1) 141 | 142 | wav = row[2] 143 | wav_padded[i, :, :wav.size(1)] = wav 144 | wav_lengths[i] = wav.size(1) 145 | 146 | if self.return_ids: 147 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing 148 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths 149 | 150 | 151 | """Multi speaker version""" 152 | class TextAudioSpeakerLoader(torch.utils.data.Dataset): 153 | """ 154 | 1) loads audio, speaker_id, text pairs 155 | 2) normalizes text and converts them to sequences of integers 156 | 3) computes spectrograms from audio files. 157 | """ 158 | def __init__(self, audiopaths_sid_text, hparams): 159 | self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) 160 | self.text_cleaners = hparams.text_cleaners 161 | self.max_wav_value = hparams.max_wav_value 162 | self.sampling_rate = hparams.sampling_rate 163 | self.filter_length = hparams.filter_length 164 | self.hop_length = hparams.hop_length 165 | self.win_length = hparams.win_length 166 | self.sampling_rate = hparams.sampling_rate 167 | 168 | self.cleaned_text = getattr(hparams, "cleaned_text", False) 169 | 170 | self.add_blank = hparams.add_blank 171 | self.min_text_len = getattr(hparams, "min_text_len", 1) 172 | self.max_text_len = getattr(hparams, "max_text_len", 190) 173 | 174 | random.seed(1234) 175 | random.shuffle(self.audiopaths_sid_text) 176 | self._filter() 177 | 178 | def _filter(self): 179 | """ 180 | Filter text & store spec lengths 181 | """ 182 | # Store spectrogram lengths for Bucketing 183 | # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) 184 | # spec_length = wav_length // hop_length 185 | 186 | audiopaths_sid_text_new = [] 187 | lengths = [] 188 | for audiopath, sid, text in self.audiopaths_sid_text: 189 | if self.min_text_len <= len(text) and len(text) <= self.max_text_len: 190 | audiopaths_sid_text_new.append([audiopath, sid, text]) 191 | lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) 192 | self.audiopaths_sid_text = audiopaths_sid_text_new 193 | self.lengths = lengths 194 | 195 | def get_audio_text_speaker_pair(self, audiopath_sid_text): 196 | # separate filename, speaker_id and text 197 | audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] 198 | text = self.get_text(text) 199 | spec, wav = self.get_audio(audiopath) 200 | sid = self.get_sid(sid) 201 | return (text, spec, wav, sid) 202 | 203 | def get_audio(self, filename): 204 | audio, sampling_rate = load_wav_to_torch(filename) 205 | if sampling_rate != self.sampling_rate: 206 | 
raise ValueError("{} {} SR doesn't match target {} SR".format( 207 | sampling_rate, self.sampling_rate)) 208 | audio_norm = audio / self.max_wav_value 209 | audio_norm = audio_norm.unsqueeze(0) 210 | spec_filename = filename.replace(".wav", ".spec.pt") 211 | if os.path.exists(spec_filename): 212 | spec = torch.load(spec_filename) 213 | else: 214 | spec = spectrogram_torch(audio_norm, self.filter_length, 215 | self.sampling_rate, self.hop_length, self.win_length, 216 | center=False) 217 | spec = torch.squeeze(spec, 0) 218 | torch.save(spec, spec_filename) 219 | return spec, audio_norm 220 | 221 | def get_text(self, text): 222 | if self.cleaned_text: 223 | text_norm = cleaned_text_to_sequence(text) 224 | else: 225 | text_norm = text_to_sequence(text, self.text_cleaners) 226 | if self.add_blank: 227 | text_norm = commons.intersperse(text_norm, 0) 228 | text_norm = torch.LongTensor(text_norm) 229 | return text_norm 230 | 231 | def get_sid(self, sid): 232 | sid = torch.LongTensor([int(sid)]) 233 | return sid 234 | 235 | def __getitem__(self, index): 236 | return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) 237 | 238 | def __len__(self): 239 | return len(self.audiopaths_sid_text) 240 | 241 | 242 | class TextAudioSpeakerCollate(): 243 | """ Zero-pads model inputs and targets 244 | """ 245 | def __init__(self, return_ids=False): 246 | self.return_ids = return_ids 247 | 248 | def __call__(self, batch): 249 | """Collate's training batch from normalized text, audio and speaker identities 250 | PARAMS 251 | ------ 252 | batch: [text_normalized, spec_normalized, wav_normalized, sid] 253 | """ 254 | # Right zero-pad all one-hot text sequences to max input length 255 | _, ids_sorted_decreasing = torch.sort( 256 | torch.LongTensor([x[1].size(1) for x in batch]), 257 | dim=0, descending=True) 258 | 259 | max_text_len = max([len(x[0]) for x in batch]) 260 | max_spec_len = max([x[1].size(1) for x in batch]) 261 | max_wav_len = max([x[2].size(1) for x in batch]) 262 | 263 | text_lengths = torch.LongTensor(len(batch)) 264 | spec_lengths = torch.LongTensor(len(batch)) 265 | wav_lengths = torch.LongTensor(len(batch)) 266 | sid = torch.LongTensor(len(batch)) 267 | 268 | text_padded = torch.LongTensor(len(batch), max_text_len) 269 | spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) 270 | wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) 271 | text_padded.zero_() 272 | spec_padded.zero_() 273 | wav_padded.zero_() 274 | for i in range(len(ids_sorted_decreasing)): 275 | row = batch[ids_sorted_decreasing[i]] 276 | 277 | text = row[0] 278 | text_padded[i, :text.size(0)] = text 279 | text_lengths[i] = text.size(0) 280 | 281 | spec = row[1] 282 | spec_padded[i, :, :spec.size(1)] = spec 283 | spec_lengths[i] = spec.size(1) 284 | 285 | wav = row[2] 286 | wav_padded[i, :, :wav.size(1)] = wav 287 | wav_lengths[i] = wav.size(1) 288 | 289 | sid[i] = row[3] 290 | 291 | if self.return_ids: 292 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing 293 | return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid 294 | 295 | 296 | class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): 297 | """ 298 | Maintain similar input lengths in a batch. 299 | Length groups are specified by boundaries. 300 | Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. 
301 | 302 | It removes samples which are not included in the boundaries. 303 | Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded. 304 | """ 305 | def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): 306 | super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) 307 | self.lengths = dataset.lengths 308 | self.batch_size = batch_size 309 | self.boundaries = boundaries 310 | 311 | self.buckets, self.num_samples_per_bucket = self._create_buckets() 312 | self.total_size = sum(self.num_samples_per_bucket) 313 | self.num_samples = self.total_size // self.num_replicas 314 | 315 | def _create_buckets(self): 316 | buckets = [[] for _ in range(len(self.boundaries) - 1)] 317 | for i in range(len(self.lengths)): 318 | length = self.lengths[i] 319 | idx_bucket = self._bisect(length) 320 | if idx_bucket != -1: 321 | buckets[idx_bucket].append(i) 322 | 323 | for i in range(len(buckets) - 1, 0, -1): 324 | if len(buckets[i]) == 0: 325 | buckets.pop(i) 326 | self.boundaries.pop(i+1) 327 | 328 | num_samples_per_bucket = [] 329 | for i in range(len(buckets)): 330 | len_bucket = len(buckets[i]) 331 | total_batch_size = self.num_replicas * self.batch_size 332 | rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size 333 | num_samples_per_bucket.append(len_bucket + rem) 334 | return buckets, num_samples_per_bucket 335 | 336 | def __iter__(self): 337 | # deterministically shuffle based on epoch 338 | g = torch.Generator() 339 | g.manual_seed(self.epoch) 340 | 341 | indices = [] 342 | if self.shuffle: 343 | for bucket in self.buckets: 344 | indices.append(torch.randperm(len(bucket), generator=g).tolist()) 345 | else: 346 | for bucket in self.buckets: 347 | indices.append(list(range(len(bucket)))) 348 | 349 | batches = [] 350 | for i in range(len(self.buckets)): 351 | bucket = self.buckets[i] 352 | len_bucket = len(bucket) 353 | ids_bucket = indices[i] 354 | num_samples_bucket = self.num_samples_per_bucket[i] 355 | 356 | # add extra samples to make it evenly divisible 357 | rem = num_samples_bucket - len_bucket 358 | ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] 359 | 360 | # subsample 361 | ids_bucket = ids_bucket[self.rank::self.num_replicas] 362 | 363 | # batching 364 | for j in range(len(ids_bucket) // self.batch_size): 365 | batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] 366 | batches.append(batch) 367 | 368 | if self.shuffle: 369 | batch_ids = torch.randperm(len(batches), generator=g).tolist() 370 | batches = [batches[i] for i in batch_ids] 371 | self.batches = batches 372 | 373 | assert len(self.batches) * self.batch_size == self.num_samples 374 | return iter(self.batches) 375 | 376 | def _bisect(self, x, lo=0, hi=None): 377 | if hi is None: 378 | hi = len(self.boundaries) - 1 379 | 380 | if hi > lo: 381 | mid = (hi + lo) // 2 382 | if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: 383 | return mid 384 | elif x <= self.boundaries[mid]: 385 | return self._bisect(x, lo, mid) 386 | else: 387 | return self._bisect(x, mid + 1, hi) 388 | else: 389 | return -1 390 | 391 | def __len__(self): 392 | return self.num_samples // self.batch_size 393 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | import torch 4 | from torch import nn 5 | 
from torch.nn import functional as F 6 | 7 | import commons 8 | import modules 9 | import attentions 10 | import monotonic_align 11 | 12 | from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d 13 | from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm 14 | from commons import init_weights, get_padding 15 | 16 | 17 | class StochasticDurationPredictor(nn.Module): 18 | def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): 19 | super().__init__() 20 | filter_channels = in_channels # it needs to be removed from future version. 21 | self.in_channels = in_channels 22 | self.filter_channels = filter_channels 23 | self.kernel_size = kernel_size 24 | self.p_dropout = p_dropout 25 | self.n_flows = n_flows 26 | self.gin_channels = gin_channels 27 | 28 | self.log_flow = modules.Log() 29 | self.flows = nn.ModuleList() 30 | self.flows.append(modules.ElementwiseAffine(2)) 31 | for i in range(n_flows): 32 | self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) 33 | self.flows.append(modules.Flip()) 34 | 35 | self.post_pre = nn.Conv1d(1, filter_channels, 1) 36 | self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) 37 | self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) 38 | self.post_flows = nn.ModuleList() 39 | self.post_flows.append(modules.ElementwiseAffine(2)) 40 | for i in range(4): 41 | self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) 42 | self.post_flows.append(modules.Flip()) 43 | 44 | self.pre = nn.Conv1d(in_channels, filter_channels, 1) 45 | self.proj = nn.Conv1d(filter_channels, filter_channels, 1) 46 | self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) 47 | if gin_channels != 0: 48 | self.cond = nn.Conv1d(gin_channels, filter_channels, 1) 49 | 50 | def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): 51 | x = torch.detach(x) 52 | x = self.pre(x) 53 | if g is not None: 54 | g = torch.detach(g) 55 | x = x + self.cond(g) 56 | x = self.convs(x, x_mask) 57 | x = self.proj(x) * x_mask 58 | 59 | if not reverse: 60 | flows = self.flows 61 | assert w is not None 62 | 63 | logdet_tot_q = 0 64 | h_w = self.post_pre(w) 65 | h_w = self.post_convs(h_w, x_mask) 66 | h_w = self.post_proj(h_w) * x_mask 67 | e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask 68 | z_q = e_q 69 | for flow in self.post_flows: 70 | z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) 71 | logdet_tot_q += logdet_q 72 | z_u, z1 = torch.split(z_q, [1, 1], 1) 73 | u = torch.sigmoid(z_u) * x_mask 74 | z0 = (w - u) * x_mask 75 | logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) 76 | logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q 77 | 78 | logdet_tot = 0 79 | z0, logdet = self.log_flow(z0, x_mask) 80 | logdet_tot += logdet 81 | z = torch.cat([z0, z1], 1) 82 | for flow in flows: 83 | z, logdet = flow(z, x_mask, g=x, reverse=reverse) 84 | logdet_tot = logdet_tot + logdet 85 | nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot 86 | return nll + logq # [b] 87 | else: 88 | flows = list(reversed(self.flows)) 89 | flows = flows[:-2] + [flows[-1]] # remove a useless vflow 90 | z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale 91 | for flow in flows: 92 | z = flow(z, x_mask, g=x, reverse=reverse) 93 | z0, z1 = 
torch.split(z, [1, 1], 1) 94 | logw = z0 95 | return logw 96 | 97 | 98 | class DurationPredictor(nn.Module): 99 | def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): 100 | super().__init__() 101 | 102 | self.in_channels = in_channels 103 | self.filter_channels = filter_channels 104 | self.kernel_size = kernel_size 105 | self.p_dropout = p_dropout 106 | self.gin_channels = gin_channels 107 | 108 | self.drop = nn.Dropout(p_dropout) 109 | self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) 110 | self.norm_1 = modules.LayerNorm(filter_channels) 111 | self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) 112 | self.norm_2 = modules.LayerNorm(filter_channels) 113 | self.proj = nn.Conv1d(filter_channels, 1, 1) 114 | 115 | if gin_channels != 0: 116 | self.cond = nn.Conv1d(gin_channels, in_channels, 1) 117 | 118 | def forward(self, x, x_mask, g=None): 119 | x = torch.detach(x) 120 | if g is not None: 121 | g = torch.detach(g) 122 | x = x + self.cond(g) 123 | x = self.conv_1(x * x_mask) 124 | x = torch.relu(x) 125 | x = self.norm_1(x) 126 | x = self.drop(x) 127 | x = self.conv_2(x * x_mask) 128 | x = torch.relu(x) 129 | x = self.norm_2(x) 130 | x = self.drop(x) 131 | x = self.proj(x * x_mask) 132 | return x * x_mask 133 | 134 | 135 | class TextEncoder(nn.Module): 136 | def __init__(self, 137 | n_vocab, 138 | out_channels, 139 | hidden_channels, 140 | filter_channels, 141 | n_heads, 142 | n_layers, 143 | kernel_size, 144 | p_dropout): 145 | super().__init__() 146 | self.n_vocab = n_vocab 147 | self.out_channels = out_channels 148 | self.hidden_channels = hidden_channels 149 | self.filter_channels = filter_channels 150 | self.n_heads = n_heads 151 | self.n_layers = n_layers 152 | self.kernel_size = kernel_size 153 | self.p_dropout = p_dropout 154 | 155 | self.emb = nn.Embedding(n_vocab, hidden_channels) 156 | nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) 157 | 158 | self.encoder = attentions.Encoder( 159 | hidden_channels, 160 | filter_channels, 161 | n_heads, 162 | n_layers, 163 | kernel_size, 164 | p_dropout) 165 | self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) 166 | 167 | def forward(self, x, x_lengths): 168 | x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] 169 | x = torch.transpose(x, 1, -1) # [b, h, t] 170 | x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) 171 | 172 | x = self.encoder(x * x_mask, x_mask) 173 | stats = self.proj(x) * x_mask 174 | 175 | m, logs = torch.split(stats, self.out_channels, dim=1) 176 | return x, m, logs, x_mask 177 | 178 | 179 | class ResidualCouplingBlock(nn.Module): 180 | def __init__(self, 181 | channels, 182 | hidden_channels, 183 | kernel_size, 184 | dilation_rate, 185 | n_layers, 186 | n_flows=4, 187 | gin_channels=0): 188 | super().__init__() 189 | self.channels = channels 190 | self.hidden_channels = hidden_channels 191 | self.kernel_size = kernel_size 192 | self.dilation_rate = dilation_rate 193 | self.n_layers = n_layers 194 | self.n_flows = n_flows 195 | self.gin_channels = gin_channels 196 | 197 | self.flows = nn.ModuleList() 198 | for i in range(n_flows): 199 | self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) 200 | self.flows.append(modules.Flip()) 201 | 202 | def forward(self, x, x_mask, g=None, reverse=False): 203 | if not reverse: 204 | for flow in 
self.flows: 205 | x, _ = flow(x, x_mask, g=g, reverse=reverse) 206 | else: 207 | for flow in reversed(self.flows): 208 | x = flow(x, x_mask, g=g, reverse=reverse) 209 | return x 210 | 211 | 212 | class PosteriorEncoder(nn.Module): 213 | def __init__(self, 214 | in_channels, 215 | out_channels, 216 | hidden_channels, 217 | kernel_size, 218 | dilation_rate, 219 | n_layers, 220 | gin_channels=0): 221 | super().__init__() 222 | self.in_channels = in_channels 223 | self.out_channels = out_channels 224 | self.hidden_channels = hidden_channels 225 | self.kernel_size = kernel_size 226 | self.dilation_rate = dilation_rate 227 | self.n_layers = n_layers 228 | self.gin_channels = gin_channels 229 | 230 | self.pre = nn.Conv1d(in_channels, hidden_channels, 1) 231 | self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) 232 | self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) 233 | 234 | def forward(self, x, x_lengths, g=None): 235 | x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) 236 | x = self.pre(x) * x_mask 237 | x = self.enc(x, x_mask, g=g) 238 | stats = self.proj(x) * x_mask 239 | m, logs = torch.split(stats, self.out_channels, dim=1) 240 | z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask 241 | return z, m, logs, x_mask 242 | 243 | 244 | class Generator(torch.nn.Module): 245 | def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): 246 | super(Generator, self).__init__() 247 | self.num_kernels = len(resblock_kernel_sizes) 248 | self.num_upsamples = len(upsample_rates) 249 | self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) 250 | resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 251 | 252 | self.ups = nn.ModuleList() 253 | for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): 254 | self.ups.append(weight_norm( 255 | ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), 256 | k, u, padding=(k-u)//2))) 257 | 258 | self.resblocks = nn.ModuleList() 259 | for i in range(len(self.ups)): 260 | ch = upsample_initial_channel//(2**(i+1)) 261 | for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): 262 | self.resblocks.append(resblock(ch, k, d)) 263 | 264 | self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) 265 | self.ups.apply(init_weights) 266 | 267 | if gin_channels != 0: 268 | self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) 269 | 270 | def forward(self, x, g=None): 271 | x = self.conv_pre(x) 272 | if g is not None: 273 | x = x + self.cond(g) 274 | 275 | for i in range(self.num_upsamples): 276 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 277 | x = self.ups[i](x) 278 | xs = None 279 | for j in range(self.num_kernels): 280 | if xs is None: 281 | xs = self.resblocks[i*self.num_kernels+j](x) 282 | else: 283 | xs += self.resblocks[i*self.num_kernels+j](x) 284 | x = xs / self.num_kernels 285 | x = F.leaky_relu(x) 286 | x = self.conv_post(x) 287 | x = torch.tanh(x) 288 | 289 | return x 290 | 291 | def remove_weight_norm(self): 292 | print('Removing weight norm...') 293 | for l in self.ups: 294 | remove_weight_norm(l) 295 | for l in self.resblocks: 296 | l.remove_weight_norm() 297 | 298 | 299 | class DiscriminatorP(torch.nn.Module): 300 | def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): 301 | super(DiscriminatorP, 
self).__init__() 302 | self.period = period 303 | self.use_spectral_norm = use_spectral_norm 304 | norm_f = weight_norm if use_spectral_norm == False else spectral_norm 305 | self.convs = nn.ModuleList([ 306 | norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 307 | norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 308 | norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 309 | norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), 310 | norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), 311 | ]) 312 | self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) 313 | 314 | def forward(self, x): 315 | fmap = [] 316 | 317 | # 1d to 2d 318 | b, c, t = x.shape 319 | if t % self.period != 0: # pad first 320 | n_pad = self.period - (t % self.period) 321 | x = F.pad(x, (0, n_pad), "reflect") 322 | t = t + n_pad 323 | x = x.view(b, c, t // self.period, self.period) 324 | 325 | for l in self.convs: 326 | x = l(x) 327 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 328 | fmap.append(x) 329 | x = self.conv_post(x) 330 | fmap.append(x) 331 | x = torch.flatten(x, 1, -1) 332 | 333 | return x, fmap 334 | 335 | 336 | class DiscriminatorS(torch.nn.Module): 337 | def __init__(self, use_spectral_norm=False): 338 | super(DiscriminatorS, self).__init__() 339 | norm_f = weight_norm if use_spectral_norm == False else spectral_norm 340 | self.convs = nn.ModuleList([ 341 | norm_f(Conv1d(1, 16, 15, 1, padding=7)), 342 | norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), 343 | norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), 344 | norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), 345 | norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), 346 | norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), 347 | ]) 348 | self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) 349 | 350 | def forward(self, x): 351 | fmap = [] 352 | 353 | for l in self.convs: 354 | x = l(x) 355 | x = F.leaky_relu(x, modules.LRELU_SLOPE) 356 | fmap.append(x) 357 | x = self.conv_post(x) 358 | fmap.append(x) 359 | x = torch.flatten(x, 1, -1) 360 | 361 | return x, fmap 362 | 363 | 364 | class MultiPeriodDiscriminator(torch.nn.Module): 365 | def __init__(self, use_spectral_norm=False): 366 | super(MultiPeriodDiscriminator, self).__init__() 367 | periods = [2,3,5,7,11] 368 | 369 | discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] 370 | discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] 371 | self.discriminators = nn.ModuleList(discs) 372 | 373 | def forward(self, y, y_hat): 374 | y_d_rs = [] 375 | y_d_gs = [] 376 | fmap_rs = [] 377 | fmap_gs = [] 378 | for i, d in enumerate(self.discriminators): 379 | y_d_r, fmap_r = d(y) 380 | y_d_g, fmap_g = d(y_hat) 381 | y_d_rs.append(y_d_r) 382 | y_d_gs.append(y_d_g) 383 | fmap_rs.append(fmap_r) 384 | fmap_gs.append(fmap_g) 385 | 386 | return y_d_rs, y_d_gs, fmap_rs, fmap_gs 387 | 388 | 389 | 390 | class SynthesizerTrn(nn.Module): 391 | """ 392 | Synthesizer for Training 393 | """ 394 | 395 | def __init__(self, 396 | n_vocab, 397 | spec_channels, 398 | segment_size, 399 | inter_channels, 400 | hidden_channels, 401 | filter_channels, 402 | n_heads, 403 | n_layers, 404 | kernel_size, 405 | p_dropout, 406 | resblock, 407 | resblock_kernel_sizes, 408 | resblock_dilation_sizes, 409 | upsample_rates, 410 | 
upsample_initial_channel, 411 | upsample_kernel_sizes, 412 | n_speakers=0, 413 | gin_channels=0, 414 | use_sdp=True, 415 | **kwargs): 416 | 417 | super().__init__() 418 | self.n_vocab = n_vocab 419 | self.spec_channels = spec_channels 420 | self.inter_channels = inter_channels 421 | self.hidden_channels = hidden_channels 422 | self.filter_channels = filter_channels 423 | self.n_heads = n_heads 424 | self.n_layers = n_layers 425 | self.kernel_size = kernel_size 426 | self.p_dropout = p_dropout 427 | self.resblock = resblock 428 | self.resblock_kernel_sizes = resblock_kernel_sizes 429 | self.resblock_dilation_sizes = resblock_dilation_sizes 430 | self.upsample_rates = upsample_rates 431 | self.upsample_initial_channel = upsample_initial_channel 432 | self.upsample_kernel_sizes = upsample_kernel_sizes 433 | self.segment_size = segment_size 434 | self.n_speakers = n_speakers 435 | self.gin_channels = gin_channels 436 | 437 | self.use_sdp = use_sdp 438 | 439 | self.enc_p = TextEncoder(n_vocab, 440 | inter_channels, 441 | hidden_channels, 442 | filter_channels, 443 | n_heads, 444 | n_layers, 445 | kernel_size, 446 | p_dropout) 447 | self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) 448 | self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) 449 | self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) 450 | 451 | if use_sdp: 452 | self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) 453 | else: 454 | self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) 455 | 456 | if n_speakers > 1: 457 | self.emb_g = nn.Embedding(n_speakers, gin_channels) 458 | 459 | def forward(self, x, x_lengths, y, y_lengths, sid=None): 460 | 461 | x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) 462 | if self.n_speakers > 0: 463 | g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] 464 | else: 465 | g = None 466 | 467 | z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) 468 | z_p = self.flow(z, y_mask, g=g) 469 | 470 | with torch.no_grad(): 471 | # negative cross-entropy 472 | s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] 473 | neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] 474 | neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] 475 | neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] 476 | neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] 477 | neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 478 | 479 | attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) 480 | attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() 481 | 482 | w = attn.sum(2) 483 | if self.use_sdp: 484 | l_length = self.dp(x, x_mask, w, g=g) 485 | l_length = l_length / torch.sum(x_mask) 486 | else: 487 | logw_ = torch.log(w + 1e-6) * x_mask 488 | logw = self.dp(x, x_mask, g=g) 489 | l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging 490 | 491 | # expand prior 492 | m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) 493 | logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) 494 | 495 | z_slice, ids_slice = 
commons.rand_slice_segments(z, y_lengths, self.segment_size) 496 | o = self.dec(z_slice, g=g) 497 | return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) 498 | 499 | def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): 500 | x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) 501 | if self.n_speakers > 0: 502 | g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] 503 | else: 504 | g = None 505 | 506 | if self.use_sdp: 507 | logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) 508 | else: 509 | logw = self.dp(x, x_mask, g=g) 510 | w = torch.exp(logw) * x_mask * length_scale 511 | w_ceil = torch.ceil(w) 512 | y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() 513 | y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) 514 | attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) 515 | attn = commons.generate_path(w_ceil, attn_mask) 516 | 517 | m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] 518 | logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] 519 | 520 | z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale 521 | z = self.flow(z_p, y_mask, g=g, reverse=True) 522 | o = self.dec((z * y_mask)[:,:,:max_len], g=g) 523 | return o, attn, y_mask, (z, z_p, m_p, logs_p) 524 | 525 | def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): 526 | assert self.n_speakers > 0, "n_speakers have to be larger than 0." 527 | g_src = self.emb_g(sid_src).unsqueeze(-1) 528 | g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) 529 | z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) 530 | z_p = self.flow(z, y_mask, g=g_src) 531 | z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) 532 | o_hat = self.dec(z_hat * y_mask, g=g_tgt) 533 | return o_hat, y_mask, (z, z_p, z_hat) 534 | 535 | --------------------------------------------------------------------------------
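
Usage note (illustrative sketch, not code from this repository): data_utils.py above defines the dataset, the zero-padding collate function and the length-bucketing sampler, but the training script that wires them together is not part of this section. The sketch below assumes a single process, a hypothetical config and filelist path, and that utils.get_hparams_from_file is available as in the upstream VITS utilities this project is based on.

from torch.utils.data import DataLoader

import utils  # assumed to provide get_hparams_from_file, as in upstream VITS
from data_utils import (TextAudioSpeakerLoader, TextAudioSpeakerCollate,
                        DistributedBucketSampler)

hps = utils.get_hparams_from_file("configs/example.json")   # hypothetical config path
# Filelist format: one "wav_path|speaker_id|text" entry per line (hypothetical path).
dataset = TextAudioSpeakerLoader("filelists/train.txt", hps.data)
collate_fn = TextAudioSpeakerCollate()

# Buckets group utterances of similar spectrogram length so padding stays small.
# num_replicas/rank are given explicitly, so no distributed process group is needed.
sampler = DistributedBucketSampler(
    dataset, batch_size=16,
    boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],
    num_replicas=1, rank=0, shuffle=True)

loader = DataLoader(dataset, num_workers=2, pin_memory=True,
                    collate_fn=collate_fn, batch_sampler=sampler)

for text, text_len, spec, spec_len, wav, wav_len, sid in loader:
    pass  # every tensor is zero-padded to the longest item in the batch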
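
A second illustrative sketch, under the same caveats, for inference with SynthesizerTrn from models.py: the checkpoint name, config path and speaker id are hypothetical, and utils.load_checkpoint, text_to_sequence, commons.intersperse and the symbols table are assumed to follow the upstream VITS layout that this repository mirrors. It assumes either a single-speaker config (n_speakers=0) or a multi-speaker config with n_speakers > 1.

import torch

import commons
import utils  # assumed to provide get_hparams_from_file / load_checkpoint
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

hps = utils.get_hparams_from_file("configs/example.json")   # hypothetical config path
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,                     # spec_channels
    hps.train.segment_size // hps.data.hop_length,       # segment_size (unused at inference)
    n_speakers=hps.data.n_speakers,
    **hps.model)                                          # inter_channels, hidden_channels, ...
utils.load_checkpoint("G_latest.pth", net_g, None)        # hypothetical checkpoint
net_g.eval()

seq = text_to_sequence("Example sentence.", hps.data.text_cleaners)
if hps.data.add_blank:
    seq = commons.intersperse(seq, 0)                     # blank token between symbols
x = torch.LongTensor(seq).unsqueeze(0)                    # [1, t]
x_lengths = torch.LongTensor([x.size(1)])
sid = torch.LongTensor([0])                               # speaker id (multi-speaker models)

with torch.no_grad():
    audio = net_g.infer(x, x_lengths, sid=sid, noise_scale=0.667,
                        noise_scale_w=0.8, length_scale=1.0)[0][0, 0]
# `audio` is a 1-D waveform tensor at hps.data.sampling_rate.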