├── .gitattributes ├── .gitignore ├── IMG ├── 20240102162212.png ├── 93ff10b42d8cc753527e50c41b8e2d72.png ├── {68AAFB0D-E298-4087-B041-3593260314AC}.png └── {6BDE2B2B-3C7A-4de5-90E8-C55DB1FC18C0}.png ├── LICENSE ├── README.md ├── README_CN.md ├── clean_csv.py ├── input └── .gitkeep ├── kick.py ├── load_npy.py ├── modules ├── cluster.py ├── model │ ├── emotion_encoder.py │ └── voice_encoder.py ├── utils.py └── visualizations.py ├── move_files.py ├── output └── .gitkeep ├── pretrain ├── encoder_1570000.bak └── wav2vec2-large-robust-12-ft-emotion-msp-dim │ ├── LICENSE │ ├── config.json │ ├── preprocessor_config.json │ └── vocab.json ├── requirements.txt ├── splitter.py └── viewer ├── .gitignore ├── README.md ├── bun.lockb ├── eslint.config.js ├── index.html ├── package.json ├── postcss.config.js ├── screenshot.png ├── src ├── App.tsx ├── ScatterPlot.tsx ├── index.css ├── main.tsx └── vite-env.d.ts ├── tailwind.config.js ├── tsconfig.app.json ├── tsconfig.json ├── tsconfig.node.json └── vite.config.ts /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | .DS_Store 163 | /input/* 164 | /output/* 165 | /pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/*.bin -------------------------------------------------------------------------------- /IMG/20240102162212.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/IMG/20240102162212.png -------------------------------------------------------------------------------- /IMG/93ff10b42d8cc753527e50c41b8e2d72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/IMG/93ff10b42d8cc753527e50c41b8e2d72.png -------------------------------------------------------------------------------- /IMG/{68AAFB0D-E298-4087-B041-3593260314AC}.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/IMG/{68AAFB0D-E298-4087-B041-3593260314AC}.png -------------------------------------------------------------------------------- /IMG/{6BDE2B2B-3C7A-4de5-90E8-C55DB1FC18C0}.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/IMG/{6BDE2B2B-3C7A-4de5-90E8-C55DB1FC18C0}.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 KakaruHayate 4 | 5 | Permission is hereby granted, free of charge, to any 
person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ColorSplitter

![result](IMG/20240102162212.png)

[中文文档](README_CN.md)

[webui](https://github.com/KakaruHayate/ColorSplitter/tree/main/viewer)

A command-line tool for separating vocal timbres

The encoder model was trained on 52 hours of data from a total of 303 speakers

# Introduction

ColorSplitter is a command-line tool for classifying the vocal timbre styles of single-speaker data during the pre-processing stage of vocal data.

For scenarios that do not require style classification, using this tool to filter data can also reduce the problem of unstable timbre performance of the model.

**Please note** that this project is based on Speaker Verification technology, and it is not yet clear whether the timbre changes in singing are fully correlated with voiceprint differences, just for fun :)

Research in this field is still scarce; hopefully this project can inspire more ideas.

Thanks to the community user: 洛泠羽

# New version features

Automatic optimization of clustering results is now implemented, so users no longer need to judge the optimal clustering result themselves.

`splitter.py` removes the `--nmax` parameter and adds `--nmin` (minimum number of timbre types; ignored when the cluster parameter is 2), `--cluster` (clustering method, 1: SpectralCluster, 2: UmapHdbscan), and `--mer_cosine` (merge clusters that are too similar).

**New version tips**

1. Run `splitter.py` directly with the default parameters, specifying only the speaker.

2. If the result has only one cluster, look at the distribution map, set `--nmin` to a number you think is reasonable, and run `splitter.py` again.

3. In practice, the optimal value of `--nmin` may be smaller than expected.

4. The new clustering algorithm is fast, so it is recommended to try several runs.

5. The emotion classification function has now been implemented and can be enabled with `--encoder emotion`. Before using it, download `pytorch_model.bin` from https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim/tree/main and place it in the `pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim` directory.

6. You can also use `--encoder mix` to filter audio that is similar in both features (timbre and emotion) at the same time. This feature can help you select reference (prompt) audio for `GPT SoVITS` or `Bert-VITS2.3`; see the sketch below for how the encoders and clustering fit together.

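For readers who prefer to drive the pipeline from Python rather than the command line, the following minimal sketch strings together the repository's own `GetEmbeds` (modules/utils.py) and `CommonClustering` (modules/cluster.py) helpers. It is illustrative only and not part of `splitter.py`: it assumes you run it from the repository root with the pretrained weights in `pretrain/`, and the speaker name and the `mer_cos` threshold are placeholder values.

```python
# Minimal sketch of the embedding + clustering pipeline (illustrative, not the CLI).
# Assumes: run from the repo root, weights under ./pretrain, and a dataset at
# ./input/<speaker>/raw/wavs. "my_speaker" is a placeholder speaker name.
from collections import Counter
from pathlib import Path

from modules.cluster import CommonClustering
from modules.utils import GetEmbeds

speaker = "my_speaker"
wav_fpaths = sorted(Path("input", speaker, "raw", "wavs").glob("*.wav"))

# encoder_type can be 'timbre', 'emotion', or 'mix' (timbre and emotion embeddings
# concatenated, as modules/utils.py does). Embeddings are cached as a .pkl file
# under input/<speaker>/ on the first run.
embeds = GetEmbeds(encoder_type="timbre", Speaker_name=speaker)(wav_fpaths)

# 'umap_hdbscan' corresponds to --cluster 2 and 'spectral' to --cluster 1;
# mer_cos merges clusters whose centroids are more similar than the threshold
# (the idea behind --mer_cosine). 0.9 is only an example value.
clustering = CommonClustering(cluster_type="umap_hdbscan", mer_cos=0.9)
labels = clustering(embeds)

print(Counter(labels.tolist()))  # number of files assigned to each cluster
```
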
# Progress

- [x] **Correctly trained weights**
- [x] Clustering algorithm optimization
- [ ] SSL (waiting for the wespeaker work to be done)
- [x] emotional encoder
- [x] embed mix

# Environment Configuration

The project works normally under `python3.8`. Please install [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) first.

Then use the following command to install the environment dependencies

```
pip install -r requirements.txt
```

Tips: If you are only using the timbre encoder, you only need to install the CPU version of pytorch. In other cases, the GPU version is recommended.

# How to Use

**1. Move your prepared Diffsinger dataset to the `.\input` folder and run the following command**

```
python splitter.py --spk <speaker_name> --nmin <'N'_min_num>
```

Enter the speaker name after `--spk`, and the minimum number of timbre types after `--nmin` (minimum 1, maximum 14, default 1)

Tips: This project does not need to read the annotation file (transcriptions.csv) of the Diffsinger dataset, so it works normally as long as the file structure is as shown below
```
- input
  - <speaker_name>
    - raw
      - wavs
        - audio1.wav
        - audio2.wav
        - ...
```
The wav files should ideally already be split into short clips

**2. (Optional) Exclude outliers as shown in the figure below**

![kick](IMG/{68AAFB0D-E298-4087-B041-3593260314AC}.png)

As shown, cluster 3 is clearly a small group of outliers; you can use the following command to separate it from the dataset
```
python kick.py --spk <speaker_name> --clust <cluster_number>
```
The separated data will be saved in `.\input\<speaker_name>_<cluster_number>`

Please note that running this step will not necessarily improve the results

**3. After you select what you consider the optimal result, run the following command to classify the wav files in the dataset**
```
python move_files.py --spk <speaker_name>
```
The classified results will be saved in `.\output\<speaker_name>\<cluster>`
After that, you still need to manually merge clusters that are too small to meet the training requirements; the snippet below shows one way to check cluster sizes first.

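Before merging by hand, it can help to see how many files landed in each cluster. The small helper below is illustrative and not part of the repository; it only relies on the `clustered_files(<encoder>).csv` that `splitter.py` writes under `.\output\<speaker_name>` (the same file read by `kick.py` and `move_files.py`), and the speaker name is a placeholder.

```python
# Illustrative helper: count files per cluster before manually merging small clusters.
# "my_speaker" is a placeholder; use the same --encoder value you passed to splitter.py.
import os

import pandas as pd

speaker = "my_speaker"
encoder = "timbre"  # or "emotion" / "mix"

csv_path = os.path.join("output", speaker, f"clustered_files({encoder}).csv")
counts = pd.read_csv(csv_path)["clust"].value_counts().sort_index()
print(counts)  # clusters with very few files are candidates for manual merging
```
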
**4. (Optional) Move `clean_csv.py` to the same level as `transcriptions.csv` and run it to delete the entries for wav files that are not present in the `wavs` folder**


# Based on Project

[Resemblyzer](https://github.com/resemble-ai/Resemblyzer/)

[3D-Speaker](https://github.com/alibaba-damo-academy/3D-Speaker/)

[wav2vec2-large-robust-12-ft-emotion-msp-dim](https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim)

--------------------------------------------------------------------------------
/README_CN.md:
--------------------------------------------------------------------------------
# ColorSplitter

![result](IMG/20240102162212.png)

[webui](https://github.com/KakaruHayate/ColorSplitter/tree/main/viewer)

A command-line tool for separating singing-voice timbres

The model was trained on 52 hours of data from 303 speakers

# Introduction

ColorSplitter is a command-line tool for classifying the timbre styles of single-speaker data during the early processing stage of singing-voice data.

For scenarios that do not require style classification, using this tool to filter data can also reduce the problem of unstable timbre performance in the model.

**Please note** that this project is based on Speaker Verification technology, and it is currently not certain that the timbre changes in singing are fully correlated with voiceprint differences, just for fun :)

Research in this field is still scarce; this is offered as a starting point to spark better ideas.

Thanks to the community user: 洛泠羽

# New version features

Automatic optimization of clustering results is now implemented; users no longer need to judge the optimal clustering result themselves.

`splitter.py` removes the `--nmax` parameter and adds `--nmin` (minimum number of timbre types; ignored when the cluster parameter is 2), `--cluster` (clustering method, 1: SpectralCluster, 2: UmapHdbscan), and `--mer_cosine` (merge clusters that are too similar).

**Tips for the new version**

1. Run `splitter.py` with the default parameters, specifying only the speaker.

2. If the result has only one cluster, look at the distribution map, set `--nmin` to a number you think is reasonable, and run `splitter.py` again.

3. In practice, the optimal value of `--nmin` may be smaller than expected.

4. The new clustering algorithm is fast, so it is recommended to try several runs.

5. The new version supports the emotion encoder, which can be enabled with `--encoder emotion`. Before using it, download `pytorch_model.bin` from https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim/tree/main and place it in the `pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim` directory.

6. You can also use `--encoder mix` to select audio that is similar in both features at the same time; this can help you pick reference audio for `GPT SoVITS` and `BertVITS2.3`.

# Progress

- [x] **Correctly trained weights**
- [x] Clustering algorithm optimization
- [ ] SSL (on hold; waiting for the we-net update to port it over)
- [x] emotional encoder
- [x] embed mix

# Environment Configuration

Works normally under `python3.8`. Please install [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) first.

Then use the following command to install the environment dependencies

```
pip install -r requirements.txt
```
Note: If you only use the timbre encoder, you only need to install the CPU version of pytorch; in other cases the GPU version is recommended.

# How to Use

**1. Move your prepared Diffsinger dataset into the `.\input` folder and run the following command**

```
python splitter.py --spk <speaker_name> --nmin <'N'_min_num>
```

Enter the speaker name after `--spk` and the minimum number of timbre types after `--nmin` (minimum 1, maximum 14, default 1)

tips: This project does not need to read the annotation file (transcriptions.csv) of the Diffsinger dataset, so it works normally as long as the file structure is as follows
```
- input
  - <speaker_name>
    - raw
      - wavs
        - audio1.wav
        - audio2.wav
        - ...
```
The wav files should ideally already be split into short clips

**2. (Optional) Remove outliers like those shown in the figure below**

![kick](IMG/{68AAFB0D-E298-4087-B041-3593260314AC}.png)

As shown, cluster 3 is clearly a small group of outliers; you can separate it from the dataset with the following command
```
python kick.py --spk <speaker_name> --clust <cluster_number>
```
The separated data will be saved in `.\input\<speaker_name>_<cluster_number>`

Please note that running this step will not necessarily improve the results

**3. After you select what you consider the optimal result, run the following command to classify the wav files in the dataset**
```
python move_files.py --spk <speaker_name>
```
The classified results will be saved in `.\output\<speaker_name>\<cluster>`
After that, you still need to manually merge clusters that are too small to meet the training requirements

**4. (Optional) Move `clean_csv.py` to the same level as `transcriptions.csv` and run it to delete the entries for wav files that are not present in the `wavs` folder**

# Based on Project

[Resemblyzer](https://github.com/resemble-ai/Resemblyzer/)

[3D-Speaker](https://github.com/alibaba-damo-academy/3D-Speaker/)

[wav2vec2-large-robust-12-ft-emotion-msp-dim](https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim)

--------------------------------------------------------------------------------
/clean_csv.py:
--------------------------------------------------------------------------------
1 | import os
2 | import csv
3 | 
4 | 
5 | wav_files = set(f[:-4] for f in os.listdir('wavs') if f.endswith('.wav'))
6 | with open('transcriptions.csv', 'r') as f:
7 |     reader = csv.reader(f)
8 |     header = next(reader)
9 |     rows = [row for row in reader if row[0] in wav_files]
10 | 
11 | with open('transcriptions.csv', 'w', newline='') as f:
12 |     writer = csv.writer(f)
13 |     writer.writerow(header)
14 |     writer.writerows(rows)
--------------------------------------------------------------------------------
/input/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/input/.gitkeep
--------------------------------------------------------------------------------
/kick.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import pandas as pd
4 | import argparse
5 | 
6 | parser = argparse.ArgumentParser()
7 | parser.add_argument('--spk', type=str, help='Speaker name')
8 | parser.add_argument('--clust', type=int, help='Cluster value')
9 | parser.add_argument('--encoder', type=str, default='timbre', help='encoder type')
10 | args = parser.parse_args()
11 | 
12 | Speaker_name = args.spk  # Speaker name
13 | clust_value = args.clust  # Cluster value
14 | encoder_name = args.encoder
15 | 
16 | data = pd.read_csv(os.path.join('output', Speaker_name, f'clustered_files({encoder_name}).csv'))
17 | 
18 | for index, row in data.iterrows():
19 |     file_path = row['filename']
20 |     clust = row['clust']
21 | 
22 |     if clust == clust_value:
23 |         clust_dir = os.path.join('input', f'{Speaker_name}_{clust_value}')
24 |         if not os.path.exists(clust_dir):
25 |             os.makedirs(clust_dir)
26 | 
27 |         shutil.move(file_path, clust_dir)
28 | 
--------------------------------------------------------------------------------
/load_npy.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pathlib import Path
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | from modules.visualizations import plot_projections, process_json_file
6 | from modules.cluster import CommonClustering
7 | import argparse
8 | import os
9 | 
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--path', type=str, help='path to the .npy file')
12 | parser.add_argument('--reducer', type=int, default=2, help='1:tSNE, 2:Umap')
13 | parser.add_argument('--json', type=str, default=None,
help='path to the .json file') 14 | args = parser.parse_args() 15 | 16 | if args.reducer == 1: 17 | cluster_name = 'spectral' 18 | elif args.reducer == 2: 19 | cluster_name = 'umap_hdbscan' 20 | else: 21 | raise ValueError('reducer type error') 22 | 23 | npy_path = args.path 24 | 25 | embeds = np.load(npy_path) 26 | 27 | if args.json == None: 28 | token_names = np.arange(embeds.shape[0]) 29 | else: 30 | token_names = process_json_file(args.json) 31 | labels = np.ones_like(token_names) 32 | 33 | output_dir = f'output/npy_result' 34 | if not os.path.exists(output_dir): 35 | os.makedirs(output_dir) 36 | 37 | df = pd.DataFrame({ 38 | 'token': [f'{i}' for i in range(embeds.shape[0])], 39 | 'clust': labels 40 | }) 41 | df.to_csv(f'{output_dir}/clustered_files({os.path.basename(npy_path)}).csv', index=False) 42 | 43 | 44 | plot_projections(embeds, labels, title="Embedding projections", cluster_name=cluster_name, labels=token_names) 45 | plt.savefig(f'{output_dir}/embedding_projections({os.path.basename(npy_path)}).png', dpi=600) 46 | plt.show() 47 | -------------------------------------------------------------------------------- /modules/cluster.py: -------------------------------------------------------------------------------- 1 | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved. 2 | # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) 3 | 4 | import numpy as np 5 | import scipy 6 | import sklearn 7 | from sklearn.cluster._kmeans import k_means 8 | from sklearn.metrics.pairwise import cosine_similarity 9 | 10 | try: 11 | import umap, hdbscan 12 | except ImportError: 13 | raise ImportError( 14 | "Package \"umap\" or \"hdbscan\" not found. \ 15 | Please install them first by \"pip install umap-learn hdbscan\"." 16 | ) 17 | 18 | 19 | class SpectralCluster: 20 | """A spectral clustering method using unnormalized Laplacian of affinity matrix. 21 | This implementation is adapted from https://github.com/speechbrain/speechbrain. 
22 | """ 23 | 24 | def __init__(self, min_num_spks=1, max_num_spks=14, pval=0.02, min_pnum=6, oracle_num=None): 25 | self.min_num_spks = min_num_spks 26 | self.max_num_spks = max_num_spks 27 | self.min_pnum = min_pnum 28 | self.pval = pval 29 | self.k = oracle_num 30 | 31 | def __call__(self, X, pval=None, oracle_num=None): 32 | # Similarity matrix computation 33 | sim_mat = self.get_sim_mat(X) 34 | 35 | # Refining similarity matrix with pval 36 | prunned_sim_mat = self.p_pruning(sim_mat, pval) 37 | 38 | # Symmetrization 39 | sym_prund_sim_mat = 0.5 * (prunned_sim_mat + prunned_sim_mat.T) 40 | 41 | # Laplacian calculation 42 | laplacian = self.get_laplacian(sym_prund_sim_mat) 43 | 44 | # Get Spectral Embeddings 45 | emb, num_of_spk = self.get_spec_embs(laplacian, oracle_num) 46 | 47 | # Perform clustering 48 | labels = self.cluster_embs(emb, num_of_spk) 49 | 50 | return labels 51 | 52 | def get_sim_mat(self, X): 53 | # Cosine similarities 54 | M = cosine_similarity(X, X) 55 | return M 56 | 57 | def p_pruning(self, A, pval=None): 58 | if pval is None: 59 | pval = self.pval 60 | n_elems = int((1 - pval) * A.shape[0]) 61 | n_elems = min(n_elems, A.shape[0]-self.min_pnum) 62 | 63 | # For each row in a affinity matrix 64 | for i in range(A.shape[0]): 65 | low_indexes = np.argsort(A[i, :]) 66 | low_indexes = low_indexes[0:n_elems] 67 | 68 | # Replace smaller similarity values by 0s 69 | A[i, low_indexes] = 0 70 | return A 71 | 72 | def get_laplacian(self, M): 73 | M[np.diag_indices(M.shape[0])] = 0 74 | D = np.sum(np.abs(M), axis=1) 75 | D = np.diag(D) 76 | L = D - M 77 | return L 78 | 79 | def get_spec_embs(self, L, k_oracle=None): 80 | if k_oracle is None: 81 | k_oracle = self.k 82 | 83 | lambdas, eig_vecs = scipy.linalg.eigh(L) 84 | 85 | if k_oracle is not None: 86 | num_of_spk = k_oracle 87 | else: 88 | lambda_gap_list = self.getEigenGaps( 89 | lambdas[self.min_num_spks - 1:self.max_num_spks + 1]) 90 | num_of_spk = np.argmax(lambda_gap_list) + self.min_num_spks 91 | 92 | emb = eig_vecs[:, :num_of_spk] 93 | return emb, num_of_spk 94 | 95 | def cluster_embs(self, emb, k): 96 | # k-means 97 | _, labels, _ = k_means(emb, k, n_init='auto') 98 | return labels 99 | 100 | def getEigenGaps(self, eig_vals): 101 | eig_vals_gap_list = [] 102 | for i in range(len(eig_vals) - 1): 103 | gap = float(eig_vals[i + 1]) - float(eig_vals[i]) 104 | eig_vals_gap_list.append(gap) 105 | return eig_vals_gap_list 106 | 107 | 108 | class UmapHdbscan: 109 | """ 110 | Reference: 111 | - Siqi Zheng, Hongbin Suo. Reformulating Speaker Diarization as Community Detection With 112 | Emphasis On Topological Structure. ICASSP2022 113 | """ 114 | 115 | def __init__(self, n_neighbors=20, n_components=60, min_samples=20, min_cluster_size=10, metric='euclidean'): 116 | self.n_neighbors = n_neighbors 117 | self.n_components = n_components 118 | self.min_samples = min_samples 119 | self.min_cluster_size = min_cluster_size 120 | self.metric = metric 121 | 122 | def __call__(self, X): 123 | umap_X = umap.UMAP( 124 | n_neighbors=self.n_neighbors, 125 | min_dist=0.0, 126 | n_components=min(self.n_components, X.shape[0]-2), 127 | metric=self.metric, 128 | ).fit_transform(X) 129 | labels = hdbscan.HDBSCAN(min_samples=self.min_samples, min_cluster_size=self.min_cluster_size).fit_predict(umap_X) 130 | return labels 131 | 132 | 133 | class CommonClustering: 134 | """Perfom clustering for input embeddings and output the labels. 
135 | """ 136 | 137 | def __init__(self, cluster_type, cluster_line=10, mer_cos=None, min_cluster_size=4, **kwargs): 138 | self.cluster_type = cluster_type 139 | self.cluster_line = cluster_line 140 | self.min_cluster_size = min_cluster_size 141 | self.mer_cos = mer_cos 142 | if self.cluster_type == 'spectral': 143 | self.cluster = SpectralCluster(**kwargs) 144 | elif self.cluster_type == 'umap_hdbscan': 145 | kwargs['min_cluster_size'] = min_cluster_size 146 | self.cluster = UmapHdbscan(**kwargs) 147 | else: 148 | raise ValueError( 149 | '%s is not currently supported.' % self.cluster_type 150 | ) 151 | 152 | def __call__(self, X): 153 | # clustering and return the labels 154 | assert len(X.shape) == 2, 'Shape of input should be [N, C]' 155 | if X.shape[0] < self.cluster_line: 156 | return np.ones(X.shape[0], dtype=int) 157 | # clustering 158 | labels = self.cluster(X) 159 | 160 | # remove extremely minor cluster 161 | labels = self.filter_minor_cluster(labels, X, self.min_cluster_size) 162 | # merge similar speaker 163 | if self.mer_cos is not None: 164 | labels = self.merge_by_cos(labels, X, self.mer_cos) 165 | 166 | return labels 167 | 168 | def filter_minor_cluster(self, labels, x, min_cluster_size): 169 | cset = np.unique(labels) 170 | csize = np.array([(labels == i).sum() for i in cset]) 171 | minor_idx = np.where(csize < self.min_cluster_size)[0] 172 | if len(minor_idx) == 0: 173 | return labels 174 | 175 | minor_cset = cset[minor_idx] 176 | major_idx = np.where(csize >= self.min_cluster_size)[0] 177 | major_cset = cset[major_idx] 178 | major_center = np.stack([x[labels == i].mean(0) \ 179 | for i in major_cset]) 180 | for i in range(len(labels)): 181 | if labels[i] in minor_cset: 182 | cos_sim = cosine_similarity(x[i][np.newaxis], major_center) 183 | labels[i] = major_cset[cos_sim.argmax()] 184 | 185 | return labels 186 | 187 | def merge_by_cos(self, labels, x, cos_thr): 188 | # merge the similar speakers by cosine similarity 189 | assert cos_thr > 0 and cos_thr <= 1 190 | while True: 191 | cset = np.unique(labels) 192 | if len(cset) == 1: 193 | break 194 | centers = np.stack([x[labels == i].mean(0) \ 195 | for i in cset]) 196 | affinity = cosine_similarity(centers, centers) 197 | affinity = np.triu(affinity, 1) 198 | idx = np.unravel_index(np.argmax(affinity), affinity.shape) 199 | if affinity[idx] < cos_thr: 200 | break 201 | c1, c2 = cset[np.array(idx)] 202 | labels[labels==c2]=c1 203 | return labels 204 | -------------------------------------------------------------------------------- /modules/model/emotion_encoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from transformers import Wav2Vec2Processor 4 | from transformers.models.wav2vec2.modeling_wav2vec2 import ( 5 | Wav2Vec2Model, 6 | Wav2Vec2PreTrainedModel, 7 | ) 8 | import os 9 | import librosa 10 | import numpy as np 11 | 12 | 13 | class RegressionHead(nn.Module): 14 | r"""Classification head.""" 15 | 16 | def __init__(self, config): 17 | super().__init__() 18 | 19 | self.dense = nn.Linear(config.hidden_size, config.hidden_size) 20 | self.dropout = nn.Dropout(config.final_dropout) 21 | self.out_proj = nn.Linear(config.hidden_size, config.num_labels) 22 | 23 | def forward(self, features, **kwargs): 24 | x = features 25 | x = self.dropout(x) 26 | x = self.dense(x) 27 | x = torch.tanh(x) 28 | x = self.dropout(x) 29 | x = self.out_proj(x) 30 | 31 | return x 32 | 33 | 34 | class EmotionModel(Wav2Vec2PreTrainedModel): 35 | r"""Speech 
emotion classifier.""" 36 | 37 | def __init__(self, config): 38 | super().__init__(config) 39 | 40 | self.config = config 41 | self.wav2vec2 = Wav2Vec2Model(config) 42 | self.classifier = RegressionHead(config) 43 | self.init_weights() 44 | 45 | def forward( 46 | self, 47 | input_values, 48 | ): 49 | outputs = self.wav2vec2(input_values) 50 | hidden_states = outputs[0] 51 | hidden_states = torch.mean(hidden_states, dim=1) 52 | logits = self.classifier(hidden_states) 53 | 54 | return hidden_states, logits 55 | 56 | 57 | # load model from hub 58 | device = 'cuda' if torch.cuda.is_available() else "cpu" 59 | model_path = './pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim' 60 | processor = Wav2Vec2Processor.from_pretrained(model_path) 61 | model = EmotionModel.from_pretrained(model_path).to(device) 62 | 63 | 64 | def process_func( 65 | x: np.ndarray, 66 | sampling_rate: int, 67 | embeddings: bool = False, 68 | ) -> np.ndarray: 69 | r"""Predict emotions or extract embeddings from raw audio signal.""" 70 | 71 | # run through processor to normalize signal 72 | # always returns a batch, so we just get the first entry 73 | # then we put it on the device 74 | y = processor(x, sampling_rate=sampling_rate) 75 | y = y['input_values'][0] 76 | y = y.reshape(1, -1) 77 | y = torch.from_numpy(y).to(device) 78 | 79 | # run through model 80 | with torch.no_grad(): 81 | y = model(y)[0 if embeddings else 1] 82 | 83 | # convert to numpy 84 | y = y.detach().cpu().numpy() 85 | 86 | return y 87 | 88 | 89 | def extract_wav(path): 90 | wav, sr = librosa.load(path, sr = 16000) 91 | emb = process_func(np.expand_dims(wav, 0), sr, embeddings=True) 92 | return emb 93 | 94 | -------------------------------------------------------------------------------- /modules/model/voice_encoder.py: -------------------------------------------------------------------------------- 1 | from resemblyzer.hparams import * 2 | from resemblyzer import audio 3 | from pathlib import Path 4 | from typing import Union, List 5 | from torch import nn 6 | from time import perf_counter as timer 7 | import numpy as np 8 | import torch 9 | 10 | 11 | class VoiceEncoder(nn.Module): 12 | def __init__(self, device: Union[str, torch.device]="cpu", verbose=True, weights_fpath: Union[Path, str]=None): 13 | """ 14 | If None, defaults to cuda if it is available on your machine, otherwise the model will 15 | run on cpu. Outputs are always returned on the cpu, as numpy arrays. 16 | :param weights_fpath: path to ".pt" file path. 17 | If None, defaults to built-in "pretrained.pt" model 18 | """ 19 | super().__init__() 20 | 21 | # Define the network 22 | self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) 23 | self.linear = nn.Linear(model_hidden_size, model_embedding_size) 24 | self.relu = nn.ReLU() 25 | 26 | # Get the target device 27 | if device is None: 28 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 29 | elif isinstance(device, str): 30 | device = torch.device(device) 31 | self.device = device 32 | 33 | # Load the pretrained model'speaker weights 34 | if weights_fpath is None: 35 | weights_fpath = Path(__file__).resolve().parent.joinpath("pretrained.pt") 36 | else: 37 | weights_fpath = Path(weights_fpath) 38 | 39 | if not weights_fpath.exists(): 40 | raise Exception("Couldn't find the voice encoder pretrained model at %s." 
% 41 | weights_fpath) 42 | start = timer() 43 | checkpoint = torch.load(weights_fpath, map_location="cpu") 44 | self.load_state_dict(checkpoint["model_state"], strict=False) 45 | self.to(device) 46 | 47 | if verbose: 48 | print("Loaded the voice encoder model on %s in %.2f seconds." % 49 | (device.type, timer() - start)) 50 | 51 | def forward(self, mels: torch.FloatTensor): 52 | """ 53 | Computes the embeddings of a batch of utterance spectrograms. 54 | 55 | :param mels: a batch of mel spectrograms of same duration as a float32 tensor of shape 56 | (batch_size, n_frames, n_channels) 57 | :return: the embeddings as a float 32 tensor of shape (batch_size, embedding_size). 58 | Embeddings are positive and L2-normed, thus they lay in the range [0, 1]. 59 | """ 60 | # Pass the input through the LSTM layers and retrieve the final hidden state of the last 61 | # layer. Apply a cutoff to 0 for negative values and L2 normalize the embeddings. 62 | _, (hidden, _) = self.lstm(mels) 63 | embeds_raw = self.relu(self.linear(hidden[-1])) 64 | return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) 65 | 66 | @staticmethod 67 | def compute_partial_slices(n_samples: int, rate, min_coverage): 68 | """ 69 | Computes where to split an utterance waveform and its corresponding mel spectrogram to 70 | obtain partial utterances of each. Both the waveform and the 71 | mel spectrogram slices are returned, so as to make each partial utterance waveform 72 | correspond to its spectrogram. 73 | 74 | The returned ranges may be indexing further than the length of the waveform. It is 75 | recommended that you pad the waveform with zeros up to wav_slices[-1].stop. 76 | 77 | :param n_samples: the number of samples in the waveform 78 | :param rate: how many partial utterances should occur per second. Partial utterances must 79 | cover the span of the entire utterance, thus the rate should not be lower than the inverse 80 | of the duration of a partial utterance. By default, partial utterances are 1.6s long and 81 | the minimum rate is thus 0.625. 82 | :param min_coverage: when reaching the last partial utterance, it may or may not have 83 | enough frames. If at least of are present, 84 | then the last partial utterance will be considered by zero-padding the audio. Otherwise, 85 | it will be discarded. If there aren't enough frames for one partial utterance, 86 | this parameter is ignored so that the function always returns at least one slice. 87 | :return: the waveform slices and mel spectrogram slices as lists of array slices. Index 88 | respectively the waveform and the mel spectrogram with these slices to obtain the partial 89 | utterances. 
90 | """ 91 | assert 0 < min_coverage <= 1 92 | 93 | # Compute how many frames separate two partial utterances 94 | samples_per_frame = int((sampling_rate * mel_window_step / 1000)) 95 | n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) 96 | frame_step = int(np.round((sampling_rate / rate) / samples_per_frame)) 97 | assert 0 < frame_step, "The rate is too high" 98 | assert frame_step <= partials_n_frames, "The rate is too low, it should be %f at least" % \ 99 | (sampling_rate / (samples_per_frame * partials_n_frames)) 100 | 101 | # Compute the slices 102 | wav_slices, mel_slices = [], [] 103 | steps = max(1, n_frames - partials_n_frames + frame_step + 1) 104 | for i in range(0, steps, frame_step): 105 | mel_range = np.array([i, i + partials_n_frames]) 106 | wav_range = mel_range * samples_per_frame 107 | mel_slices.append(slice(*mel_range)) 108 | wav_slices.append(slice(*wav_range)) 109 | 110 | # Evaluate whether extra padding is warranted or not 111 | last_wav_range = wav_slices[-1] 112 | coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) 113 | if coverage < min_coverage and len(mel_slices) > 1: 114 | mel_slices = mel_slices[:-1] 115 | wav_slices = wav_slices[:-1] 116 | 117 | return wav_slices, mel_slices 118 | 119 | def embed_utterance(self, wav: np.ndarray, return_partials=False, rate=1.3, min_coverage=0.75): 120 | """ 121 | Computes an embedding for a single utterance. The utterance is divided in partial 122 | utterances and an embedding is computed for each. The complete utterance embedding is the 123 | L2-normed average embedding of the partial utterances. 124 | 125 | TODO: independent batched version of this function 126 | 127 | :param wav: a preprocessed utterance waveform as a numpy array of float32 128 | :param return_partials: if True, the partial embeddings will also be returned along with 129 | the wav slices corresponding to each partial utterance. 130 | :param rate: how many partial utterances should occur per second. Partial utterances must 131 | cover the span of the entire utterance, thus the rate should not be lower than the inverse 132 | of the duration of a partial utterance. By default, partial utterances are 1.6s long and 133 | the minimum rate is thus 0.625. 134 | :param min_coverage: when reaching the last partial utterance, it may or may not have 135 | enough frames. If at least of are present, 136 | then the last partial utterance will be considered by zero-padding the audio. Otherwise, 137 | it will be discarded. If there aren't enough frames for one partial utterance, 138 | this parameter is ignored so that the function always returns at least one slice. 139 | :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If 140 | is True, the partial utterances as a numpy array of float32 of shape 141 | (n_partials, model_embedding_size) and the wav partials as a list of slices will also be 142 | returned. 143 | """ 144 | # Compute where to split the utterance into partials and pad the waveform with zeros if 145 | # the partial utterances cover a larger range. 
146 | wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage) 147 | max_wave_length = wav_slices[-1].stop 148 | if max_wave_length >= len(wav): 149 | wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") 150 | 151 | # Split the utterance into partials and forward them through the model 152 | mel = audio.wav_to_mel_spectrogram(wav) 153 | mels = np.array([mel[s] for s in mel_slices]) 154 | with torch.no_grad(): 155 | mels = torch.from_numpy(mels).to(self.device) 156 | partial_embeds = self(mels).cpu().numpy() 157 | 158 | # Compute the utterance embedding from the partial embeddings 159 | raw_embed = np.mean(partial_embeds, axis=0) 160 | embed = raw_embed / np.linalg.norm(raw_embed, 2) 161 | 162 | if return_partials: 163 | return embed, partial_embeds, wav_slices 164 | return embed 165 | 166 | def embed_speaker(self, wavs: List[np.ndarray], **kwargs): 167 | """ 168 | Compute the embedding of a collection of wavs (presumably from the same speaker) by 169 | averaging their embedding and L2-normalizing it. 170 | 171 | :param wavs: list of wavs a numpy arrays of float32. 172 | :param kwargs: extra arguments to embed_utterance() 173 | :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). 174 | """ 175 | raw_embed = np.mean([self.embed_utterance(wav, return_partials=False, **kwargs) \ 176 | for wav in wavs], axis=0) 177 | return raw_embed / np.linalg.norm(raw_embed, 2) -------------------------------------------------------------------------------- /modules/utils.py: -------------------------------------------------------------------------------- 1 | from resemblyzer import preprocess_wav 2 | from modules.model.voice_encoder import VoiceEncoder 3 | from tqdm import tqdm 4 | import numpy as np 5 | import pickle 6 | import os 7 | import importlib 8 | 9 | class GetEmbeds: 10 | """ Used to obtain embedding vectors for audio. Directly input wav. 11 | """ 12 | 13 | def __init__(self, encoder_type, Speaker_name): 14 | self.encoder_type = encoder_type 15 | self.Speaker_name = Speaker_name 16 | if self.encoder_type == 'timbre': 17 | self.encoder = VoiceEncoder(weights_fpath="pretrain/encoder_1570000.bak") 18 | elif self.encoder_type == 'emotion': 19 | self.emotion_module = importlib.import_module('modules.model.emotion_encoder') 20 | elif self.encoder_type == 'mix': 21 | self.encoder = VoiceEncoder(weights_fpath="pretrain/encoder_1570000.bak") 22 | self.emotion_module = importlib.import_module('modules.model.emotion_encoder') 23 | else: 24 | raise ValueError( 25 | '%s is not currently supported.' 
% self.encoder_type 26 | ) 27 | 28 | def __call__(self, wav_fpaths): 29 | if self.encoder_type == 'timbre': 30 | embeds = self.timbre_encoder(wav_fpaths) 31 | if self.encoder_type == 'emotion': 32 | embeds = self.emotion_encoder(wav_fpaths) 33 | if self.encoder_type == 'mix': 34 | embeds = self.mix_encoder(wav_fpaths) 35 | 36 | return embeds 37 | 38 | def timbre_encoder(self, wav_fpaths): 39 | features_path = os.path.join("input", self.Speaker_name, "features(timbre).pkl") 40 | # Check if features already exist 41 | if os.path.exists(features_path): 42 | with open(features_path, 'rb') as f: 43 | embeds = pickle.load(f) 44 | else: 45 | wavs = [preprocess_wav(wav_fpath) for wav_fpath in \ 46 | tqdm(wav_fpaths, f"Preprocessing wavs ({len(wav_fpaths)} utterances)")] 47 | embeds = np.array(list(map(self.encoder.embed_utterance, wavs))) 48 | with open(features_path, 'wb') as f: 49 | pickle.dump(embeds, f) 50 | 51 | return embeds 52 | 53 | def emotion_encoder(self, wav_fpaths): 54 | features_path = os.path.join("input", self.Speaker_name, "features(emotion).pkl") 55 | # Check if features already exist 56 | if os.path.exists(features_path): 57 | with open(features_path, 'rb') as f: 58 | embeds = pickle.load(f) 59 | else: 60 | embeds = [self.emotion_module.extract_wav(wav_fpath) for wav_fpath in \ 61 | tqdm(wav_fpaths, f"Preprocessing wavs ({len(wav_fpaths)} utterances)")] 62 | embeds = np.concatenate(embeds,axis=0) 63 | with open(features_path, 'wb') as f: 64 | pickle.dump(embeds, f) 65 | 66 | return embeds 67 | 68 | def mix_encoder(self, wav_fpaths): 69 | features_path = os.path.join("input", self.Speaker_name, "features(mix).pkl") 70 | # Check if features already exist 71 | if os.path.exists(features_path): 72 | with open(features_path, 'rb') as f: 73 | embeds = pickle.load(f) 74 | else: 75 | timber_embeds = self.timbre_encoder(wav_fpaths) 76 | emotion_embeds = self.emotion_encoder(wav_fpaths) 77 | embeds = np.concatenate((timber_embeds, emotion_embeds), axis=1) 78 | with open(features_path, 'wb') as f: 79 | pickle.dump(embeds, f) 80 | 81 | return embeds 82 | -------------------------------------------------------------------------------- /modules/visualizations.py: -------------------------------------------------------------------------------- 1 | from mpl_toolkits.axes_grid1 import make_axes_locatable 2 | from matplotlib.animation import FuncAnimation 3 | from resemblyzer import sampling_rate 4 | from matplotlib import cm 5 | from time import sleep, perf_counter as timer 6 | from umap import UMAP 7 | from sys import stderr 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | from sklearn.manifold import TSNE 11 | import json 12 | 13 | 14 | _default_colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] 15 | _my_colors = np.array([ 16 | [0, 127, 70], 17 | [255, 0, 0], 18 | [255, 217, 38], 19 | [0, 135, 255], 20 | [165, 0, 165], 21 | [255, 167, 255], 22 | [97, 142, 151], 23 | [0, 255, 255], 24 | [255, 96, 38], 25 | [142, 76, 0], 26 | [33, 0, 127], 27 | [0, 0, 0], 28 | [183, 183, 183], 29 | [76, 255, 0], 30 | ], dtype=float) / 255 31 | 32 | 33 | def generate_colors(n): 34 | cm = plt.get_cmap('gist_rainbow') 35 | colors = [cm(1.*i/n) for i in range(n)] 36 | return colors 37 | 38 | 39 | def play_wav(wav, blocking=True): 40 | try: 41 | import sounddevice as sd 42 | # Small bug with sounddevice.play: the audio is cut 0.5 second too early. 
We pad it to 43 | # make up for that 44 | wav = np.concatenate((wav, np.zeros(sampling_rate // 2))) 45 | sd.play(wav, sampling_rate, blocking=blocking) 46 | except Exception as e: 47 | print("Failed to play audio: %s" % repr(e)) 48 | 49 | 50 | def plot_similarity_matrix(matrix, labels_a=None, labels_b=None, ax: plt.Axes=None, title=""): 51 | if ax is None: 52 | _, ax = plt.subplots() 53 | fig = plt.gcf() 54 | 55 | img = ax.matshow(matrix, extent=(-0.5, matrix.shape[0] - 0.5, 56 | -0.5, matrix.shape[1] - 0.5)) 57 | 58 | ax.xaxis.set_ticks_position("bottom") 59 | if labels_a is not None: 60 | ax.set_xticks(range(len(labels_a))) 61 | ax.set_xticklabels(labels_a, rotation=90) 62 | if labels_b is not None: 63 | ax.set_yticks(range(len(labels_b))) 64 | ax.set_yticklabels(labels_b[::-1]) # Upper origin -> reverse y axis 65 | ax.set_title(title) 66 | 67 | cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.15) 68 | fig.colorbar(img, cax=cax, ticks=np.linspace(0.4, 1, 7)) 69 | img.set_clim(0.4, 1) 70 | img.set_cmap("inferno") 71 | 72 | return ax 73 | 74 | 75 | def plot_histograms(all_samples, ax=None, names=None, title=""): 76 | """ 77 | Plots (possibly) overlapping histograms and their median 78 | """ 79 | if ax is None: 80 | _, ax = plt.subplots() 81 | 82 | for samples, color, name in zip(all_samples, _default_colors, names): 83 | ax.hist(samples, density=True, color=color + "80", label=name) 84 | ax.legend() 85 | ax.set_xlim(0.35, 1) 86 | ax.set_yticks([]) 87 | ax.set_title(title) 88 | 89 | ylim = ax.get_ylim() 90 | ax.set_ylim(*ylim) # Yeah, I know 91 | for samples, color in zip(all_samples, _default_colors): 92 | median = np.median(samples) 93 | ax.vlines(median, *ylim, color, "dashed") 94 | ax.text(median, ylim[1] * 0.15, "median", rotation=270, color=color) 95 | 96 | return ax 97 | 98 | 99 | def plot_projections(embeds, speakers, ax=None, colors=None, markers=None, legend=True, 100 | title="", cluster_name="", labels=None, **kwargs): 101 | if ax is None: 102 | _, ax = plt.subplots(figsize=(6, 6)) 103 | 104 | if cluster_name == 'spectral': 105 | reducer = TSNE(init='pca', **kwargs) 106 | if cluster_name == 'umap_hdbscan': 107 | reducer = UMAP(**kwargs) 108 | 109 | # Compute the 2D projections. You could also project to another number of dimensions (e.g. 110 | # for a 3D plot) or use a different different dimensionality reduction like PCA or TSNE. 
111 | 112 | projs = reducer.fit_transform(embeds) 113 | 114 | # Draw the projections 115 | speakers = np.array(speakers) 116 | colors = generate_colors(len(np.unique(speakers))) 117 | colors = colors or _my_colors 118 | for i, speaker in enumerate(np.unique(speakers)): 119 | speaker_projs = projs[speakers == speaker] 120 | marker = "o" if markers is None else markers[i] 121 | label = speaker if legend else None 122 | ax.scatter(*speaker_projs.T, s=60, c=[colors[i]], marker=marker, label=label, edgecolors='k') 123 | if labels is not None: 124 | for j, (proj_x, proj_y) in enumerate(speaker_projs): 125 | label_index = np.where(speakers == speaker)[0][j] 126 | ax.text(proj_x, proj_y, str(labels[label_index]), fontsize=8, ha='right') 127 | center = speaker_projs.mean(axis=0) 128 | ax.scatter(*center, s=200, c=[colors[i]], marker="X", edgecolors='k') 129 | 130 | 131 | if legend: 132 | ax.legend(title="Speakers", ncol=2) 133 | ax.set_title(title) 134 | #ax.set_xticks([]) 135 | #ax.set_yticks([]) 136 | ax.grid(True) 137 | ax.set_aspect("equal") 138 | 139 | return projs 140 | 141 | 142 | def interactive_diarization(similarity_dict, wav, wav_splits, x_crop=5, show_time=False): 143 | fig, ax = plt.subplots() 144 | lines = [ax.plot([], [], label=name)[0] for name in similarity_dict.keys()] 145 | text = ax.text(0, 0, "", fontsize=10) 146 | 147 | def init(): 148 | ax.set_ylim(0.4, 1) 149 | ax.set_ylabel("Similarity") 150 | if show_time: 151 | ax.set_xlabel("Time (seconds)") 152 | else: 153 | ax.set_xticks([]) 154 | ax.set_title("Diarization") 155 | ax.legend(loc="lower right") 156 | return lines + [text] 157 | 158 | times = [((s.start + s.stop) / 2) / sampling_rate for s in wav_splits] 159 | rate = 1 / (times[1] - times[0]) 160 | crop_range = int(np.round(x_crop * rate)) 161 | ticks = np.arange(0, len(wav_splits), rate) 162 | ref_time = timer() 163 | 164 | def update(i): 165 | # Crop plot 166 | crop = (max(i - crop_range // 2, 0), i + crop_range // 2) 167 | ax.set_xlim(i - crop_range // 2, crop[1]) 168 | if show_time: 169 | crop_ticks = ticks[(crop[0] <= ticks) * (ticks <= crop[1])] 170 | ax.set_xticks(crop_ticks) 171 | ax.set_xticklabels(np.round(crop_ticks / rate).astype(np.int)) 172 | 173 | # Plot the prediction 174 | similarities = [s[i] for s in similarity_dict.values()] 175 | best = np.argmax(similarities) 176 | name, similarity = list(similarity_dict.keys())[best], similarities[best] 177 | if similarity > 0.75: 178 | message = "Speaker: %s (confident)" % name 179 | color = _default_colors[best] 180 | elif similarity > 0.65: 181 | message = "Speaker: %s (uncertain)" % name 182 | color = _default_colors[best] 183 | else: 184 | message = "Unknown/No speaker" 185 | color = "black" 186 | text.set_text(message) 187 | text.set_c(color) 188 | text.set_position((i, 0.96)) 189 | 190 | # Plot data 191 | for line, (name, similarities) in zip(lines, similarity_dict.items()): 192 | line.set_data(range(crop[0], i + 1), similarities[crop[0]:i + 1]) 193 | 194 | # Block to synchronize with the audio (interval is not reliable) 195 | current_time = timer() - ref_time 196 | if current_time < times[i]: 197 | sleep(times[i] - current_time) 198 | elif current_time - 0.2 > times[i]: 199 | print("Animation is delayed further than 200ms!", file=stderr) 200 | return lines + [text] 201 | 202 | ani = FuncAnimation(fig, update, frames=len(wav_splits), init_func=init, blit=not show_time, 203 | repeat=False, interval=1) 204 | play_wav(wav, blocking=False) 205 | plt.show() 206 | 207 | 208 | def plot_embedding_as_heatmap(embed, 
ax=None, title="", shape=None, color_range=(0, 0.30)): 209 | if ax is None: 210 | _, ax = plt.subplots() 211 | 212 | if shape is None: 213 | height = int(np.sqrt(len(embed))) 214 | shape = (height, -1) 215 | embed = embed.reshape(shape) 216 | 217 | cmap = cm.get_cmap() 218 | mappable = ax.imshow(embed, cmap=cmap) 219 | cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) 220 | cbar.set_clim(*color_range) 221 | 222 | ax.set_xticks([]), ax.set_yticks([]) 223 | ax.set_title(title) 224 | 225 | 226 | def process_json_file(file_path): 227 | with open(file_path, 'r', encoding='utf-8') as file: 228 | data = json.load(file) 229 | merged_data = {} 230 | for key, value in data.items(): 231 | if value not in merged_data: 232 | merged_data[value] = [] 233 | merged_data[value].append(key) 234 | 235 | result_str = [""] 236 | for keys in merged_data.values(): 237 | result_str.append(','.join(keys)) 238 | 239 | return result_str 240 | -------------------------------------------------------------------------------- /move_files.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import pandas as pd 4 | import argparse 5 | import os 6 | 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument('--spk', type=str, help='Speaker name') 9 | parser.add_argument('--encoder', type=str, default='timbre', help='encoder type') 10 | args = parser.parse_args() 11 | 12 | 13 | Speaker_name = args.spk #Speaker name 14 | encoder_name = args.encoder 15 | 16 | data = pd.read_csv(os.path.join('output', Speaker_name, f'clustered_files({encoder_name}).csv')) 17 | 18 | for index, row in data.iterrows(): 19 | file_path = row['filename'] 20 | clust = row['clust'] 21 | 22 | clust_dir = os.path.join('output', Speaker_name, str(clust)) 23 | if not os.path.exists(clust_dir): 24 | os.makedirs(clust_dir) 25 | 26 | shutil.copy(file_path, clust_dir) 27 | -------------------------------------------------------------------------------- /output/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/output/.gitkeep -------------------------------------------------------------------------------- /pretrain/encoder_1570000.bak: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/pretrain/encoder_1570000.bak -------------------------------------------------------------------------------- /pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 
14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. 
For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. 
Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. 
Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. 
You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. 
automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. Nothing in this Public License constitutes or may be interpreted 414 | as a limitation upon, or waiver of, any privileges and immunities 415 | that apply to the Licensor or You, including from the legal 416 | processes of any jurisdiction or authority. 417 | 418 | ======================================================================= 419 | 420 | Creative Commons is not a party to its public 421 | licenses. Notwithstanding, Creative Commons may elect to apply one of 422 | its public licenses to material it publishes and in those instances 423 | will be considered the “Licensor.” The text of the Creative Commons 424 | public licenses is dedicated to the public domain under the CC0 Public 425 | Domain Dedication. Except for the limited purpose of indicating that 426 | material is shared under a Creative Commons public license or as 427 | otherwise permitted by the Creative Commons policies published at 428 | creativecommons.org/policies, Creative Commons does not authorize the 429 | use of the trademark "Creative Commons" or any other trademark or logo 430 | of Creative Commons without its prior written consent including, 431 | without limitation, in connection with any unauthorized modifications 432 | to any of its public licenses or any other arrangements, 433 | understandings, or agreements concerning use of licensed material. For 434 | the avoidance of doubt, this paragraph does not form part of the 435 | public licenses. 
436 | 437 | Creative Commons may be contacted at creativecommons.org. 438 | 439 | -------------------------------------------------------------------------------- /pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "_name_or_path": "torch", 3 | "activation_dropout": 0.1, 4 | "adapter_kernel_size": 3, 5 | "adapter_stride": 2, 6 | "add_adapter": false, 7 | "apply_spec_augment": true, 8 | "architectures": [ 9 | "Wav2Vec2ForSpeechClassification" 10 | ], 11 | "attention_dropout": 0.1, 12 | "bos_token_id": 1, 13 | "classifier_proj_size": 256, 14 | "codevector_dim": 768, 15 | "contrastive_logits_temperature": 0.1, 16 | "conv_bias": true, 17 | "conv_dim": [ 18 | 512, 19 | 512, 20 | 512, 21 | 512, 22 | 512, 23 | 512, 24 | 512 25 | ], 26 | "conv_kernel": [ 27 | 10, 28 | 3, 29 | 3, 30 | 3, 31 | 3, 32 | 2, 33 | 2 34 | ], 35 | "conv_stride": [ 36 | 5, 37 | 2, 38 | 2, 39 | 2, 40 | 2, 41 | 2, 42 | 2 43 | ], 44 | "ctc_loss_reduction": "sum", 45 | "ctc_zero_infinity": false, 46 | "diversity_loss_weight": 0.1, 47 | "do_stable_layer_norm": true, 48 | "eos_token_id": 2, 49 | "feat_extract_activation": "gelu", 50 | "feat_extract_dropout": 0.0, 51 | "feat_extract_norm": "layer", 52 | "feat_proj_dropout": 0.1, 53 | "feat_quantizer_dropout": 0.0, 54 | "final_dropout": 0.1, 55 | "finetuning_task": "wav2vec2_reg", 56 | "gradient_checkpointing": false, 57 | "hidden_act": "gelu", 58 | "hidden_dropout": 0.1, 59 | "hidden_dropout_prob": 0.1, 60 | "hidden_size": 1024, 61 | "id2label": { 62 | "0": "arousal", 63 | "1": "dominance", 64 | "2": "valence" 65 | }, 66 | "initializer_range": 0.02, 67 | "intermediate_size": 4096, 68 | "label2id": { 69 | "arousal": 0, 70 | "dominance": 1, 71 | "valence": 2 72 | }, 73 | "layer_norm_eps": 1e-05, 74 | "layerdrop": 0.1, 75 | "mask_feature_length": 10, 76 | "mask_feature_min_masks": 0, 77 | "mask_feature_prob": 0.0, 78 | "mask_time_length": 10, 79 | "mask_time_min_masks": 2, 80 | "mask_time_prob": 0.05, 81 | "model_type": "wav2vec2", 82 | "num_adapter_layers": 3, 83 | "num_attention_heads": 16, 84 | "num_codevector_groups": 2, 85 | "num_codevectors_per_group": 320, 86 | "num_conv_pos_embedding_groups": 16, 87 | "num_conv_pos_embeddings": 128, 88 | "num_feat_extract_layers": 7, 89 | "num_hidden_layers": 12, 90 | "num_negatives": 100, 91 | "output_hidden_size": 1024, 92 | "pad_token_id": 0, 93 | "pooling_mode": "mean", 94 | "problem_type": "regression", 95 | "proj_codevector_dim": 768, 96 | "tdnn_dilation": [ 97 | 1, 98 | 2, 99 | 3, 100 | 1, 101 | 1 102 | ], 103 | "tdnn_dim": [ 104 | 512, 105 | 512, 106 | 512, 107 | 512, 108 | 1500 109 | ], 110 | "tdnn_kernel": [ 111 | 5, 112 | 3, 113 | 3, 114 | 1, 115 | 1 116 | ], 117 | "torch_dtype": "float32", 118 | "transformers_version": "4.17.0.dev0", 119 | "use_weighted_layer_sum": false, 120 | "vocab_size": null, 121 | "xvector_output_dim": 512 122 | } 123 | -------------------------------------------------------------------------------- /pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/preprocessor_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "do_normalize": true, 3 | "feature_extractor_type": "Wav2Vec2FeatureExtractor", 4 | "feature_size": 1, 5 | "padding_side": "right", 6 | "padding_value": 0.0, 7 | "return_attention_mask": true, 8 | "sampling_rate": 16000 9 | } 10 | -------------------------------------------------------------------------------- 
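
Note: the two JSON files above (config.json and preprocessor_config.json) describe the wav2vec2 emotion model shipped under pretrain/: a 12-layer wav2vec2 encoder with hidden size 1024, mean pooling, 16 kHz normalized mono input, and arousal/dominance/valence regression labels. As a minimal sketch of what these configs are consumed by, the directory can be loaded with the `transformers` library as shown below. This is illustrative only: it assumes the matching model weights (pytorch_model.bin) have been placed in the same directory, and it loads only the plain `Wav2Vec2Model` backbone rather than the custom `Wav2Vec2ForSpeechClassification` head named in config.json; the repository's own emotion encoder code may load the model differently.

```python
# Illustrative sketch only -- assumes the weights file is present in MODEL_DIR.
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

MODEL_DIR = "pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim"

# preprocessor_config.json -> 16 kHz input, per-utterance normalization
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_DIR)
# config.json -> 12-layer wav2vec2 encoder, hidden_size 1024
model = Wav2Vec2Model.from_pretrained(MODEL_DIR)
model.eval()

waveform = torch.zeros(16000)  # one second of silence as a stand-in signal
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, frames, 1024)
    embedding = hidden.mean(dim=1)              # mean pooling, per "pooling_mode"
print(embedding.shape)  # torch.Size([1, 1024])
```
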
/pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/vocab.json:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | matplotlib>=3.0.0
3 | numpy>=1.20.3
4 | pandas
5 | Resemblyzer
6 | scikit_learn
7 | sounddevice
8 | tqdm
9 | umap_learn
10 | hdbscan
11 | transformers
12 | 
--------------------------------------------------------------------------------
/splitter.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pathlib import Path
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | from modules.utils import GetEmbeds
6 | from modules.visualizations import plot_projections
7 | from modules.cluster import CommonClustering
8 | import argparse
9 | import os
10 | 
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument("--spk", type=str, required=True, help="Speaker name")
13 | parser.add_argument("--nmin", type=int, default=1, help="minimum number of clusters")
14 | parser.add_argument(
15 |     "--cluster", type=int, default=1, help="1:SpectralCluster, 2:UmapHdbscan"
16 | )
17 | parser.add_argument("--mer_cosine", type=float, default=None, help="cosine similarity threshold for merging similar clusters")
18 | parser.add_argument("--encoder", type=str, default="timbre", help="encoder type")
19 | args = parser.parse_args()
20 | 
21 | Speaker_name = args.spk  # Speaker name
22 | Nmin = args.nmin  # minimum number of clusters (Nmin)
23 | merge_cos = args.mer_cosine
24 | encoder_name = args.encoder
25 | 
26 | data_dir = os.path.join("input", Speaker_name, "raw", "wavs")
27 | wav_fpaths = list(Path(data_dir).glob("*.wav"))
28 | 
29 | encoder = GetEmbeds(encoder_type=encoder_name, Speaker_name=Speaker_name)
30 | 
31 | embeds = encoder(wav_fpaths)
32 | 
33 | while True:  # re-cluster until the user is satisfied with the result
34 |     if args.cluster == 1:
35 |         cluster_name = "spectral"
36 |         min_num_spks = Nmin
37 |         mer_cos = merge_cos
38 |         Cluster = CommonClustering(
39 |             cluster_type=cluster_name, mer_cos=mer_cos, min_num_spks=min_num_spks
40 |         )
41 |     elif args.cluster == 2:
42 |         cluster_name = "umap_hdbscan"
43 |         mer_cos = merge_cos
44 |         Cluster = CommonClustering(mer_cos=mer_cos, cluster_type=cluster_name)
45 |     else:
46 |         raise ValueError("cluster type error")
47 | 
48 |     labels = Cluster(embeds)
49 | 
50 |     output_dir = f"output/{Speaker_name}"
51 |     if not os.path.exists(output_dir):
52 |         os.makedirs(output_dir)
53 | 
54 |     df = pd.DataFrame(
55 |         {"filename": [str(fpath) for fpath in wav_fpaths], "clust": labels}
56 |     )
57 |     proj = plot_projections(
58 |         embeds, labels, title="Embedding projections", cluster_name=cluster_name
59 |     )
60 |     df["x"] = proj[:, 0]
61 |     df["y"] = proj[:, 1]
62 | 
63 |     plt.savefig(f"{output_dir}/embedding_projections({encoder_name}).png", dpi=600)
64 |     plt.show()
65 |     df.to_csv(f"{output_dir}/clustered_files({encoder_name}).csv", index=False)
66 | 
67 |     user_input = input("Are you satisfied with the results?/是否满意结果?(y/n): ")
68 |     if user_input.lower() == "y":
69 |         break
70 |     else:
71 |         Nmin = int(input("Please enter a new Nmin value/请输入新的Nmin值: "))
72 | 
--------------------------------------------------------------------------------
/viewer/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 | 
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 | 
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 | 
--------------------------------------------------------------------------------
/viewer/README.md:
--------------------------------------------------------------------------------
1 | # Cluster viewer
2 | 
3 | After running ColorSplitter you will get a CSV file. You can use this viewer to load that CSV file, listen to each point in the scatter plot, and explore the clustering results interactively.
4 | 
5 | ## Prerequisites
6 | 
7 | - Bun (faster) or npm
8 | - Modern web browser
9 | 
10 | ## Build
11 | 
12 | 1. Navigate to the viewer directory:
13 | 
14 | ```bash
15 | cd viewer
16 | ```
17 | 
18 | 2. Install dependencies:
19 | 
20 | ```bash
21 | # Using Bun
22 | bun install
23 | 
24 | # Or using npm
25 | npm install
26 | ```
27 | 
28 | ## Serve
29 | 
30 | You'll need to run two servers simultaneously *in the viewer directory*:
31 | 
32 | 1. Web application server
33 | 
34 | ```bash
35 | # Using Bun
36 | bun run dev
37 | 
38 | # Or using npm
39 | npm run dev
40 | ```
41 | 
42 | 2. Audio file server
43 | 
44 | ```bash
45 | # Using Bun
46 | bunx http-server --cors -p 8080
47 | 
48 | # Or using npm
49 | npx http-server --cors -p 8080
50 | ```
51 | 
52 | ## Usage
53 | 
54 | ![screenshot](screenshot.png)
55 | 
56 | 1. Open your browser and navigate to the URL shown by the web application server (typically something like http://localhost:5173)
57 | 2. Look for the tab titled "Vite + React + TS"
58 | 3. Drag and drop your CSV file (generated by ColorSplitter) into the designated drop area
59 | 4. Explore your clusters and play audio samples by clicking on individual data points
60 | 
--------------------------------------------------------------------------------
/viewer/bun.lockb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/viewer/bun.lockb
--------------------------------------------------------------------------------
/viewer/eslint.config.js:
--------------------------------------------------------------------------------
1 | import js from '@eslint/js'
2 | import globals from 'globals'
3 | import reactHooks from 'eslint-plugin-react-hooks'
4 | import reactRefresh from 'eslint-plugin-react-refresh'
5 | import tseslint from 'typescript-eslint'
6 | 
7 | export default tseslint.config(
8 |   { ignores: ['dist'] },
9 |   {
10 |     extends: [js.configs.recommended, ...tseslint.configs.recommended],
11 |     files: ['**/*.{ts,tsx}'],
12 |     languageOptions: {
13 |       ecmaVersion: 2020,
14 |       globals: globals.browser,
15 |     },
16 |     plugins: {
17 |       'react-hooks': reactHooks,
18 |       'react-refresh': reactRefresh,
19 |     },
20 |     rules: {
21 |       ...reactHooks.configs.recommended.rules,
22 |       'react-refresh/only-export-components': [
23 |         'warn',
24 |         { allowConstantExport: true },
25 |       ],
26 |     },
27 |   },
28 | )
29 | 
--------------------------------------------------------------------------------
/viewer/index.html:
--------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | 6 | 7 | Vite + React + TS 8 | 9 | 10 |
11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /viewer/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "viewer", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "@tremor/react": "^3.18.7", 14 | "autoprefixer": "^10.4.20", 15 | "lucide-react": "^0.473.0", 16 | "postcss": "^8.5.1", 17 | "react": "^18.3.1", 18 | "react-dom": "^18.3.1", 19 | "recharts": "^2.15.0", 20 | "tailwindcss": "^3.4.17" 21 | }, 22 | "devDependencies": { 23 | "@eslint/js": "^9.17.0", 24 | "@types/react": "^18.3.18", 25 | "@types/react-dom": "^18.3.5", 26 | "@vitejs/plugin-react-swc": "^3.5.0", 27 | "eslint": "^9.17.0", 28 | "eslint-plugin-react-hooks": "^5.0.0", 29 | "eslint-plugin-react-refresh": "^0.4.16", 30 | "globals": "^15.14.0", 31 | "typescript": "~5.6.2", 32 | "typescript-eslint": "^8.18.2", 33 | "vite": "^6.0.5" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /viewer/postcss.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /viewer/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KakaruHayate/ColorSplitter/e408c70bb9495e20e6a6e8631e8ddc61d6288145/viewer/screenshot.png -------------------------------------------------------------------------------- /viewer/src/App.tsx: -------------------------------------------------------------------------------- 1 | import AudioVisualizer from "./ScatterPlot" 2 | 3 | function App() { 4 | return ( 5 | 6 | ) 7 | } 8 | 9 | export default App 10 | -------------------------------------------------------------------------------- /viewer/src/ScatterPlot.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useCallback } from 'react'; 2 | import { ScatterChart, Scatter, XAxis, YAxis, CartesianGrid } from 'recharts'; 3 | import { Play, Pause } from 'lucide-react'; 4 | 5 | interface DataPoint { 6 | filename: string; 7 | clust: number; 8 | x: number; 9 | y: number; 10 | } 11 | 12 | 13 | const AudioVisualizer = () => { 14 | const [data, setData] = useState([]); 15 | const [selectedFile, setSelectedFile] = useState(null); 16 | 17 | 18 | const [currentAudio, setCurrentAudio] = useState<{ 19 | file: string; 20 | audio: HTMLAudioElement | null; 21 | isPlaying: boolean; 22 | progress: number; 23 | } | null>(null); 24 | 25 | const handlePlay = (filename: string) => { 26 | if (currentAudio?.file === filename) { 27 | // Resume/pause existing audio 28 | if (currentAudio.isPlaying) { 29 | currentAudio.audio?.pause(); 30 | setCurrentAudio(prev => prev ? { ...prev, isPlaying: false } : null); 31 | } else { 32 | currentAudio.audio?.play(); 33 | setCurrentAudio(prev => prev ? { ...prev, isPlaying: true } : null); 34 | } 35 | } else { 36 | // Stop previous audio if any 37 | currentAudio?.audio?.pause(); 38 | 39 | // Create new audio 40 | const audio = new Audio(`http://localhost:8080/${filename}`); 41 | audio.addEventListener('ended', () => { 42 | setCurrentAudio(prev => prev ? 
{ ...prev, isPlaying: false, progress: 0 } : null); 43 | }); 44 | audio.play(); 45 | setCurrentAudio({ 46 | file: filename, 47 | audio, 48 | isPlaying: true, 49 | progress: 0 50 | }); 51 | } 52 | }; 53 | 54 | 55 | const handleDragOver = useCallback((e: React.DragEvent) => { 56 | e.preventDefault(); 57 | }, []); 58 | 59 | const handleDrop = useCallback((e: React.DragEvent) => { 60 | e.preventDefault(); 61 | const file = e.dataTransfer.files[0]; 62 | if (file && file.name.endsWith('.csv')) { 63 | const reader = new FileReader(); 64 | reader.onload = (event) => { 65 | const text = event.target?.result as string; 66 | const lines = text.split('\n'); 67 | const parsedData: DataPoint[] = lines.slice(1) 68 | .filter(line => line.trim()) 69 | .map(line => { 70 | const values = line.split(','); 71 | return { 72 | filename: values[0], 73 | clust: parseInt(values[1], 10), 74 | x: parseFloat(values[2]), 75 | y: parseFloat(values[3]) 76 | }; 77 | }); 78 | setData(parsedData); 79 | }; 80 | reader.readAsText(file); 81 | } 82 | }, []); 83 | 84 | const handleClick = (point: DataPoint) => { 85 | setSelectedFile(point.filename); 86 | }; 87 | 88 | const handleReset = () => { 89 | setSelectedFile(null); 90 | }; 91 | 92 | const displayFiles = selectedFile ? data.filter(d => d.filename === selectedFile) : data; 93 | 94 | return ( 95 |
96 |
97 |
102 |

Drop CSV here

103 |
104 | 105 | {data.length > 0 && ( 106 | 111 | 112 | 113 | 114 | {(() => { 115 | const maxCluster = Math.max(...data.map(d => d.clust)); 116 | const numClusters = maxCluster + 1; // Since clusters start from 0 117 | 118 | return Array.from(new Set(data.map(d => d.clust))).map((cluster) => { 119 | const hue = (360 / numClusters) * cluster; 120 | return ( 121 | d.clust === cluster)} 124 | fill={`hsl(${hue}deg, 70%, 50%)`} 125 | onClick={(point) => { 126 | const p = point as unknown as DataPoint; 127 | handleClick(p); 128 | handlePlay(p.filename); 129 | }} 130 | cursor="pointer" 131 | /> 132 | ); 133 | }); 134 | })()} 135 | 136 | )} 137 |
138 | 139 |
140 |
141 |

Files

142 | 148 |
149 |
150 | {displayFiles.map((file, index) => ( 151 |
156 |
157 | 166 | {file.filename.split('/').pop()} 167 |
168 |
169 | ))} 170 |
171 |
172 |
173 | ); 174 | }; 175 | 176 | export default AudioVisualizer; -------------------------------------------------------------------------------- /viewer/src/index.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; -------------------------------------------------------------------------------- /viewer/src/main.tsx: -------------------------------------------------------------------------------- 1 | import { StrictMode } from 'react' 2 | import { createRoot } from 'react-dom/client' 3 | import './index.css' 4 | import App from './App.tsx' 5 | 6 | createRoot(document.getElementById('root')!).render( 7 | 8 | 9 | , 10 | ) 11 | -------------------------------------------------------------------------------- /viewer/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /viewer/tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | export default { 3 | content: [ 4 | "./index.html", 5 | "./src/**/*.{js,ts,jsx,tsx}", 6 | ], 7 | theme: { 8 | extend: {}, 9 | }, 10 | plugins: [], 11 | } 12 | 13 | -------------------------------------------------------------------------------- /viewer/tsconfig.app.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", 4 | "target": "ES2020", 5 | "useDefineForClassFields": true, 6 | "lib": ["ES2020", "DOM", "DOM.Iterable"], 7 | "module": "ESNext", 8 | "skipLibCheck": true, 9 | 10 | /* Bundler mode */ 11 | "moduleResolution": "bundler", 12 | "allowImportingTsExtensions": true, 13 | "isolatedModules": true, 14 | "moduleDetection": "force", 15 | "noEmit": true, 16 | "jsx": "react-jsx", 17 | 18 | /* Linting */ 19 | "strict": true, 20 | "noUnusedLocals": true, 21 | "noUnusedParameters": true, 22 | "noFallthroughCasesInSwitch": true, 23 | "noUncheckedSideEffectImports": true 24 | }, 25 | "include": ["src"] 26 | } 27 | -------------------------------------------------------------------------------- /viewer/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [], 3 | "references": [ 4 | { "path": "./tsconfig.app.json" }, 5 | { "path": "./tsconfig.node.json" } 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /viewer/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", 4 | "target": "ES2022", 5 | "lib": ["ES2023"], 6 | "module": "ESNext", 7 | "skipLibCheck": true, 8 | 9 | /* Bundler mode */ 10 | "moduleResolution": "bundler", 11 | "allowImportingTsExtensions": true, 12 | "isolatedModules": true, 13 | "moduleDetection": "force", 14 | "noEmit": true, 15 | 16 | /* Linting */ 17 | "strict": true, 18 | "noUnusedLocals": true, 19 | "noUnusedParameters": true, 20 | "noFallthroughCasesInSwitch": true, 21 | "noUncheckedSideEffectImports": true 22 | }, 23 | "include": ["vite.config.ts"] 24 | } 25 | -------------------------------------------------------------------------------- /viewer/vite.config.ts: 
-------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react-swc' 3 | 4 | // https://vite.dev/config/ 5 | export default defineConfig({ 6 | plugins: [react()], 7 | }) 8 | --------------------------------------------------------------------------------
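
Note: the clustered_files(<encoder>).csv written by splitter.py (columns: filename, clust, x, y) can also be consumed outside the viewer. Below is a minimal sketch that reads the CSV with pandas, prints how many wavs fall into each cluster, and copies the files into per-cluster folders. The speaker name "my_speaker" and the per-cluster folder layout are placeholders for illustration only, not part of the project.

```python
# Minimal sketch, assuming splitter.py was run for a speaker named "my_speaker"
# with the default "timbre" encoder; names and output layout are placeholders.
import shutil
from pathlib import Path

import pandas as pd

csv_path = Path("output/my_speaker/clustered_files(timbre).csv")
df = pd.read_csv(csv_path)  # columns: filename, clust, x, y

# How many wav files ended up in each cluster
print(df["clust"].value_counts().sort_index())

# Copy each clip into a hypothetical per-cluster folder for manual review
for _, row in df.iterrows():
    dst_dir = Path("output/my_speaker/clusters") / f"clust_{row['clust']}"
    dst_dir.mkdir(parents=True, exist_ok=True)
    shutil.copy(row["filename"], dst_dir)  # filename is the path recorded by splitter.py
```
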