├── datasets └── .tmp ├── hubert ├── __init__.py ├── hubert_manager.py ├── pre_kmeans_hubert.py └── customtokenizer.py ├── utils ├── __init__.py ├── lora.py └── bitsandbytes.py ├── FUNDING.yml ├── setup.py ├── bark ├── assets │ └── prompts │ │ ├── announcer.npz │ │ ├── speaker_0.npz │ │ ├── speaker_1.npz │ │ ├── speaker_2.npz │ │ ├── speaker_3.npz │ │ ├── speaker_4.npz │ │ ├── speaker_5.npz │ │ ├── speaker_6.npz │ │ ├── speaker_7.npz │ │ ├── speaker_8.npz │ │ ├── speaker_9.npz │ │ ├── de_speaker_0.npz │ │ ├── de_speaker_1.npz │ │ ├── de_speaker_2.npz │ │ ├── de_speaker_3.npz │ │ ├── de_speaker_4.npz │ │ ├── de_speaker_5.npz │ │ ├── de_speaker_6.npz │ │ ├── de_speaker_7.npz │ │ ├── de_speaker_8.npz │ │ ├── de_speaker_9.npz │ │ ├── en_speaker_0.npz │ │ ├── en_speaker_1.npz │ │ ├── en_speaker_2.npz │ │ ├── en_speaker_3.npz │ │ ├── en_speaker_4.npz │ │ ├── en_speaker_5.npz │ │ ├── en_speaker_6.npz │ │ ├── en_speaker_7.npz │ │ ├── en_speaker_8.npz │ │ ├── en_speaker_9.npz │ │ ├── es_speaker_0.npz │ │ ├── es_speaker_1.npz │ │ ├── es_speaker_2.npz │ │ ├── es_speaker_3.npz │ │ ├── es_speaker_4.npz │ │ ├── es_speaker_5.npz │ │ ├── es_speaker_6.npz │ │ ├── es_speaker_7.npz │ │ ├── es_speaker_8.npz │ │ ├── es_speaker_9.npz │ │ ├── fr_speaker_0.npz │ │ ├── fr_speaker_1.npz │ │ ├── fr_speaker_2.npz │ │ ├── fr_speaker_3.npz │ │ ├── fr_speaker_4.npz │ │ ├── fr_speaker_5.npz │ │ ├── fr_speaker_6.npz │ │ ├── fr_speaker_7.npz │ │ ├── fr_speaker_8.npz │ │ ├── fr_speaker_9.npz │ │ ├── hi_speaker_0.npz │ │ ├── hi_speaker_1.npz │ │ ├── hi_speaker_2.npz │ │ ├── hi_speaker_3.npz │ │ ├── hi_speaker_4.npz │ │ ├── hi_speaker_5.npz │ │ ├── hi_speaker_6.npz │ │ ├── hi_speaker_7.npz │ │ ├── hi_speaker_8.npz │ │ ├── hi_speaker_9.npz │ │ ├── it_speaker_0.npz │ │ ├── it_speaker_1.npz │ │ ├── it_speaker_2.npz │ │ ├── it_speaker_3.npz │ │ ├── it_speaker_4.npz │ │ ├── it_speaker_5.npz │ │ ├── it_speaker_6.npz │ │ ├── it_speaker_7.npz │ │ ├── it_speaker_8.npz │ │ ├── it_speaker_9.npz │ │ ├── ja_speaker_0.npz │ │ ├── ja_speaker_1.npz │ │ ├── ja_speaker_2.npz │ │ ├── ja_speaker_3.npz │ │ ├── ja_speaker_4.npz │ │ ├── ja_speaker_5.npz │ │ ├── ja_speaker_6.npz │ │ ├── ja_speaker_7.npz │ │ ├── ja_speaker_8.npz │ │ ├── ja_speaker_9.npz │ │ ├── ko_speaker_0.npz │ │ ├── ko_speaker_1.npz │ │ ├── ko_speaker_2.npz │ │ ├── ko_speaker_3.npz │ │ ├── ko_speaker_4.npz │ │ ├── ko_speaker_5.npz │ │ ├── ko_speaker_6.npz │ │ ├── ko_speaker_7.npz │ │ ├── ko_speaker_8.npz │ │ ├── ko_speaker_9.npz │ │ ├── pl_speaker_0.npz │ │ ├── pl_speaker_1.npz │ │ ├── pl_speaker_2.npz │ │ ├── pl_speaker_3.npz │ │ ├── pl_speaker_4.npz │ │ ├── pl_speaker_5.npz │ │ ├── pl_speaker_6.npz │ │ ├── pl_speaker_7.npz │ │ ├── pl_speaker_8.npz │ │ ├── pl_speaker_9.npz │ │ ├── pt_speaker_0.npz │ │ ├── pt_speaker_1.npz │ │ ├── pt_speaker_2.npz │ │ ├── pt_speaker_3.npz │ │ ├── pt_speaker_4.npz │ │ ├── pt_speaker_5.npz │ │ ├── pt_speaker_6.npz │ │ ├── pt_speaker_7.npz │ │ ├── pt_speaker_8.npz │ │ ├── pt_speaker_9.npz │ │ ├── ru_speaker_0.npz │ │ ├── ru_speaker_1.npz │ │ ├── ru_speaker_2.npz │ │ ├── ru_speaker_3.npz │ │ ├── ru_speaker_4.npz │ │ ├── ru_speaker_5.npz │ │ ├── ru_speaker_6.npz │ │ ├── ru_speaker_7.npz │ │ ├── ru_speaker_8.npz │ │ ├── ru_speaker_9.npz │ │ ├── tr_speaker_0.npz │ │ ├── tr_speaker_1.npz │ │ ├── tr_speaker_2.npz │ │ ├── tr_speaker_3.npz │ │ ├── tr_speaker_4.npz │ │ ├── tr_speaker_5.npz │ │ ├── tr_speaker_6.npz │ │ ├── tr_speaker_7.npz │ │ ├── tr_speaker_8.npz │ │ ├── tr_speaker_9.npz │ │ ├── zh_speaker_0.npz │ │ ├── zh_speaker_1.npz │ │ ├── 
zh_speaker_2.npz │ │ ├── zh_speaker_3.npz │ │ ├── zh_speaker_4.npz │ │ ├── zh_speaker_5.npz │ │ ├── zh_speaker_6.npz │ │ ├── zh_speaker_7.npz │ │ ├── zh_speaker_8.npz │ │ ├── zh_speaker_9.npz │ │ └── readme.md ├── __init__.py ├── api.py ├── model_fine.py ├── model.py └── generation.py ├── .gitignore ├── pyproject.toml ├── LICENSE.md ├── model-card.md ├── rvc_test.ipynb ├── generate.ipynb ├── rvc_infer.py ├── clone_voice.ipynb ├── README.md ├── generate_chunked.ipynb └── test_models.ipynb /datasets/.tmp: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /hubert/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: serp-ai 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /bark/assets/prompts/announcer.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/announcer.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_5.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_7.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/de_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_8.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/en_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/es_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_0.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/fr_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_1.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/hi_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_2.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/it_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_4.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ja_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_5.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ko_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pl_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_8.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/pt_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_9.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/ru_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/tr_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_0.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serp-ai/bark-with-voice-clone/HEAD/bark/assets/prompts/zh_speaker_9.npz -------------------------------------------------------------------------------- /bark/__init__.py: -------------------------------------------------------------------------------- 1 | from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt 2 | from .generation import SAMPLE_RATE, preload_models 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.wav 3 | _temp/ 4 | models/ 5 | wandb/ 6 | *_output/ 7 | output.npz 8 | joe_biden_state_of_union/ 9 | Retrieval-based-Voice-Conversion-WebUI/ 10 | devin-youtube/ 11 | train_rvc.ipynb 12 | *.pt 
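The `bark/__init__.py` module above re-exports the package's public entry points (`generate_audio`, `text_to_semantic`, `semantic_to_waveform`, `save_as_prompt`, `SAMPLE_RATE`, `preload_models`). Below is a minimal sketch of wiring them together for a quick test; the `generate_audio` keyword names are assumed from upstream Bark (this fork's exact signature may differ), and `en_speaker_0` refers to one of the bundled prompt files listed earlier.

```python
# Minimal end-to-end sketch using the public API re-exported by bark/__init__.py.
# Keyword names assumed from upstream Bark; "en_speaker_0" is one of the bundled prompts.
from scipy.io.wavfile import write as write_wav

from bark import SAMPLE_RATE, generate_audio, preload_models

preload_models()  # download/load the text, coarse, fine and EnCodec models

audio_array = generate_audio(
    "Hello! This is a short Bark test sentence.",
    history_prompt="en_speaker_0",  # bundled .npz prompt from bark/assets/prompts
    text_temp=0.7,
    waveform_temp=0.7,
)

write_wav("bark_generation.wav", SAMPLE_RATE, audio_array)  # SAMPLE_RATE is 24 kHz
```

The result is a mono float waveform at `SAMPLE_RATE`, so it can equally be played back with `IPython.display.Audio(audio_array, rate=SAMPLE_RATE)` as in the notebooks later in this listing.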
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "suno-bark" 7 | version = "0.0.1a" 8 | description = "Bark text to audio model" 9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | authors = [ 12 | {name = "Suno Inc", email = "hello@suno.ai"}, 13 | ] 14 | # Apache 2.0 15 | license = {file = "LICENSE"} 16 | 17 | dependencies = [ 18 | "boto3", 19 | "encodec", 20 | "funcy", 21 | "numpy", 22 | "scipy", 23 | "tokenizers", 24 | "torch", 25 | "tqdm", 26 | "transformers", 27 | ] 28 | 29 | [project.urls] 30 | source = "https://github.com/suno-ai/bark" 31 | 32 | [project.optional-dependencies] 33 | dev = [ 34 | "bandit", 35 | "black", 36 | "codecov", 37 | "flake8", 38 | "huggingface-hub", 39 | "hypothesis>=6.14,<7", 40 | "isort>=5.0.0,<6", 41 | "jupyter", 42 | "mypy", 43 | "nbconvert", 44 | "nbformat", 45 | "pydocstyle", 46 | "pylint", 47 | "pytest", 48 | "pytest-cov", 49 | ] 50 | 51 | [tool.setuptools] 52 | packages = ["bark"] 53 | 54 | [tool.setuptools.package-data] 55 | bark = ["assets/prompts/*.npz"] 56 | 57 | [tool.black] 58 | line-length = 100 59 | -------------------------------------------------------------------------------- /bark/assets/prompts/readme.md: -------------------------------------------------------------------------------- 1 | # Example Prompts Data 2 | 3 | The provided data is in the .npz format, which is a file format used in Python for storing arrays and data. The data contains three arrays: semantic_prompt, coarse_prompt, and fine_prompt. 4 | 5 | ```semantic_prompt``` 6 | 7 | The semantic_prompt array contains a sequence of token IDs generated by the BERT tokenizer from Hugging Face. These tokens encode the text input and are used as an input to generate the audio output. The shape of this array is (n,), where n is the number of tokens in the input text. 8 | 9 | ```coarse_prompt``` 10 | 11 | The coarse_prompt array is an intermediate output of the text-to-speech pipeline, and contains token IDs generated by the first two codebooks of the EnCodec Codec from Facebook. This step converts the semantic tokens into a different representation that is better suited for the subsequent step. The shape of this array is (2, m), where m is the number of tokens after conversion by the EnCodec Codec. 12 | 13 | ```fine_prompt``` 14 | 15 | The fine_prompt array is a further processed output of the pipeline, and contains 8 codebooks from the EnCodec Codec. These codebooks represent the final stage of tokenization, and the resulting tokens are used to generate the audio output. The shape of this array is (8, p), where p is the number of tokens after further processing by the EnCodec Codec. 16 | 17 | Overall, these arrays represent different stages of a text-to-speech pipeline that converts text input into synthesized audio output. The semantic_prompt array represents the input text, while coarse_prompt and fine_prompt represent intermediate and final stages of tokenization, respectively. 
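To make the three arrays concrete, the following sketch loads one of the bundled prompts and prints the shapes described above. The key names come from this readme; the actual lengths `n`, `m`, and `p` vary per prompt.

```python
# Inspect a bundled voice prompt; key names follow the description in this readme.
import numpy as np

prompt = np.load("bark/assets/prompts/en_speaker_0.npz")

semantic = prompt["semantic_prompt"]  # shape (n,)   - semantic token ids
coarse = prompt["coarse_prompt"]      # shape (2, m) - first two EnCodec codebooks
fine = prompt["fine_prompt"]          # shape (8, p) - all eight EnCodec codebooks

print(semantic.shape, coarse.shape, fine.shape)
```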
18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /hubert/hubert_manager.py: -------------------------------------------------------------------------------- 1 | # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer 2 | 3 | import os.path 4 | import shutil 5 | import urllib.request 6 | 7 | import huggingface_hub 8 | 9 | 10 | class HuBERTManager: 11 | @staticmethod 12 | def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'): 13 | install_dir = os.path.join('data', 'models', 'hubert') 14 | if not os.path.isdir(install_dir): 15 | os.makedirs(install_dir, exist_ok=True) 16 | install_file = os.path.join(install_dir, file_name) 17 | if not os.path.isfile(install_file): 18 | print('Downloading HuBERT base model') 19 | urllib.request.urlretrieve(download_url, install_file) 20 | print('Downloaded HuBERT') 21 | return install_file 22 | 23 | 24 | @staticmethod 25 | def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'): 26 | install_dir = os.path.join('data', 'models', 'hubert') 27 | if not os.path.isdir(install_dir): 28 | os.makedirs(install_dir, exist_ok=True) 29 | install_file = os.path.join(install_dir, local_file) 30 | if not os.path.isfile(install_file): 31 | print('Downloading HuBERT custom tokenizer') 32 | huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False) 33 | shutil.move(os.path.join(install_dir, model), install_file) 34 | print('Downloaded tokenizer') 35 | return install_file 36 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 [SERP](https://serp.co/) | [SERP AI](https://serp.ai/) | [DS](https://devinschumacher.com/) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | # A humble request 24 | 25 | Our mission is to make artificial intelligence accessible & enjoyable, so we can all build bridges to the future, together. 26 | 27 | Please, feel free to use this as you see fit in accordance with the law & ideally inline with our values of accessibility, equality & AI for all. 28 | 29 | We only have one humble request (not requirement) ... 
that you represent these values by adding one of our (extremely awesome) AI badges on your website / github / etc. 30 | 31 | 👉 You can generate & customize your own here: [https://serp.ly/@serpai/badges/ai](https://serp.ly/@serpai/badges/ai) 32 | 33 | Thank you! 34 | -------------------------------------------------------------------------------- /model-card.md: -------------------------------------------------------------------------------- 1 | # Model Card: Bark 2 | 3 | This is the official codebase for running the text to audio model, from Suno.ai. 4 | 5 | The following is additional information about the models released here. 6 | 7 | ## Model Details 8 | 9 | Bark is a series of three transformer models that turn text into audio. 10 | ### Text to semantic tokens 11 | - Input: text, tokenized with [BERT tokenizer from Hugging Face](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertTokenizer) 12 | - Output: semantic tokens that encode the audio to be generated 13 | 14 | ### Semantic to coarse tokens 15 | - Input: semantic tokens 16 | - Output: tokens from the first two codebooks of the [EnCodec Codec](https://github.com/facebookresearch/encodec) from Facebook 17 | 18 | ### Coarse to fine tokens 19 | - Input: the first two codebooks from EnCodec 20 | - Output: 8 codebooks from EnCodec 21 | 22 | ### Architecture 23 | | Model | Parameters | Attention | Output Vocab size | 24 | |:-------------------------:|:----------:|------------|:-----------------:| 25 | | Text to semantic tokens | 80 M | Causal | 10,000 | 26 | | Semantic to coarse tokens | 80 M | Causal | 2x 1,024 | 27 | | Coarse to fine tokens | 80 M | Non-causal | 6x 1,024 | 28 | 29 | 30 | ### Release date 31 | April 2023 32 | 33 | ## Broader Implications 34 | We anticipate that this model's text to audio capabilities can be used to improve accessibility tools in a variety of languages. 35 | Straightforward improvements will allow models to run faster than realtime, rendering them useful for applications such as virtual assistants. 36 | 37 | While we hope that this release will enable users to express their creativity and build applications that are a force 38 | for good, we acknowledge that any text to audio model has the potential for dual use. While it is not straightforward 39 | to voice clone known people with Bark, such models can still be used for nefarious purposes. To further reduce the chances of unintended use of Bark, 40 | we also release a simple classifier to detect Bark-generated audio with high accuracy (see notebooks section of the main repository).
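The staged design described in this model card corresponds to the helper functions in `bark/api.py` further down in this listing. Below is a hedged sketch of running the first stage and the remaining stages separately; `text_to_semantic`'s signature appears in `api.py` below, while the `semantic_to_waveform` arguments are assumed from upstream Bark.

```python
# Stage-by-stage generation mirroring the model card: text -> semantic tokens,
# then semantic -> coarse -> fine -> waveform (handled inside semantic_to_waveform).
from bark import preload_models, semantic_to_waveform, text_to_semantic

preload_models()

speaker = "en_speaker_3"  # any bundled prompt, or None for an unconditioned voice

semantic_tokens = text_to_semantic(
    "Splitting generation into stages makes the pipeline easier to inspect.",
    history_prompt=speaker,
    temp=0.7,
)
audio_array = semantic_to_waveform(semantic_tokens, history_prompt=speaker, temp=0.7)
```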
41 | -------------------------------------------------------------------------------- /rvc_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from rvc_infer import get_vc, vc_single" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "model_path = \"Retrieval-based-Voice-Conversion-WebUI/weights/mi-test.pth\"\n", 19 | "device=\"cuda:0\"\n", 20 | "is_half=True" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "get_vc(model_path, device, is_half)" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "index_rate = 0.75\n", 39 | "f0up_key = -6\n", 40 | "filter_radius = 3\n", 41 | "rms_mix_rate = 0.25\n", 42 | "protect = 0.33\n", 43 | "resample_sr = 48000\n", 44 | "f0method = \"harvest\" #harvest or pm\n", 45 | "input_path = \"output/audio.wav\"\n", 46 | "index_path = \"Retrieval-based-Voice-Conversion-WebUI/logs/mi-test/added_IVF256_Flat_nprobe_1_mi-test_v2.index\"\n", 47 | "\n", 48 | "wav_opt = vc_single(0,input_path,f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "from IPython.display import Audio\n", 58 | "# play audio\n", 59 | "Audio(wav_opt, rate=48000)" 60 | ] 61 | } 62 | ], 63 | "metadata": { 64 | "kernelspec": { 65 | "display_name": "Python 3", 66 | "language": "python", 67 | "name": "python3" 68 | }, 69 | "language_info": { 70 | "codemirror_mode": { 71 | "name": "ipython", 72 | "version": 3 73 | }, 74 | "file_extension": ".py", 75 | "mimetype": "text/x-python", 76 | "name": "python", 77 | "nbconvert_exporter": "python", 78 | "pygments_lexer": "ipython3", 79 | "version": "3.10.8" 80 | }, 81 | "orig_nbformat": 4 82 | }, 83 | "nbformat": 4, 84 | "nbformat_minor": 2 85 | } 86 | -------------------------------------------------------------------------------- /hubert/pre_kmeans_hubert.py: -------------------------------------------------------------------------------- 1 | """ 2 | Modified HuBERT model without kmeans. 
3 | Original author: https://github.com/lucidrains/ 4 | Modified by: https://www.github.com/gitmylo/ 5 | License: MIT 6 | """ 7 | 8 | # Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py 9 | 10 | from pathlib import Path 11 | 12 | import torch 13 | from torch import nn 14 | from einops import pack, unpack 15 | 16 | import fairseq 17 | 18 | from torchaudio.functional import resample 19 | 20 | from audiolm_pytorch.utils import curtail_to_multiple 21 | 22 | import logging 23 | logging.root.setLevel(logging.ERROR) 24 | 25 | 26 | def exists(val): 27 | return val is not None 28 | 29 | 30 | def default(val, d): 31 | return val if exists(val) else d 32 | 33 | 34 | class CustomHubert(nn.Module): 35 | """ 36 | checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert 37 | or you can train your own 38 | """ 39 | 40 | def __init__( 41 | self, 42 | checkpoint_path, 43 | target_sample_hz=16000, 44 | seq_len_multiple_of=None, 45 | output_layer=9, 46 | device=None 47 | ): 48 | super().__init__() 49 | self.target_sample_hz = target_sample_hz 50 | self.seq_len_multiple_of = seq_len_multiple_of 51 | self.output_layer = output_layer 52 | 53 | if device is not None: 54 | self.to(device) 55 | 56 | model_path = Path(checkpoint_path) 57 | 58 | assert model_path.exists(), f'path {checkpoint_path} does not exist' 59 | 60 | checkpoint = torch.load(checkpoint_path) 61 | load_model_input = {checkpoint_path: checkpoint} 62 | model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input) 63 | 64 | if device is not None: 65 | model[0].to(device) 66 | 67 | self.model = model[0] 68 | self.model.eval() 69 | 70 | @property 71 | def groups(self): 72 | return 1 73 | 74 | @torch.no_grad() 75 | def forward( 76 | self, 77 | wav_input, 78 | flatten=True, 79 | input_sample_hz=None 80 | ): 81 | device = wav_input.device 82 | 83 | if exists(input_sample_hz): 84 | wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz) 85 | 86 | if exists(self.seq_len_multiple_of): 87 | wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of) 88 | 89 | embed = self.model( 90 | wav_input, 91 | features_only=True, 92 | mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code 93 | output_layer=self.output_layer 94 | ) 95 | 96 | embed, packed_shape = pack([embed['x']], '* d') 97 | 98 | # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy()) 99 | 100 | codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long() 101 | 102 | if flatten: 103 | return codebook_indices 104 | 105 | codebook_indices, = unpack(codebook_indices, packed_shape, '*') 106 | return codebook_indices 107 | -------------------------------------------------------------------------------- /bark/api.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | 5 | from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic 6 | 7 | 8 | def text_to_semantic( 9 | text: str, 10 | history_prompt: Optional[str] = None, 11 | temp: float = 0.7, 12 | silent: bool = False, 13 | ): 14 | """Generate semantic array from text. 
15 | 16 | Args: 17 | text: text to be turned into audio 18 | history_prompt: history choice for audio cloning 19 | temp: generation temperature (1.0 more diverse, 0.0 more conservative) 20 | silent: disable progress bar 21 | 22 | Returns: 23 | numpy semantic array to be fed into `semantic_to_waveform` 24 | """ 25 | x_semantic = generate_text_semantic( 26 | text, 27 | history_prompt=history_prompt, 28 | temp=temp, 29 | silent=silent, 30 | use_kv_caching=True 31 | ) 32 | return x_semantic 33 | 34 | 35 | def semantic_to_waveform( 36 | semantic_tokens: np.ndarray, 37 | history_prompt: Optional[str] = None, 38 | temp: float = 0.7, 39 | silent: bool = False, 40 | output_full: bool = False, 41 | ): 42 | """Generate audio array from semantic input. 43 | 44 | Args: 45 | semantic_tokens: semantic token output from `text_to_semantic` 46 | history_prompt: history choice for audio cloning 47 | temp: generation temperature (1.0 more diverse, 0.0 more conservative) 48 | silent: disable progress bar 49 | output_full: return full generation to be used as a history prompt 50 | 51 | Returns: 52 | numpy audio array at sample frequency 24khz 53 | """ 54 | coarse_tokens = generate_coarse( 55 | semantic_tokens, 56 | history_prompt=history_prompt, 57 | temp=temp, 58 | silent=silent, 59 | use_kv_caching=True 60 | ) 61 | fine_tokens = generate_fine( 62 | coarse_tokens, 63 | history_prompt=history_prompt, 64 | temp=0.5, 65 | ) 66 | audio_arr = codec_decode(fine_tokens) 67 | if output_full: 68 | full_generation = { 69 | "semantic_prompt": semantic_tokens, 70 | "coarse_prompt": coarse_tokens, 71 | "fine_prompt": fine_tokens, 72 | } 73 | return full_generation, audio_arr 74 | return audio_arr 75 | 76 | 77 | def save_as_prompt(filepath, full_generation): 78 | assert(filepath.endswith(".npz")) 79 | assert(isinstance(full_generation, dict)) 80 | assert("semantic_prompt" in full_generation) 81 | assert("coarse_prompt" in full_generation) 82 | assert("fine_prompt" in full_generation) 83 | np.savez(filepath, **full_generation) 84 | 85 | 86 | def generate_audio( 87 | text: str, 88 | history_prompt: Optional[str] = None, 89 | text_temp: float = 0.7, 90 | waveform_temp: float = 0.7, 91 | silent: bool = False, 92 | output_full: bool = False, 93 | ): 94 | """Generate audio array from input text. 
95 | 96 | Args: 97 | text: text to be turned into audio 98 | history_prompt: history choice for audio cloning 99 | text_temp: generation temperature (1.0 more diverse, 0.0 more conservative) 100 | waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative) 101 | silent: disable progress bar 102 | output_full: return full generation to be used as a history prompt 103 | 104 | Returns: 105 | numpy audio array at sample frequency 24khz 106 | """ 107 | semantic_tokens = text_to_semantic( 108 | text, 109 | history_prompt=history_prompt, 110 | temp=text_temp, 111 | silent=silent, 112 | ) 113 | out = semantic_to_waveform( 114 | semantic_tokens, 115 | history_prompt=history_prompt, 116 | temp=waveform_temp, 117 | silent=silent, 118 | output_full=output_full, 119 | ) 120 | if output_full: 121 | full_generation, audio_arr = out 122 | return full_generation, audio_arr 123 | else: 124 | audio_arr = out 125 | return audio_arr 126 | -------------------------------------------------------------------------------- /utils/lora.py: -------------------------------------------------------------------------------- 1 | # Adapted from https://github.com/microsoft/DeepSpeedExamples/blob/master/applications/DeepSpeed-Chat/training/utils/module/lora.py 2 | 3 | import math 4 | import torch 5 | from torch import nn 6 | import torch.nn.functional as F 7 | 8 | class LinearLayer_LoRA(nn.Module): 9 | # a simple implementation of LoRA 10 | def __init__(self, 11 | weight, 12 | lora_dim=0, 13 | lora_scaling=1, 14 | lora_dropout=0, 15 | bias=None): 16 | super(LinearLayer_LoRA, self).__init__() 17 | self.weight = weight 18 | self.bias = bias 19 | 20 | if lora_dim <= 0: 21 | raise ValueError( 22 | "You are training to use LoRA, whose reduced dim should be larger than 1" 23 | ) 24 | 25 | rows, columns = weight.shape 26 | self.lora_right_weight = nn.Parameter(torch.zeros( 27 | columns, 28 | lora_dim)) # apply transpose so in forward we do not need to 29 | self.lora_left_weight = nn.Parameter(torch.zeros(lora_dim, rows)) 30 | self.lora_scaling = lora_scaling / lora_dim 31 | 32 | if lora_dropout > 0: 33 | self.lora_dropout = nn.Dropout(lora_dropout) 34 | else: 35 | self.lora_dropout = nn.Identity() 36 | 37 | self.reset_parameters() 38 | # disable the original weight gradient 39 | self.weight.requires_grad = False 40 | # fuse LoRA to the original weight 41 | self.fuse_lora = False 42 | 43 | def eval(self): 44 | self.lora_dropout.eval() 45 | 46 | # self.fuse_lora_weight() 47 | 48 | def train(self, mode=True): 49 | self.lora_dropout.train(mode) 50 | # self.unfuse_lora_weight() 51 | 52 | def reset_parameters(self): 53 | nn.init.kaiming_uniform_(self.lora_right_weight, a=math.sqrt(5)) 54 | nn.init.zeros_(self.lora_left_weight) 55 | 56 | def fuse_lora_weight(self): 57 | if not self.fuse_lora: 58 | self.weight.data += self.lora_scaling * torch.matmul( 59 | self.lora_left_weight.t(), self.lora_right_weight.t()) 60 | self.fuse_lora = True 61 | 62 | def unfuse_lora_weight(self): 63 | if self.fuse_lora: 64 | self.weight.data -= self.lora_scaling * torch.matmul( 65 | self.lora_left_weight.t(), self.lora_right_weight.t()) 66 | self.fuse_lora = False 67 | 68 | def forward(self, input): 69 | if self.fuse_lora: 70 | return F.linear(input, self.weight, self.bias) 71 | else: 72 | return F.linear( 73 | input, self.weight, 74 | self.bias) + (self.lora_dropout(input) @ self.lora_right_weight 75 | @ self.lora_left_weight) * self.lora_scaling 76 | 77 | 78 | def recursive_getattr(model, module_name): 79 | """ 80 | From 
https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/compression/helper.py 81 | Recursively get the attribute of a module. 82 | Args: 83 | model (`torch.nn.Module`) 84 | The model to get the attribute from. 85 | module_name (`str`) 86 | The name of the module to get the attribute from. 87 | """ 88 | split_list = module_name.split('.') 89 | output = model 90 | for name in split_list: 91 | output = getattr(output, name) 92 | return output 93 | 94 | 95 | def recursive_setattr(model, module_name, module): 96 | """ 97 | From https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/compression/helper.py 98 | Recursively set the attribute of a module. 99 | Args: 100 | model (`torch.nn.Module`) 101 | The model to set the attribute in. 102 | module_name (`str`) 103 | The name of the module to set the attribute in. 104 | module (`torch.nn.Module`) 105 | The module to set the attribute to. 106 | """ 107 | split_list = module_name.split('.') 108 | output = model 109 | for name in split_list[:-1]: 110 | output = getattr(output, name) 111 | output.__setattr__(split_list[-1], module) 112 | 113 | 114 | # convert the linear layer to LoRA 115 | def convert_linear_layer_to_lora(model, 116 | part_module_name, 117 | lora_dim=0, 118 | lora_scaling=1, 119 | lora_dropout=0): 120 | replace_name = [] 121 | for name, module in model.named_modules(): 122 | if isinstance(module, nn.Linear) and part_module_name in name: 123 | replace_name.append(name) 124 | for name in replace_name: 125 | module = recursive_getattr(model, name) 126 | tmp = LinearLayer_LoRA( 127 | module.weight, lora_dim, lora_scaling, lora_dropout, 128 | module.bias).to(module.weight.device).to(module.weight.dtype) 129 | recursive_setattr(model, name, tmp) 130 | return model 131 | 132 | 133 | # convert the LoRA layer to linear layer 134 | def convert_lora_to_linear_layer(model): 135 | replace_name = [] 136 | for name, module in model.named_modules(): 137 | if isinstance(module, LinearLayer_LoRA): 138 | replace_name.append(name) 139 | for name in replace_name: 140 | module = recursive_getattr(model, name) 141 | module.fuse_lora_weight() 142 | return model 143 | 144 | 145 | def only_optimize_lora_parameters(model): 146 | # turn off the gradient of all the parameters except the LoRA parameters 147 | for name, param in model.named_parameters(): 148 | if "lora_right_weight" in name or "lora_left_weight" in name: 149 | param.requires_grad = True 150 | else: 151 | param.requires_grad = False 152 | return model -------------------------------------------------------------------------------- /generate.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from IPython.display import Audio\n", 10 | "from scipy.io.wavfile import write as write_wav\n", 11 | "\n", 12 | "from bark.api import generate_audio\n", 13 | "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "semantic_path = \"semantic_output/pytorch_model.bin\" # set to None if you don't want to use finetuned semantic\n", 23 | "coarse_path = \"coarse_output/pytorch_model.bin\" # set to None if you don't want to use finetuned coarse\n", 24 | "fine_path = \"fine_output/pytorch_model.bin\" # set to None if you 
don't want to use finetuned fine\n", 25 | "use_rvc = True # Set to False to use bark without RVC\n", 26 | "rvc_name = 'mi-test'\n", 27 | "rvc_path = f\"Retrieval-based-Voice-Conversion-WebUI/weights/{rvc_name}.pth\"\n", 28 | "index_path = f\"Retrieval-based-Voice-Conversion-WebUI/logs/{rvc_name}/added_IVF256_Flat_nprobe_1_{rvc_name}_v2.index\"\n", 29 | "device=\"cuda:0\"\n", 30 | "is_half=True" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# download and load all models\n", 40 | "preload_models(\n", 41 | " text_use_gpu=True,\n", 42 | " text_use_small=False,\n", 43 | " text_model_path=semantic_path,\n", 44 | " coarse_use_gpu=True,\n", 45 | " coarse_use_small=False,\n", 46 | " coarse_model_path=coarse_path,\n", 47 | " fine_use_gpu=True,\n", 48 | " fine_use_small=False,\n", 49 | " fine_model_path=fine_path,\n", 50 | " codec_use_gpu=True,\n", 51 | " force_reload=False,\n", 52 | " path=\"models\"\n", 53 | ")\n", 54 | "\n", 55 | "if use_rvc:\n", 56 | " from rvc_infer import get_vc, vc_single\n", 57 | " get_vc(rvc_path, device, is_half)" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "# simple generation\n", 67 | "text_prompt = \"Hello, my name is Serpy. And, uh — and I like pizza. [laughs]\"\n", 68 | "voice_name = \"speaker_0\" # use your custom voice name here if you have on\n", 69 | "\n", 70 | "filepath = \"output/audio.wav\"\n", 71 | "audio_array = generate_audio(text_prompt, history_prompt=voice_name, text_temp=0.7, waveform_temp=0.7)\n", 72 | "write_wav(filepath, SAMPLE_RATE, audio_array)\n", 73 | "\n", 74 | "if use_rvc:\n", 75 | " index_rate = 0.75\n", 76 | " f0up_key = -6\n", 77 | " filter_radius = 3\n", 78 | " rms_mix_rate = 0.25\n", 79 | " protect = 0.33\n", 80 | " resample_sr = SAMPLE_RATE\n", 81 | " f0method = \"harvest\" #harvest or pm\n", 82 | " try:\n", 83 | " audio_array = vc_single(0,filepath,f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 84 | " except:\n", 85 | " audio_array = vc_single(0,filepath,f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 86 | " write_wav(filepath, SAMPLE_RATE, audio_array)\n", 87 | "\n", 88 | "Audio(audio_array, rate=SAMPLE_RATE)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "# generation with more control\n", 98 | "text_prompt = \"Hello, my name is Serpy. And, uh — and I like pizza. 
[laughs]\"\n", 99 | "voice_name = \"speaker_0\" # use your custom voice name here if you have on\n", 100 | "\n", 101 | "filepath = \"output/audio.wav\"\n", 102 | "\n", 103 | "x_semantic = generate_text_semantic(\n", 104 | " text_prompt,\n", 105 | " history_prompt=voice_name,\n", 106 | " temp=0.7,\n", 107 | " top_k=50,\n", 108 | " top_p=0.95,\n", 109 | ")\n", 110 | "\n", 111 | "x_coarse_gen = generate_coarse(\n", 112 | " x_semantic,\n", 113 | " history_prompt=voice_name,\n", 114 | " temp=0.7,\n", 115 | " top_k=50,\n", 116 | " top_p=0.95,\n", 117 | ")\n", 118 | "x_fine_gen = generate_fine(\n", 119 | " x_coarse_gen,\n", 120 | " history_prompt=voice_name,\n", 121 | " temp=0.5,\n", 122 | ")\n", 123 | "audio_array = codec_decode(x_fine_gen)\n", 124 | "write_wav(filepath, SAMPLE_RATE, audio_array)\n", 125 | "\n", 126 | "if use_rvc:\n", 127 | " index_rate = 0.75\n", 128 | " f0up_key = -6\n", 129 | " filter_radius = 3\n", 130 | " rms_mix_rate = 0.25\n", 131 | " protect = 0.33\n", 132 | " resample_sr = SAMPLE_RATE\n", 133 | " f0method = \"harvest\" #harvest or pm\n", 134 | " try:\n", 135 | " audio_array = vc_single(0,filepath,f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 136 | " except:\n", 137 | " audio_array = vc_single(0,filepath,f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 138 | " write_wav(filepath, SAMPLE_RATE, audio_array)\n", 139 | "\n", 140 | "Audio(audio_array, rate=SAMPLE_RATE)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "metadata": {}, 147 | "outputs": [], 148 | "source": [] 149 | } 150 | ], 151 | "metadata": { 152 | "kernelspec": { 153 | "display_name": "Python 3", 154 | "language": "python", 155 | "name": "python3" 156 | }, 157 | "language_info": { 158 | "codemirror_mode": { 159 | "name": "ipython", 160 | "version": 3 161 | }, 162 | "file_extension": ".py", 163 | "mimetype": "text/x-python", 164 | "name": "python", 165 | "nbconvert_exporter": "python", 166 | "pygments_lexer": "ipython3", 167 | "version": "3.10.8" 168 | }, 169 | "orig_nbformat": 4 170 | }, 171 | "nbformat": 4, 172 | "nbformat_minor": 2 173 | } 174 | -------------------------------------------------------------------------------- /bark/model_fine.py: -------------------------------------------------------------------------------- 1 | """ 2 | Much of this code is adapted from Andrej Karpathy's NanoGPT 3 | (https://github.com/karpathy/nanoGPT) 4 | """ 5 | from dataclasses import dataclass 6 | import math 7 | 8 | import torch 9 | import torch.nn as nn 10 | from torch.nn import functional as F 11 | 12 | from .model import GPT, GPTConfig, MLP 13 | 14 | 15 | class NonCausalSelfAttention(nn.Module): 16 | def __init__(self, config): 17 | super().__init__() 18 | assert config.n_embd % config.n_head == 0 19 | # key, query, value projections for all heads, but in a batch 20 | self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) 21 | # output projection 22 | self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) 23 | # regularization 24 | self.attn_dropout = nn.Dropout(config.dropout) 25 | self.resid_dropout = nn.Dropout(config.dropout) 26 | self.n_head = config.n_head 27 | self.n_embd = config.n_embd 28 | self.dropout = config.dropout 29 | # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary 30 | 
self.flash = ( 31 | hasattr(torch.nn.functional, "scaled_dot_product_attention") and self.dropout == 0.0 32 | ) 33 | 34 | def forward(self, x): 35 | B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) 36 | 37 | # calculate query, key, values for all heads in batch and move head forward to be the batch dim 38 | q, k, v = self.c_attn(x).split(self.n_embd, dim=2) 39 | k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 40 | q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 41 | v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 42 | 43 | # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) 44 | if self.flash: 45 | # efficient attention using Flash Attention CUDA kernels 46 | y = torch.nn.functional.scaled_dot_product_attention( 47 | q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=False 48 | ) 49 | else: 50 | # manual implementation of attention 51 | att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) 52 | att = F.softmax(att, dim=-1) 53 | att = self.attn_dropout(att) 54 | y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) 55 | y = ( 56 | y.transpose(1, 2).contiguous().view(B, T, C) 57 | ) # re-assemble all head outputs side by side 58 | 59 | # output projection 60 | y = self.resid_dropout(self.c_proj(y)) 61 | return y 62 | 63 | 64 | class FineBlock(nn.Module): 65 | def __init__(self, config): 66 | super().__init__() 67 | self.ln_1 = nn.LayerNorm(config.n_embd) 68 | self.attn = NonCausalSelfAttention(config) 69 | self.ln_2 = nn.LayerNorm(config.n_embd) 70 | self.mlp = MLP(config) 71 | 72 | def forward(self, x): 73 | x = x + self.attn(self.ln_1(x)) 74 | x = x + self.mlp(self.ln_2(x)) 75 | return x 76 | 77 | 78 | class FineGPT(GPT): 79 | def __init__(self, config): 80 | super().__init__(config) 81 | del self.lm_head 82 | self.config = config 83 | self.n_codes_total = config.n_codes_total 84 | self.transformer = nn.ModuleDict( 85 | dict( 86 | wtes=nn.ModuleList( 87 | [ 88 | nn.Embedding(config.input_vocab_size, config.n_embd) 89 | for _ in range(config.n_codes_total) 90 | ] 91 | ), 92 | wpe=nn.Embedding(config.block_size, config.n_embd), 93 | drop=nn.Dropout(config.dropout), 94 | h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]), 95 | ln_f=nn.LayerNorm(config.n_embd), 96 | ) 97 | ) 98 | self.lm_heads = nn.ModuleList( 99 | [ 100 | nn.Linear(config.n_embd, config.output_vocab_size, bias=False) 101 | for _ in range(config.n_codes_given, self.n_codes_total) 102 | ] 103 | ) 104 | for i in range(self.n_codes_total - config.n_codes_given): 105 | self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight 106 | 107 | def forward(self, pred_idx, idx): 108 | device = idx.device 109 | b, t, codes = idx.size() 110 | assert ( 111 | t <= self.config.block_size 112 | ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" 113 | assert pred_idx > 0, "cannot predict 0th codebook" 114 | assert codes == self.n_codes_total, (b, t, codes) 115 | pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t) 116 | 117 | # forward the GPT model itself 118 | tok_embs = [ 119 | wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes) 120 | ] # token embeddings of shape (b, t, n_embd) 121 | tok_emb = torch.cat(tok_embs, dim=-1) 122 | pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd) 123 | x = tok_emb[:, :, 
:, : pred_idx + 1].sum(dim=-1) 124 | x = self.transformer.drop(x + pos_emb) 125 | for block in self.transformer.h: 126 | x = block(x) 127 | x = self.transformer.ln_f(x) 128 | logits = self.lm_heads[pred_idx - self.config.n_codes_given](x) 129 | return logits 130 | 131 | def get_num_params(self, non_embedding=True): 132 | """ 133 | Return the number of parameters in the model. 134 | For non-embedding count (default), the position embeddings get subtracted. 135 | The token embeddings would too, except due to the parameter sharing these 136 | params are actually used as weights in the final layer, so we include them. 137 | """ 138 | n_params = sum(p.numel() for p in self.parameters()) 139 | if non_embedding: 140 | for wte in self.transformer.wtes: 141 | n_params -= wte.weight.numel() 142 | n_params -= self.transformer.wpe.weight.numel() 143 | return n_params 144 | 145 | 146 | @dataclass 147 | class FineGPTConfig(GPTConfig): 148 | n_codes_total: int = 8 149 | n_codes_given: int = 1 150 | -------------------------------------------------------------------------------- /rvc_infer.py: -------------------------------------------------------------------------------- 1 | import os,sys,pdb,torch 2 | now_dir = os.getcwd() 3 | sys.path.append(now_dir) 4 | import argparse 5 | import glob 6 | import sys 7 | import torch 8 | from multiprocessing import cpu_count 9 | import ffmpeg 10 | import numpy as np 11 | 12 | 13 | def load_audio(file, sr): 14 | try: 15 | # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 16 | # This launches a subprocess to decode audio while down-mixing and resampling as necessary. 17 | # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 18 | file = ( 19 | file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") 20 | ) # 防止小白拷路径头尾带了空格和"和回车 21 | out, _ = ( 22 | ffmpeg.input(file, threads=0) 23 | .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) 24 | .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) 25 | ) 26 | except Exception as e: 27 | raise RuntimeError(f"Failed to load audio: {e}") 28 | 29 | return np.frombuffer(out, np.float32).flatten() 30 | 31 | 32 | class Config: 33 | def __init__(self,device,is_half): 34 | self.device = device 35 | self.is_half = is_half 36 | self.n_cpu = 0 37 | self.gpu_name = None 38 | self.gpu_mem = None 39 | self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() 40 | 41 | def device_config(self) -> tuple: 42 | if torch.cuda.is_available(): 43 | i_device = int(self.device.split(":")[-1]) 44 | self.gpu_name = torch.cuda.get_device_name(i_device) 45 | if ( 46 | ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) 47 | or "P40" in self.gpu_name.upper() 48 | or "1060" in self.gpu_name 49 | or "1070" in self.gpu_name 50 | or "1080" in self.gpu_name 51 | ): 52 | print("16系/10系显卡和P40强制单精度") 53 | self.is_half = False 54 | for config_file in ["32k.json", "40k.json", "48k.json"]: 55 | with open(f"configs/{config_file}", "r") as f: 56 | strr = f.read().replace("true", "false") 57 | with open(f"configs/{config_file}", "w") as f: 58 | f.write(strr) 59 | with open("trainset_preprocess_pipeline_print.py", "r") as f: 60 | strr = f.read().replace("3.7", "3.0") 61 | with open("trainset_preprocess_pipeline_print.py", "w") as f: 62 | f.write(strr) 63 | else: 64 | self.gpu_name = None 65 | self.gpu_mem = int( 66 | torch.cuda.get_device_properties(i_device).total_memory 67 | / 1024 68 | / 1024 69 | / 1024 70 | + 0.4 71 | ) 72 | if self.gpu_mem <= 4: 73 | with 
open("trainset_preprocess_pipeline_print.py", "r") as f: 74 | strr = f.read().replace("3.7", "3.0") 75 | with open("trainset_preprocess_pipeline_print.py", "w") as f: 76 | f.write(strr) 77 | elif torch.backends.mps.is_available(): 78 | print("没有发现支持的N卡, 使用MPS进行推理") 79 | self.device = "mps" 80 | else: 81 | print("没有发现支持的N卡, 使用CPU进行推理") 82 | self.device = "cpu" 83 | self.is_half = True 84 | 85 | if self.n_cpu == 0: 86 | self.n_cpu = cpu_count() 87 | 88 | if self.is_half: 89 | # 6G显存配置 90 | x_pad = 3 91 | x_query = 10 92 | x_center = 60 93 | x_max = 65 94 | else: 95 | # 5G显存配置 96 | x_pad = 1 97 | x_query = 6 98 | x_center = 38 99 | x_max = 41 100 | 101 | if self.gpu_mem != None and self.gpu_mem <= 4: 102 | x_pad = 1 103 | x_query = 5 104 | x_center = 30 105 | x_max = 32 106 | 107 | return x_pad, x_query, x_center, x_max 108 | 109 | 110 | now_dir=os.getcwd() 111 | sys.path.append(now_dir) 112 | sys.path.append(os.path.join(now_dir,"Retrieval-based-Voice-Conversion-WebUI")) 113 | from vc_infer_pipeline import VC 114 | from lib.infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono 115 | from fairseq import checkpoint_utils 116 | from scipy.io import wavfile 117 | 118 | hubert_model=None 119 | def load_hubert(): 120 | global hubert_model 121 | models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",) 122 | hubert_model = models[0] 123 | hubert_model = hubert_model.to(device) 124 | if(is_half):hubert_model = hubert_model.half() 125 | else:hubert_model = hubert_model.float() 126 | hubert_model.eval() 127 | 128 | def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,index_rate,filter_radius=3,resample_sr=48000,rms_mix_rate=0.25, protect=0.33): 129 | global tgt_sr,net_g,vc,hubert_model 130 | if input_audio is None:return "You need to upload an audio", None 131 | f0_up_key = int(f0_up_key) 132 | audio=load_audio(input_audio,16000) 133 | times = [0, 0, 0] 134 | if(hubert_model==None):load_hubert() 135 | if_f0 = cpt.get("f0", 1) 136 | version = cpt.get("version") 137 | audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,input_audio,times,f0_up_key,f0_method,file_index,index_rate,if_f0,filter_radius=filter_radius,tgt_sr=tgt_sr,resample_sr=resample_sr,rms_mix_rate=rms_mix_rate,version=version,protect=protect,f0_file=f0_file) 138 | # print(times) 139 | return audio_opt 140 | 141 | 142 | def get_vc(model_path, device_, is_half_): 143 | global n_spk,tgt_sr,net_g,vc,cpt,device,is_half 144 | device = device_ 145 | is_half = is_half_ 146 | config = Config(device, is_half) 147 | print("loading pth %s"%model_path) 148 | cpt = torch.load(model_path, map_location="cpu") 149 | tgt_sr = cpt["config"][-1] 150 | cpt["config"][-3]=cpt["weight"]["emb_g.weight"].shape[0]#n_spk 151 | if_f0=cpt.get("f0",1) 152 | version=cpt.get("version", "v2") 153 | if(if_f0==1): 154 | if version == "v1": 155 | net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) 156 | else: 157 | net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half) 158 | else: 159 | if version == "v1": 160 | net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) 161 | else: 162 | net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) 163 | del net_g.enc_q 164 | print(net_g.load_state_dict(cpt["weight"], strict=False)) # 不加这一行清不干净,真奇葩 165 | net_g.eval().to(device) 166 | if (is_half):net_g = net_g.half() 167 | else:net_g = net_g.float() 168 | vc = VC(tgt_sr, config) 169 | n_spk=cpt["config"][-3] 170 | 
-------------------------------------------------------------------------------- /hubert/customtokenizer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom tokenizer model. 3 | Author: https://www.github.com/gitmylo/ 4 | License: MIT 5 | """ 6 | 7 | import json 8 | import os.path 9 | from zipfile import ZipFile 10 | 11 | import numpy 12 | import torch 13 | from torch import nn, optim 14 | from torch.serialization import MAP_LOCATION 15 | 16 | 17 | class CustomTokenizer(nn.Module): 18 | def __init__(self, hidden_size=1024, input_size=768, output_size=10000, version=0): 19 | super(CustomTokenizer, self).__init__() 20 | next_size = input_size 21 | if version == 0: 22 | self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True) 23 | next_size = hidden_size 24 | if version == 1: 25 | self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True) 26 | self.intermediate = nn.Linear(hidden_size, 4096) 27 | next_size = 4096 28 | 29 | self.fc = nn.Linear(next_size, output_size) 30 | self.softmax = nn.LogSoftmax(dim=1) 31 | self.optimizer: optim.Optimizer = None 32 | self.lossfunc = nn.CrossEntropyLoss() 33 | self.input_size = input_size 34 | self.hidden_size = hidden_size 35 | self.output_size = output_size 36 | self.version = version 37 | 38 | def forward(self, x): 39 | x, _ = self.lstm(x) 40 | if self.version == 1: 41 | x = self.intermediate(x) 42 | x = self.fc(x) 43 | x = self.softmax(x) 44 | return x 45 | 46 | @torch.no_grad() 47 | def get_token(self, x): 48 | """ 49 | Used to get the token for the first 50 | :param x: An array with shape (N, input_size) where N is a whole number greater or equal to 1, and input_size is the input size used when creating the model. 51 | :return: An array with shape (N,) where N is the same as N from the input. Every number in the array is a whole number in range 0...output_size - 1 where output_size is the output size used when creating the model. 
52 | """ 53 | return torch.argmax(self(x), dim=1) 54 | 55 | def prepare_training(self): 56 | self.optimizer = optim.Adam(self.parameters(), 0.001) 57 | 58 | def train_step(self, x_train, y_train, log_loss=False): 59 | # y_train = y_train[:-1] 60 | # y_train = y_train[1:] 61 | 62 | optimizer = self.optimizer 63 | lossfunc = self.lossfunc 64 | # Zero the gradients 65 | self.zero_grad() 66 | 67 | # Forward pass 68 | y_pred = self(x_train) 69 | 70 | y_train_len = len(y_train) 71 | y_pred_len = y_pred.shape[0] 72 | 73 | if y_train_len > y_pred_len: 74 | diff = y_train_len - y_pred_len 75 | y_train = y_train[diff:] 76 | elif y_train_len < y_pred_len: 77 | diff = y_pred_len - y_train_len 78 | y_pred = y_pred[:-diff, :] 79 | 80 | y_train_hot = torch.zeros(len(y_train), self.output_size) 81 | y_train_hot[range(len(y_train)), y_train] = 1 82 | y_train_hot = y_train_hot.to('cuda') 83 | 84 | # Calculate the loss 85 | loss = lossfunc(y_pred, y_train_hot) 86 | 87 | # Print loss 88 | if log_loss: 89 | print('Loss', loss.item()) 90 | 91 | # Backward pass 92 | loss.backward() 93 | 94 | # Update the weights 95 | optimizer.step() 96 | 97 | def save(self, path): 98 | info_path = '.'.join(os.path.basename(path).split('.')[:-1]) + '/.info' 99 | torch.save(self.state_dict(), path) 100 | data_from_model = Data(self.input_size, self.hidden_size, self.output_size, self.version) 101 | with ZipFile(path, 'a') as model_zip: 102 | model_zip.writestr(info_path, data_from_model.save()) 103 | model_zip.close() 104 | 105 | @staticmethod 106 | def load_from_checkpoint(path, map_location: MAP_LOCATION = None): 107 | old = True 108 | with ZipFile(path) as model_zip: 109 | filesMatch = [file for file in model_zip.namelist() if file.endswith('/.info')] 110 | file = filesMatch[0] if filesMatch else None 111 | if file: 112 | old = False 113 | data_from_model = Data.load(model_zip.read(file).decode('utf-8')) 114 | model_zip.close() 115 | if old: 116 | model = CustomTokenizer() 117 | else: 118 | model = CustomTokenizer(data_from_model.hidden_size, data_from_model.input_size, data_from_model.output_size, data_from_model.version) 119 | model.load_state_dict(torch.load(path)) 120 | if map_location: 121 | model = model.to(map_location) 122 | return model 123 | 124 | 125 | 126 | class Data: 127 | input_size: int 128 | hidden_size: int 129 | output_size: int 130 | version: int 131 | 132 | def __init__(self, input_size=768, hidden_size=1024, output_size=10000, version=0): 133 | self.input_size = input_size 134 | self.hidden_size = hidden_size 135 | self.output_size = output_size 136 | self.version = version 137 | 138 | @staticmethod 139 | def load(string): 140 | data = json.loads(string) 141 | return Data(data['input_size'], data['hidden_size'], data['output_size'], data['version']) 142 | 143 | def save(self): 144 | data = { 145 | 'input_size': self.input_size, 146 | 'hidden_size': self.hidden_size, 147 | 'output_size': self.output_size, 148 | 'version': self.version, 149 | } 150 | return json.dumps(data) 151 | 152 | 153 | def auto_train(data_path, save_path='model.pth', load_model: str | None = None, save_epochs=1): 154 | data_x, data_y = [], [] 155 | 156 | if load_model and os.path.isfile(load_model): 157 | print('Loading model from', load_model) 158 | model_training = CustomTokenizer.load_from_checkpoint(load_model, 'cuda') 159 | else: 160 | print('Creating new model.') 161 | model_training = CustomTokenizer(version=1).to('cuda') # Settings for the model to run without lstm 162 | save_path = os.path.join(data_path, save_path) 163 | 
base_save_path = '.'.join(save_path.split('.')[:-1]) 164 | 165 | sem_string = '_semantic.npy' 166 | feat_string = '_semantic_features.npy' 167 | 168 | ready = os.path.join(data_path, 'ready') 169 | for input_file in os.listdir(ready): 170 | full_path = os.path.join(ready, input_file) 171 | if input_file.endswith(sem_string): 172 | data_y.append(numpy.load(full_path)) 173 | elif input_file.endswith(feat_string): 174 | data_x.append(numpy.load(full_path)) 175 | model_training.prepare_training() 176 | 177 | epoch = 1 178 | 179 | while 1: 180 | for i in range(save_epochs): 181 | j = 0 182 | for x, y in zip(data_x, data_y): 183 | model_training.train_step(torch.tensor(x).to('cuda'), torch.tensor(y).to('cuda'), j % 50 == 0) # Print loss every 50 steps 184 | j += 1 185 | save_p = save_path 186 | save_p_2 = f'{base_save_path}_epoch_{epoch}.pth' 187 | model_training.save(save_p) 188 | model_training.save(save_p_2) 189 | print(f'Epoch {epoch} completed') 190 | epoch += 1 191 | -------------------------------------------------------------------------------- /clone_voice.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from bark.generation import load_codec_model, generate_text_semantic\n", 10 | "from encodec.utils import convert_audio\n", 11 | "\n", 12 | "import torchaudio\n", 13 | "import torch\n", 14 | "\n", 15 | "device = 'cuda' # or 'cpu'\n", 16 | "model = load_codec_model(use_gpu=True if device == 'cuda' else False)" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "# From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer\n", 26 | "from hubert.hubert_manager import HuBERTManager\n", 27 | "hubert_manager = HuBERTManager()\n", 28 | "hubert_manager.make_sure_hubert_installed()\n", 29 | "hubert_manager.make_sure_tokenizer_installed()" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "# From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer \n", 39 | "# Load HuBERT for semantic tokens\n", 40 | "from hubert.pre_kmeans_hubert import CustomHubert\n", 41 | "from hubert.customtokenizer import CustomTokenizer\n", 42 | "\n", 43 | "# Load the HuBERT model\n", 44 | "hubert_model = CustomHubert(checkpoint_path='data/models/hubert/hubert.pt').to(device)\n", 45 | "\n", 46 | "# Load the CustomTokenizer model\n", 47 | "tokenizer = CustomTokenizer.load_from_checkpoint('data/models/hubert/tokenizer.pth').to(device) # Automatically uses the right layers" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "# Load and pre-process the audio waveform\n", 57 | "audio_filepath = 'audio.wav' # the audio you want to clone (under 13 seconds)\n", 58 | "wav, sr = torchaudio.load(audio_filepath)\n", 59 | "wav = convert_audio(wav, sr, model.sample_rate, model.channels)\n", 60 | "wav = wav.to(device)" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)\n", 70 | "semantic_tokens = tokenizer.get_token(semantic_vectors)" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 
null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "# Extract discrete codes from EnCodec\n", 80 | "with torch.no_grad():\n", 81 | " encoded_frames = model.encode(wav.unsqueeze(0))\n", 82 | "codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T]" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "# move codes to cpu\n", 92 | "codes = codes.cpu().numpy()\n", 93 | "# move semantic tokens to cpu\n", 94 | "semantic_tokens = semantic_tokens.cpu().numpy()" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "import numpy as np\n", 104 | "voice_name = 'output' # whatever you want the name of the voice to be\n", 105 | "output_path = 'bark/assets/prompts/' + voice_name + '.npz'\n", 106 | "np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# That's it! Now you can head over to the generate.ipynb and use your voice_name for the 'history_prompt'" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "# Heres the generation stuff copy-pasted for convenience" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "metadata": {}, 138 | "outputs": [], 139 | "source": [ 140 | "from bark.api import generate_audio\n", 141 | "from transformers import BertTokenizer\n", 142 | "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic\n", 143 | "\n", 144 | "# Enter your prompt and speaker here\n", 145 | "text_prompt = \"Hello, my name is Serpy. And, uh — and I like pizza. 
[laughs]\"\n", 146 | "voice_name = \"output\" # use your custom voice name here if you have one" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "# download and load all models\n", 156 | "preload_models(\n", 157 | " text_use_gpu=True,\n", 158 | " text_use_small=False,\n", 159 | " coarse_use_gpu=True,\n", 160 | " coarse_use_small=False,\n", 161 | " fine_use_gpu=True,\n", 162 | " fine_use_small=False,\n", 163 | " codec_use_gpu=True,\n", 164 | " force_reload=False,\n", 165 | " path=\"models\"\n", 166 | ")" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "# simple generation\n", 176 | "audio_array = generate_audio(text_prompt, history_prompt=voice_name, text_temp=0.7, waveform_temp=0.7)" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "# generation with more control\n", 186 | "x_semantic = generate_text_semantic(\n", 187 | " text_prompt,\n", 188 | " history_prompt=voice_name,\n", 189 | " temp=0.7,\n", 190 | " top_k=50,\n", 191 | " top_p=0.95,\n", 192 | ")\n", 193 | "\n", 194 | "x_coarse_gen = generate_coarse(\n", 195 | " x_semantic,\n", 196 | " history_prompt=voice_name,\n", 197 | " temp=0.7,\n", 198 | " top_k=50,\n", 199 | " top_p=0.95,\n", 200 | ")\n", 201 | "x_fine_gen = generate_fine(\n", 202 | " x_coarse_gen,\n", 203 | " history_prompt=voice_name,\n", 204 | " temp=0.5,\n", 205 | ")\n", 206 | "audio_array = codec_decode(x_fine_gen)" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "from IPython.display import Audio\n", 216 | "# play audio\n", 217 | "Audio(audio_array, rate=SAMPLE_RATE)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": null, 223 | "metadata": {}, 224 | "outputs": [], 225 | "source": [ 226 | "from scipy.io.wavfile import write as write_wav\n", 227 | "# save audio\n", 228 | "filepath = \"/output/audio.wav\" # change this to your desired output path\n", 229 | "write_wav(filepath, SAMPLE_RATE, audio_array)" 230 | ] 231 | } 232 | ], 233 | "metadata": { 234 | "kernelspec": { 235 | "display_name": "Python 3", 236 | "language": "python", 237 | "name": "python3" 238 | }, 239 | "language_info": { 240 | "codemirror_mode": { 241 | "name": "ipython", 242 | "version": 3 243 | }, 244 | "file_extension": ".py", 245 | "mimetype": "text/x-python", 246 | "name": "python", 247 | "nbconvert_exporter": "python", 248 | "pygments_lexer": "ipython3", 249 | "version": "3.10.8" 250 | }, 251 | "orig_nbformat": 4 252 | }, 253 | "nbformat": 4, 254 | "nbformat_minor": 2 255 | } 256 | -------------------------------------------------------------------------------- /bark/model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Much of this code is adapted from Andrej Karpathy's NanoGPT 3 | (https://github.com/karpathy/nanoGPT) 4 | """ 5 | import math 6 | from dataclasses import dataclass 7 | 8 | import torch 9 | import torch.nn as nn 10 | from torch.nn import functional as F 11 | from einops import rearrange, repeat, reduce 12 | 13 | 14 | class LayerNorm(nn.Module): 15 | """ LayerNorm but with an optional bias. 
PyTorch doesn't support simply bias=False """ 16 | 17 | def __init__(self, ndim, bias): 18 | super().__init__() 19 | self.weight = nn.Parameter(torch.ones(ndim)) 20 | self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None 21 | 22 | def forward(self, input): 23 | return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) 24 | 25 | class CausalSelfAttention(nn.Module): 26 | 27 | def __init__(self, config): 28 | super().__init__() 29 | assert config.n_embd % config.n_head == 0 30 | # key, query, value projections for all heads, but in a batch 31 | self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) 32 | # output projection 33 | self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) 34 | # regularization 35 | self.attn_dropout = nn.Dropout(config.dropout) 36 | self.resid_dropout = nn.Dropout(config.dropout) 37 | self.n_head = config.n_head 38 | self.n_embd = config.n_embd 39 | self.dropout = config.dropout 40 | # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary 41 | self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') 42 | if not self.flash: 43 | # print("WARNING: using slow attention. Flash Attention atm needs PyTorch nightly and dropout=0.0") 44 | # causal mask to ensure that attention is only applied to the left in the input sequence 45 | self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)) 46 | .view(1, 1, config.block_size, config.block_size)) 47 | 48 | def forward(self, x, past_kv=None, use_cache=False): 49 | B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) 50 | 51 | # calculate query, key, values for all heads in batch and move head forward to be the batch dim 52 | q, k ,v = self.c_attn(x).split(self.n_embd, dim=2) 53 | k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 54 | q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 55 | v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 56 | 57 | if past_kv is not None: 58 | past_key = past_kv[0] 59 | past_value = past_kv[1] 60 | k = torch.cat((past_key, k), dim=-2) 61 | v = torch.cat((past_value, v), dim=-2) 62 | 63 | FULL_T = k.shape[-2] 64 | 65 | if use_cache is True: 66 | present = (k, v) 67 | else: 68 | present = None 69 | 70 | # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) 71 | if self.flash: 72 | # efficient attention using Flash Attention CUDA kernels 73 | if past_kv is not None: 74 | # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains 75 | # the query for the last token. scaled_dot_product_attention interprets this as the first token in the 76 | # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so 77 | # to work around this we set is_causal=False. 
78 | is_causal = False 79 | else: 80 | is_causal = True 81 | 82 | y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal) 83 | else: 84 | # manual implementation of attention 85 | att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) 86 | att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf')) 87 | att = F.softmax(att, dim=-1) 88 | att = self.attn_dropout(att) 89 | y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) 90 | y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side 91 | 92 | # output projection 93 | y = self.resid_dropout(self.c_proj(y)) 94 | return (y, present) 95 | 96 | class MLP(nn.Module): 97 | 98 | def __init__(self, config): 99 | super().__init__() 100 | self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) 101 | self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) 102 | self.dropout = nn.Dropout(config.dropout) 103 | self.gelu = nn.GELU() 104 | 105 | def forward(self, x): 106 | x = self.c_fc(x) 107 | x = self.gelu(x) 108 | x = self.c_proj(x) 109 | x = self.dropout(x) 110 | return x 111 | 112 | class Block(nn.Module): 113 | 114 | def __init__(self, config, layer_idx): 115 | super().__init__() 116 | self.ln_1 = LayerNorm(config.n_embd, bias=config.bias) 117 | self.attn = CausalSelfAttention(config) 118 | self.ln_2 = LayerNorm(config.n_embd, bias=config.bias) 119 | self.mlp = MLP(config) 120 | self.layer_idx = layer_idx 121 | 122 | def forward(self, x, past_kv=None, use_cache=False): 123 | attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache) 124 | x = x + attn_output 125 | x = x + self.mlp(self.ln_2(x)) 126 | return (x, prev_kvs) 127 | 128 | @dataclass 129 | class GPTConfig: 130 | block_size: int = 1024 131 | input_vocab_size: int = 10_048 132 | output_vocab_size: int = 10_048 133 | n_layer: int = 12 134 | n_head: int = 12 135 | n_embd: int = 768 136 | dropout: float = 0.0 137 | bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster 138 | 139 | class GPT(nn.Module): 140 | 141 | def __init__(self, config): 142 | super().__init__() 143 | assert config.input_vocab_size is not None 144 | assert config.output_vocab_size is not None 145 | assert config.block_size is not None 146 | self.config = config 147 | 148 | self.transformer = nn.ModuleDict(dict( 149 | wte = nn.Embedding(config.input_vocab_size, config.n_embd), 150 | wpe = nn.Embedding(config.block_size, config.n_embd), 151 | drop = nn.Dropout(config.dropout), 152 | h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]), 153 | ln_f = LayerNorm(config.n_embd, bias=config.bias), 154 | )) 155 | self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False) 156 | 157 | def get_num_params(self, non_embedding=True): 158 | """ 159 | Return the number of parameters in the model. 160 | For non-embedding count (default), the position embeddings get subtracted. 161 | The token embeddings would too, except due to the parameter sharing these 162 | params are actually used as weights in the final layer, so we include them. 
163 | """ 164 | n_params = sum(p.numel() for p in self.parameters()) 165 | if non_embedding: 166 | n_params -= self.transformer.wte.weight.numel() 167 | n_params -= self.transformer.wpe.weight.numel() 168 | return n_params 169 | 170 | def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False, training=False): 171 | device = idx.device 172 | b, t = idx.size() 173 | if past_kv is not None: 174 | assert t == 1 175 | tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) 176 | else: 177 | if merge_context: 178 | assert(idx.shape[1] >= 256+256+1) 179 | t = idx.shape[1] - 256 180 | else: 181 | assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" 182 | 183 | # forward the GPT model itself 184 | if merge_context: 185 | tok_emb = torch.cat([ 186 | self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]), 187 | self.transformer.wte(idx[:,256+256:]) 188 | ], dim=1) 189 | else: 190 | tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) 191 | 192 | if past_kv is None: 193 | past_length = 0 194 | past_kv = tuple([None] * len(self.transformer.h)) 195 | else: 196 | past_length = past_kv[0][0].size(-2) 197 | 198 | if position_ids is None: 199 | position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device) 200 | position_ids = position_ids.unsqueeze(0) # shape (1, t) 201 | assert position_ids.shape == (1, t) 202 | 203 | pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd) 204 | 205 | x = self.transformer.drop(tok_emb + pos_emb) 206 | 207 | new_kv = () if use_cache else None 208 | 209 | for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)): 210 | x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache) 211 | 212 | if use_cache: 213 | new_kv = new_kv + (kv,) 214 | 215 | x = self.transformer.ln_f(x) 216 | 217 | 218 | if training: 219 | logits = self.lm_head(x) 220 | return logits 221 | 222 | # inference-time mini-optimization: only forward the lm_head on the very last position 223 | logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim 224 | 225 | return (logits, new_kv) 226 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🐶 BARK AI: but with the ability to use voice cloning on custom audio samples 2 | 3 | --- 4 | 5 | ## UPDATE: We launched a follow-up to BARK -- 6 | 7 | _a hyper realistic AI Voice Cloner Desktop App_ 8 | 9 | - Runs locally 10 | - All data is yours - 100% data privacy 11 | - No costs to run 12 | 13 | 👉 Check it out here: https://github.com/serpapps/ai-voice-cloner 14 | 15 | 16 | --- 17 | 18 | For RVC `git clone https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI` and train your model or point the code to you model (must clone RVC repo in bark-with-voice-clone directory) 19 | 20 | If you want to clone a voice just follow the `clone_voice.ipynb` notebook. If you want to generate audio from text, follow the `generate.ipynb` notebook. 21 | 22 | To create a voice clone sample, you need an audio sample of around 5-12 seconds 23 | 24 | You will get the best results by making generations with your cloned voice until you find one that is really close to the source. 
Then use that as the new history prompt (it comes from the model, so it should theoretically be more consistent). 25 | 26 | - [BARK text to speech @ SERP AI](https://serp.ai/tools/bark-text-to-speech-ai-voice-clone-app/) 27 | 28 | # Contributors 29 | 30 | Huge shoutout & thank you to: 31 | 32 | [gitmylo](https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer/) 33 | for the solution to the semantic token generation for better voice clones and finetunes (HuBERT, etc.) 34 | 35 | *** 36 | 37 |
38 | francislabountyjr 39 | gkucsko 40 | kmfreyberg 41 | Vaibhavs10 42 | devinschumacher 43 | mcamac 44 | fiq 45 | zygi 46 | jn-jairo 47 | gitmylo 48 | alyxdow 49 | mikeyshulman 50 |
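## Reusing a generation as the history prompt

As noted above, the cleanest clones usually come from picking a generation that already sounds close to the source and freezing it as the new history prompt. The notebooks in this repo (`generate_chunked.ipynb`, `test_models.ipynb`) do exactly this; the snippet below is only a minimal sketch of the same idea. The file names (`candidate.wav`, `my_voice.npz`), the example text, and the sampling settings are placeholders, not part of the library.

```python
import numpy as np
from scipy.io.wavfile import write as write_wav

from bark.generation import (
    SAMPLE_RATE, preload_models, generate_text_semantic,
    generate_coarse, generate_fine, codec_decode,
)

# add text_model_path / coarse_model_path / fine_model_path here if you use finetuned checkpoints
preload_models()

voice_name = "bark/assets/prompts/en_speaker_0.npz"  # or the .npz produced by clone_voice.ipynb
text = "A short line read in the cloned voice."

# one full generation, keeping the intermediate token arrays
semantic = generate_text_semantic(text, history_prompt=voice_name, temp=0.7)
coarse = generate_coarse(semantic, history_prompt=voice_name, temp=0.7)
fine = generate_fine(coarse, history_prompt=voice_name, temp=0.5)
write_wav("candidate.wav", SAMPLE_RATE, codec_decode(fine))

# if candidate.wav is close to the source, freeze these tokens as the new history prompt
np.savez_compressed(
    "my_voice.npz",
    semantic_prompt=semantic,
    coarse_prompt=coarse,
    fine_prompt=fine,
)

# later generations can then point at the saved prompt, e.g.
# from bark import generate_audio
# audio = generate_audio("New text here.", history_prompt="my_voice.npz")
```

Because the saved prompt is built from tokens the model itself produced (rather than re-encoded source audio), follow-up generations tend to stay more consistent with it.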
51 | 52 | 53 | 54 | 55 | 56 | ------------------------------------------------------------------- 57 | # Original README.md 58 | ## 🤖 Usage 59 | 60 | ```python 61 | from bark import SAMPLE_RATE, generate_audio, preload_models 62 | from IPython.display import Audio 63 | 64 | # download and load all models 65 | preload_models() 66 | 67 | # generate audio from text 68 | text_prompt = """ 69 | Hello, my name is Serpy. And, uh — and I like pizza. [laughs] 70 | But I also have other interests such as playing tic tac toe. 71 | """ 72 | audio_array = generate_audio(text_prompt) 73 | 74 | # play text in notebook 75 | Audio(audio_array, rate=SAMPLE_RATE) 76 | ``` 77 | 78 | [pizza.webm](https://user-images.githubusercontent.com/5068315/230490503-417e688d-5115-4eee-9550-b46a2b465ee3.webm) 79 | 80 | 81 | To save `audio_array` as a WAV file: 82 | 83 | ```python 84 | from scipy.io.wavfile import write as write_wav 85 | 86 | write_wav("/path/to/audio.wav", SAMPLE_RATE, audio_array) 87 | ``` 88 | 89 | ### 🌎 Foreign Language 90 | 91 | Bark supports various languages out-of-the-box and automatically determines language from input text. When prompted with code-switched text, Bark will attempt to employ the native accent for the respective languages. English quality is best for the time being, and we expect other languages to further improve with scaling. 92 | 93 | ```python 94 | text_prompt = """ 95 | Buenos días Miguel. Tu colega piensa que tu alemán es extremadamente malo. 96 | But I suppose your english isn't terrible. 97 | """ 98 | audio_array = generate_audio(text_prompt) 99 | ``` 100 | 101 | [miguel.webm](https://user-images.githubusercontent.com/5068315/230684752-10baadfe-1e7c-46a2-8323-43282aef2c8c.webm) 102 | 103 | ### 🎶 Music 104 | 105 | Bark can generate all types of audio, and, in principle, doesn't see a difference between speech and music. Sometimes Bark chooses to generate text as music, but you can help it out by adding music notes around your lyrics. 106 | 107 | ```python 108 | text_prompt = """ 109 | ♪ In the jungle, the mighty jungle, the lion barks tonight ♪ 110 | """ 111 | audio_array = generate_audio(text_prompt) 112 | ``` 113 | 114 | [lion.webm](https://user-images.githubusercontent.com/5068315/230684766-97f5ea23-ad99-473c-924b-66b6fab24289.webm) 115 | 116 | ### 🎤 Voice Presets and Voice/Audio Cloning 117 | 118 | Bark has the capability to fully clone voices - including tone, pitch, emotion and prosody. The model also attempts to preserve music, ambient noise, etc. from input audio. However, to mitigate misuse of this technology, we limit the audio history prompts to a limited set of Suno-provided, fully synthetic options to choose from for each language. Specify following the pattern: `{lang_code}_speaker_{0-9}`. 119 | 120 | ```python 121 | text_prompt = """ 122 | I have a silky smooth voice, and today I will tell you about 123 | the exercise regimen of the common sloth. 124 | """ 125 | audio_array = generate_audio(text_prompt, history_prompt="en_speaker_1") 126 | ``` 127 | 128 | 129 | [sloth.webm](https://user-images.githubusercontent.com/5068315/230684883-a344c619-a560-4ff5-8b99-b4463a34487b.webm) 130 | 131 | *Note: since Bark recognizes languages automatically from input text, it is possible to use for example a german history prompt with english text. This usually leads to english audio with a german accent.* 132 | 133 | ### 👥 Speaker Prompts 134 | 135 | You can provide certain speaker prompts such as NARRATOR, MAN, WOMAN, etc. 
Please note that these are not always respected, especially if a conflicting audio history prompt is given. 136 | 137 | ```python 138 | text_prompt = """ 139 | WOMAN: I would like an oatmilk latte please. 140 | MAN: Wow, that's expensive! 141 | """ 142 | audio_array = generate_audio(text_prompt) 143 | ``` 144 | 145 | [latte.webm](https://user-images.githubusercontent.com/5068315/230684864-12d101a1-a726-471d-9d56-d18b108efcb8.webm) 146 | 147 | 148 | ## 💻 Installation 149 | 150 | ``` 151 | pip install git+https://github.com/suno-ai/bark.git 152 | ``` 153 | 154 | or 155 | 156 | ``` 157 | git clone https://github.com/suno-ai/bark 158 | cd bark && pip install . 159 | ``` 160 | 161 | ## 🛠️ Hardware and Inference Speed 162 | 163 | Bark has been tested and works on both CPU and GPU (`pytorch 2.0+`, CUDA 11.7 and CUDA 12.0). 164 | Running Bark requires running >100M parameter transformer models. 165 | On modern GPUs and PyTorch nightly, Bark can generate audio in roughly realtime. On older GPUs, default colab, or CPU, inference time might be 10-100x slower. 166 | 167 | ## ⚙️ Details 168 | 169 | Similar to [Vall-E](https://arxiv.org/abs/2301.02111) and some other amazing work in the field, Bark uses GPT-style 170 | models to generate audio from scratch. Different from Vall-E, the initial text prompt is embedded into high-level semantic tokens without the use of phonemes. It can therefore generalize to arbitrary instructions beyond speech that occur in the training data, such as music lyrics, sound effects or other non-speech sounds. A subsequent second model is used to convert the generated semantic tokens into audio codec tokens to generate the full waveform. To enable the community to use Bark via public code we used the fantastic 171 | [EnCodec codec](https://github.com/facebookresearch/encodec) from Facebook to act as an audio representation. 172 | 173 | Below is a list of some known non-speech sounds 174 | 175 | - `[laughter]` 176 | - `[laughs]` 177 | - `[sighs]` 178 | - `[music]` 179 | - `[gasps]` 180 | - `[clears throat]` 181 | - `—` or `...` for hesitations 182 | - `♪` for song lyrics 183 | - capitalization for emphasis of a word 184 | - `MAN/WOMAN:` for bias towards speaker 185 | 186 | **Supported Languages** 187 | 188 | | Language | Status | 189 | | --- | --- | 190 | | English (en) | ✅ | 191 | | German (de) | ✅ | 192 | | Spanish (es) | ✅ | 193 | | French (fr) | ✅ | 194 | | Hindi (hi) | ✅ | 195 | | Italian (it) | ✅ | 196 | | Japanese (ja) | ✅ | 197 | | Korean (ko) | ✅ | 198 | | Polish (pl) | ✅ | 199 | | Portuguese (pt) | ✅ | 200 | | Russian (ru) | ✅ | 201 | | Turkish (tr) | ✅ | 202 | | Chinese, simplified (zh) | ✅ | 203 | | Arabic | Coming soon! | 204 | | Bengali | Coming soon! | 205 | | Telugu | Coming soon! 
| 206 | -------------------------------------------------------------------------------- /generate_chunked.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from IPython.display import Audio\n", 10 | "from scipy.io.wavfile import write as write_wav\n", 11 | "\n", 12 | "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "semantic_path = \"semantic_output/pytorch_model.bin\" # set to None if you don't want to use finetuned semantic\n", 22 | "coarse_path = \"coarse_output/pytorch_model.bin\" # set to None if you don't want to use finetuned coarse\n", 23 | "fine_path = \"fine_output/pytorch_model.bin\" # set to None if you don't want to use finetuned fine\n", 24 | "use_rvc = True # Set to False to use bark without RVC\n", 25 | "rvc_name = 'mi-test'\n", 26 | "rvc_path = f\"Retrieval-based-Voice-Conversion-WebUI/weights/{rvc_name}.pth\"\n", 27 | "index_path = f\"Retrieval-based-Voice-Conversion-WebUI/logs/{rvc_name}/added_IVF256_Flat_nprobe_1_{rvc_name}_v2.index\" \n", 28 | "device=\"cuda:0\"\n", 29 | "is_half=True" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "import re\n", 39 | "def split_and_recombine_text(text, desired_length=100, max_length=150):\n", 40 | " # from https://github.com/neonbjb/tortoise-tts\n", 41 | " \"\"\"Split text it into chunks of a desired length trying to keep sentences intact.\"\"\"\n", 42 | " # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii\n", 43 | " text = re.sub(r\"\\n\\n+\", \"\\n\", text)\n", 44 | " text = re.sub(r\"\\s+\", \" \", text)\n", 45 | " text = re.sub(r\"[“”]\", '\"', text)\n", 46 | "\n", 47 | " rv = []\n", 48 | " in_quote = False\n", 49 | " current = \"\"\n", 50 | " split_pos = []\n", 51 | " pos = -1\n", 52 | " end_pos = len(text) - 1\n", 53 | "\n", 54 | " def seek(delta):\n", 55 | " nonlocal pos, in_quote, current\n", 56 | " is_neg = delta < 0\n", 57 | " for _ in range(abs(delta)):\n", 58 | " if is_neg:\n", 59 | " pos -= 1\n", 60 | " current = current[:-1]\n", 61 | " else:\n", 62 | " pos += 1\n", 63 | " current += text[pos]\n", 64 | " if text[pos] == '\"':\n", 65 | " in_quote = not in_quote\n", 66 | " return text[pos]\n", 67 | "\n", 68 | " def peek(delta):\n", 69 | " p = pos + delta\n", 70 | " return text[p] if p < end_pos and p >= 0 else \"\"\n", 71 | "\n", 72 | " def commit():\n", 73 | " nonlocal rv, current, split_pos\n", 74 | " rv.append(current)\n", 75 | " current = \"\"\n", 76 | " split_pos = []\n", 77 | "\n", 78 | " while pos < end_pos:\n", 79 | " c = seek(1)\n", 80 | " # do we need to force a split?\n", 81 | " if len(current) >= max_length:\n", 82 | " if len(split_pos) > 0 and len(current) > (desired_length / 2):\n", 83 | " # we have at least one sentence and we are over half the desired length, seek back to the last split\n", 84 | " d = pos - split_pos[-1]\n", 85 | " seek(-d)\n", 86 | " else:\n", 87 | " # no full sentences, seek back until we are not in the middle of a word and split there\n", 88 | " while c not in \"!?.\\n \" and pos > 0 and len(current) > desired_length:\n", 89 | " c = seek(-1)\n", 90 | " commit()\n", 91 | " 
# check for sentence boundaries\n", 92 | " elif not in_quote and (c in \"!?\\n\" or (c == \".\" and peek(1) in \"\\n \")):\n", 93 | " # seek forward if we have consecutive boundary markers but still within the max length\n", 94 | " while (\n", 95 | " pos < len(text) - 1 and len(current) < max_length and peek(1) in \"!?.\"\n", 96 | " ):\n", 97 | " c = seek(1)\n", 98 | " split_pos.append(pos)\n", 99 | " if len(current) >= desired_length:\n", 100 | " commit()\n", 101 | " # treat end of quote as a boundary if its followed by a space or newline\n", 102 | " elif in_quote and peek(1) == '\"' and peek(2) in \"\\n \":\n", 103 | " seek(2)\n", 104 | " split_pos.append(pos)\n", 105 | " rv.append(current)\n", 106 | "\n", 107 | " # clean up, remove lines with only whitespace or punctuation\n", 108 | " rv = [s.strip() for s in rv]\n", 109 | " rv = [s for s in rv if len(s) > 0 and not re.match(r\"^[\\s\\.,;:!?]*$\", s)]\n", 110 | "\n", 111 | " return rv\n", 112 | "\n", 113 | "def generate_with_settings(text_prompt, semantic_temp=0.7, semantic_top_k=50, semantic_top_p=0.95, coarse_temp=0.7, coarse_top_k=50, coarse_top_p=0.95, fine_temp=0.5, voice_name=None, use_semantic_history_prompt=True, use_coarse_history_prompt=True, use_fine_history_prompt=True, output_full=False):\n", 114 | " # generation with more control\n", 115 | " x_semantic = generate_text_semantic(\n", 116 | " text_prompt,\n", 117 | " history_prompt=voice_name if use_semantic_history_prompt else None,\n", 118 | " temp=semantic_temp,\n", 119 | " top_k=semantic_top_k,\n", 120 | " top_p=semantic_top_p,\n", 121 | " )\n", 122 | "\n", 123 | " x_coarse_gen = generate_coarse(\n", 124 | " x_semantic,\n", 125 | " history_prompt=voice_name if use_coarse_history_prompt else None,\n", 126 | " temp=coarse_temp,\n", 127 | " top_k=coarse_top_k,\n", 128 | " top_p=coarse_top_p,\n", 129 | " )\n", 130 | " x_fine_gen = generate_fine(\n", 131 | " x_coarse_gen,\n", 132 | " history_prompt=voice_name if use_fine_history_prompt else None,\n", 133 | " temp=fine_temp,\n", 134 | " )\n", 135 | "\n", 136 | " if output_full:\n", 137 | " full_generation = {\n", 138 | " 'semantic_prompt': x_semantic,\n", 139 | " 'coarse_prompt': x_coarse_gen,\n", 140 | " 'fine_prompt': x_fine_gen,\n", 141 | " }\n", 142 | " return full_generation, codec_decode(x_fine_gen)\n", 143 | " return codec_decode(x_fine_gen)" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": null, 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "# `[laughter]`\n", 153 | "# - `[laughs]`\n", 154 | "# - `[sighs]`\n", 155 | "# - `[music]`\n", 156 | "# - `[gasps]`\n", 157 | "# - `[clears throat]`\n", 158 | "# - `—` or `...` for hesitations\n", 159 | "# - `♪` for song lyrics" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": null, 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "# download and load all models\n", 169 | "preload_models(\n", 170 | " text_use_gpu=True,\n", 171 | " text_use_small=False,\n", 172 | " text_model_path=semantic_path,\n", 173 | " coarse_use_gpu=True,\n", 174 | " coarse_use_small=False,\n", 175 | " coarse_model_path=coarse_path,\n", 176 | " fine_use_gpu=True,\n", 177 | " fine_use_small=False,\n", 178 | " fine_model_path=fine_path,\n", 179 | " codec_use_gpu=True,\n", 180 | " force_reload=False,\n", 181 | " path=\"models\"\n", 182 | ")\n", 183 | "\n", 184 | "if use_rvc:\n", 185 | " from rvc_infer import get_vc, vc_single\n", 186 | " get_vc(rvc_path, device, is_half)" 187 | ] 188 | }, 189 | { 190 | "cell_type": 
"code", 191 | "execution_count": null, 192 | "metadata": {}, 193 | "outputs": [], 194 | "source": [ 195 | "text = \"\"\"The Uncharted Land of Discovery: A Journey Through Time and Space\n", 196 | "[clears throat]\n", 197 | "Chapter 1: The Dawn of Curiosity\n", 198 | "[takes breath]\n", 199 | "Since the dawn of humankind, our species has been driven by a powerful force: curiosity. It is an innate, unquenchable desire to explore, understand, and unravel the mysteries of the world around us. This primal urge has led us on countless adventures, pushing us to the farthest reaches of our planet and beyond.\n", 200 | "\n", 201 | "Early humans, huddled around a flickering fire, gazed up at the night sky and wondered what those twinkling lights were. They had no idea that their curiosity would eventually propel us into the vast, uncharted realm of space. As time progressed, our ancestors began to explore their surroundings, venturing beyond their caves and settlements, driven by the need to discover what lay beyond the horizon.\n", 202 | "\n", 203 | "hapter 2: The Age of Exploration\n", 204 | "\n", 205 | "The Age of Exploration marked a turning point in human history, as brave souls took to the seas in search of new lands, wealth, and knowledge. Pioneers like Christopher Columbus, Vasco da Gama, and Ferdinand Magellan set sail on perilous voyages, pushing the boundaries of what was known and understood.\n", 206 | "[clears throat]\n", 207 | "These intrepid explorers discovered new continents, mapped out previously unknown territories, and encountered diverse cultures. They also established trade routes, allowing for the exchange of goods, ideas, and innovations between distant societies. The Age of Exploration was not without its dark moments, however, as conquest, colonization, and exploitation often went hand in hand with discovery.\n", 208 | "[clears throat]\n", 209 | "Chapter 3: The Scientific Revolution\n", 210 | "[laughs]\n", 211 | "The Scientific Revolution was a period of profound change, as humanity began to question long-held beliefs and seek empirical evidence. Pioneers like Galileo Galilei, Isaac Newton, and Johannes Kepler sought to understand the natural world through observation, experimentation, and reason.\n", 212 | "[sighs]\n", 213 | "Their discoveries laid the foundation for modern science, transforming the way we view the universe and our place within it. New technologies, such as the telescope and the microscope, allowed us to peer deeper into the cosmos and the microscopic world, further expanding our understanding of reality.\n", 214 | "[gasps]\n", 215 | "Chapter 4: The Information Age\n", 216 | "\n", 217 | "The Information Age, sometimes referred to as the Digital Age, has revolutionized the way we communicate, learn, and access knowledge. With the advent of the internet and personal computers, information that was once reserved for the privileged few is now available to the masses.\n", 218 | "...\n", 219 | "This democratization of knowledge has led to an explosion of innovation, as ideas and information are shared across borders and cultures at lightning speed. 
The Information Age has also brought new challenges, as the rapid pace of technological advancements threatens to outpace our ability to adapt and raises questions about the ethical implications of our increasingly interconnected world.\n", 220 | "[laughter]\n", 221 | "Chapter 5: The Final Frontier\n", 222 | "[clears throat]\n", 223 | "As our knowledge of the universe expands, so too does our desire to explore the cosmos. Space exploration has come a long way since the first successful satellite, Sputnik, was launched in 1957. We have landed humans on the moon, sent probes to the far reaches of our solar system, and even glimpsed distant galaxies through powerful telescopes.\n", 224 | "\n", 225 | "The future of space exploration is filled with possibilities, from establishing colonies on Mars to the search for extraterrestrial life. As we venture further into the unknown, we continue to be driven by the same curiosity that has propelled us throughout history, always seeking to uncover the secrets of the universe and our place within it.\n", 226 | "...\n", 227 | "In conclusion, the human journey is one of discovery, driven by our innate curiosity and desire to understand the world around us. From the dawn of our species to the present day, we have continued to explore, learn, and adapt, pushing the boundaries of what is known and possible. As we continue to unravel the mysteries of the cosmos, our spirit.\"\"\"" 228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": null, 233 | "metadata": {}, 234 | "outputs": [], 235 | "source": [ 236 | "# Chunk the text into smaller pieces then combine the generated audio\n", 237 | "from time import time\n", 238 | "from tqdm.auto import tqdm\n", 239 | "from IPython.display import Audio\n", 240 | "from scipy.io.wavfile import write as write_wav\n", 241 | "import os\n", 242 | "import numpy as np\n", 243 | "\n", 244 | "# generation settings\n", 245 | "voice_name = 'en_speaker_0'\n", 246 | "out_filepath = 'audio/audio.wav'\n", 247 | "\n", 248 | "semantic_temp = 0.7\n", 249 | "semantic_top_k = 50\n", 250 | "semantic_top_p = 0.95\n", 251 | "\n", 252 | "coarse_temp = 0.7\n", 253 | "coarse_top_k = 50\n", 254 | "coarse_top_p = 0.95\n", 255 | "\n", 256 | "fine_temp = 0.5\n", 257 | "\n", 258 | "use_semantic_history_prompt = True\n", 259 | "use_coarse_history_prompt = True\n", 260 | "use_fine_history_prompt = True\n", 261 | "\n", 262 | "use_last_generation_as_history = True\n", 263 | "\n", 264 | "if use_rvc:\n", 265 | " index_rate = 0.75\n", 266 | " f0up_key = -10\n", 267 | " filter_radius = 3\n", 268 | " rms_mix_rate = 0.25\n", 269 | " protect = 0.33\n", 270 | " resample_sr = SAMPLE_RATE\n", 271 | " f0method = \"harvest\" #harvest or pm\n", 272 | "\n", 273 | "texts = split_and_recombine_text(text)\n", 274 | "\n", 275 | "all_parts = []\n", 276 | "for i, text in tqdm(enumerate(texts), total=len(texts)):\n", 277 | " full_generation, audio_array = generate_with_settings(\n", 278 | " text,\n", 279 | " semantic_temp=semantic_temp,\n", 280 | " semantic_top_k=semantic_top_k,\n", 281 | " semantic_top_p=semantic_top_p,\n", 282 | " coarse_temp=coarse_temp,\n", 283 | " coarse_top_k=coarse_top_k,\n", 284 | " coarse_top_p=coarse_top_p,\n", 285 | " fine_temp=fine_temp,\n", 286 | " voice_name=voice_name,\n", 287 | " use_semantic_history_prompt=use_semantic_history_prompt,\n", 288 | " use_coarse_history_prompt=use_coarse_history_prompt,\n", 289 | " use_fine_history_prompt=use_fine_history_prompt,\n", 290 | " output_full=True\n", 291 | " )\n", 292 | " if 
use_last_generation_as_history:\n", 293 | " # save to npz\n", 294 | " os.makedirs('_temp', exist_ok=True)\n", 295 | " np.savez_compressed(\n", 296 | " '_temp/history.npz',\n", 297 | " semantic_prompt=full_generation['semantic_prompt'],\n", 298 | " coarse_prompt=full_generation['coarse_prompt'],\n", 299 | " fine_prompt=full_generation['fine_prompt'],\n", 300 | " )\n", 301 | " voice_name = '_temp/history.npz'\n", 302 | " write_wav(out_filepath.replace('.wav', f'_{i}') + '.wav', SAMPLE_RATE, audio_array)\n", 303 | "\n", 304 | " if use_rvc:\n", 305 | " try:\n", 306 | " audio_array = vc_single(0,out_filepath.replace('.wav', f'_{i}') + '.wav',f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 307 | " except:\n", 308 | " audio_array = vc_single(0,out_filepath.replace('.wav', f'_{i}') + '.wav',f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 309 | " write_wav(out_filepath.replace('.wav', f'_{i}') + '.wav', SAMPLE_RATE, audio_array)\n", 310 | " all_parts.append(audio_array)\n", 311 | "\n", 312 | "audio_array = np.concatenate(all_parts, axis=-1)\n", 313 | "\n", 314 | "# save audio\n", 315 | "write_wav(out_filepath, SAMPLE_RATE, audio_array)\n", 316 | "\n", 317 | "# play audio\n", 318 | "Audio(audio_array, rate=SAMPLE_RATE)" 319 | ] 320 | } 321 | ], 322 | "metadata": { 323 | "kernelspec": { 324 | "display_name": "Python 3", 325 | "language": "python", 326 | "name": "python3" 327 | }, 328 | "language_info": { 329 | "codemirror_mode": { 330 | "name": "ipython", 331 | "version": 3 332 | }, 333 | "file_extension": ".py", 334 | "mimetype": "text/x-python", 335 | "name": "python", 336 | "nbconvert_exporter": "python", 337 | "pygments_lexer": "ipython3", 338 | "version": "3.10.8" 339 | }, 340 | "orig_nbformat": 4 341 | }, 342 | "nbformat": 4, 343 | "nbformat_minor": 2 344 | } 345 | -------------------------------------------------------------------------------- /test_models.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from IPython.display import Audio\n", 10 | "from scipy.io.wavfile import write as write_wav\n", 11 | "\n", 12 | "from bark.api import generate_audio\n", 13 | "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "semantic_path = \"semantic_output/pytorch_model.bin\" # set to None if you don't want to use finetuned semantic\n", 23 | "coarse_path = \"coarse_output/pytorch_model.bin\" # set to None if you don't want to use finetuned coarse\n", 24 | "fine_path = \"fine_output/pytorch_model.bin\" # set to None if you don't want to use finetuned fine\n", 25 | "use_rvc = True # Set to False to use bark without RVC\n", 26 | "rvc_name = 'mi-test'\n", 27 | "rvc_path = f\"Retrieval-based-Voice-Conversion-WebUI/weights/{rvc_name}.pth\"\n", 28 | "index_path = f\"Retrieval-based-Voice-Conversion-WebUI/logs/{rvc_name}/added_IVF256_Flat_nprobe_1_{rvc_name}_v2.index\"\n", 29 | "device=\"cuda:0\"\n", 30 | "is_half=True" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "metadata": {}, 37 | 
"outputs": [], 38 | "source": [ 39 | "preload_models(\n", 40 | " text_use_gpu=True,\n", 41 | " text_use_small=False,\n", 42 | " text_model_path=semantic_path,\n", 43 | " coarse_use_gpu=True,\n", 44 | " coarse_use_small=False,\n", 45 | " coarse_model_path=coarse_path,\n", 46 | " fine_use_gpu=True,\n", 47 | " fine_use_small=False,\n", 48 | " fine_model_path=fine_path,\n", 49 | " codec_use_gpu=True,\n", 50 | " force_reload=False,\n", 51 | " path=\"models\"\n", 52 | ")\n", 53 | "\n", 54 | "if use_rvc:\n", 55 | " from rvc_infer import get_vc, vc_single\n", 56 | " get_vc(rvc_path, device, is_half)" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "# simple generation\n", 66 | "text_prompt = \"I am Joe Biden... and this is the finetuned semantic, coarse and fine model! A lot better than the original!\"\n", 67 | "filepath = \"output/audio.wav\" # change this to your desired output path\n", 68 | "audio_array = generate_audio(text_prompt, history_prompt=None, text_temp=0.7, waveform_temp=0.7)\n", 69 | "write_wav(filepath, SAMPLE_RATE, audio_array)\n", 70 | "\n", 71 | "if use_rvc:\n", 72 | " index_rate = 0.75\n", 73 | " f0up_key = -6\n", 74 | " filter_radius = 3\n", 75 | " rms_mix_rate = 0.25\n", 76 | " protect = 0.33\n", 77 | " resample_sr = SAMPLE_RATE\n", 78 | " f0method = \"harvest\" #harvest or pm\n", 79 | " try:\n", 80 | " audio_array = vc_single(0,filepath,f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 81 | " except:\n", 82 | " audio_array = vc_single(0,filepath,f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 83 | " write_wav(filepath, SAMPLE_RATE, audio_array)\n", 84 | "\n", 85 | "Audio(audio_array, rate=SAMPLE_RATE)" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "def generate_with_settings(text_prompt, semantic_temp=0.7, semantic_top_k=50, semantic_top_p=0.95, coarse_temp=0.7, coarse_top_k=50, coarse_top_p=0.95, fine_temp=0.5, voice_name=None, use_semantic_history_prompt=True, use_coarse_history_prompt=True, use_fine_history_prompt=True, output_full=False):\n", 95 | " # generation with more control\n", 96 | " x_semantic = generate_text_semantic(\n", 97 | " text_prompt,\n", 98 | " history_prompt=voice_name if use_semantic_history_prompt else None,\n", 99 | " temp=semantic_temp,\n", 100 | " top_k=semantic_top_k,\n", 101 | " top_p=semantic_top_p,\n", 102 | " )\n", 103 | "\n", 104 | " x_coarse_gen = generate_coarse(\n", 105 | " x_semantic,\n", 106 | " history_prompt=voice_name if use_coarse_history_prompt else None,\n", 107 | " temp=coarse_temp,\n", 108 | " top_k=coarse_top_k,\n", 109 | " top_p=coarse_top_p,\n", 110 | " )\n", 111 | " x_fine_gen = generate_fine(\n", 112 | " x_coarse_gen,\n", 113 | " history_prompt=voice_name if use_fine_history_prompt else None,\n", 114 | " temp=fine_temp,\n", 115 | " )\n", 116 | "\n", 117 | " if output_full:\n", 118 | " full_generation = {\n", 119 | " 'semantic_prompt': x_semantic,\n", 120 | " 'coarse_prompt': x_coarse_gen,\n", 121 | " 'fine_prompt': x_fine_gen,\n", 122 | " }\n", 123 | " return full_generation, codec_decode(x_fine_gen)\n", 124 | " return codec_decode(x_fine_gen)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": {}, 
131 | "outputs": [], 132 | "source": [ 133 | "text_prompt = \"I am Joe Biden... and this is the finetuned semantic, coarse and fine model! A lot better than the original!\"\n", 134 | "filepath = \"output/audio.wav\" # change this to your desired output path\n", 135 | "\n", 136 | "audio_array = generate_with_settings(\n", 137 | " text_prompt,\n", 138 | " semantic_temp=0.7,\n", 139 | " semantic_top_k=50,\n", 140 | " semantic_top_p=0.99,\n", 141 | " coarse_temp=0.7,\n", 142 | " coarse_top_k=50,\n", 143 | " coarse_top_p=0.95,\n", 144 | " fine_temp=0.5,\n", 145 | " voice_name=\"datasets/joe_biden_state_of_union/tokens/257.npz\",\n", 146 | " use_semantic_history_prompt=False,\n", 147 | " use_coarse_history_prompt=True,\n", 148 | " use_fine_history_prompt=True,\n", 149 | " output_full=False\n", 150 | ")\n", 151 | "\n", 152 | "write_wav(filepath, SAMPLE_RATE, audio_array)\n", 153 | "\n", 154 | "if use_rvc:\n", 155 | " index_rate = 0.75\n", 156 | " f0up_key = -6\n", 157 | " filter_radius = 3\n", 158 | " rms_mix_rate = 0.25\n", 159 | " protect = 0.33\n", 160 | " resample_sr = SAMPLE_RATE\n", 161 | " f0method = \"harvest\" #harvest or pm\n", 162 | " try:\n", 163 | " audio_array = vc_single(0,filepath,f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 164 | " except:\n", 165 | " audio_array = vc_single(0,filepath,f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 166 | " write_wav(filepath, SAMPLE_RATE, audio_array)\n", 167 | "\n", 168 | "Audio(audio_array, rate=SAMPLE_RATE)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "import re\n", 185 | "def split_and_recombine_text(text, desired_length=100, max_length=150):\n", 186 | " # from https://github.com/neonbjb/tortoise-tts\n", 187 | " \"\"\"Split text it into chunks of a desired length trying to keep sentences intact.\"\"\"\n", 188 | " # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii\n", 189 | " text = re.sub(r\"\\n\\n+\", \"\\n\", text)\n", 190 | " text = re.sub(r\"\\s+\", \" \", text)\n", 191 | " text = re.sub(r\"[“”]\", '\"', text)\n", 192 | "\n", 193 | " rv = []\n", 194 | " in_quote = False\n", 195 | " current = \"\"\n", 196 | " split_pos = []\n", 197 | " pos = -1\n", 198 | " end_pos = len(text) - 1\n", 199 | "\n", 200 | " def seek(delta):\n", 201 | " nonlocal pos, in_quote, current\n", 202 | " is_neg = delta < 0\n", 203 | " for _ in range(abs(delta)):\n", 204 | " if is_neg:\n", 205 | " pos -= 1\n", 206 | " current = current[:-1]\n", 207 | " else:\n", 208 | " pos += 1\n", 209 | " current += text[pos]\n", 210 | " if text[pos] == '\"':\n", 211 | " in_quote = not in_quote\n", 212 | " return text[pos]\n", 213 | "\n", 214 | " def peek(delta):\n", 215 | " p = pos + delta\n", 216 | " return text[p] if p < end_pos and p >= 0 else \"\"\n", 217 | "\n", 218 | " def commit():\n", 219 | " nonlocal rv, current, split_pos\n", 220 | " rv.append(current)\n", 221 | " current = \"\"\n", 222 | " split_pos = []\n", 223 | "\n", 224 | " while pos < end_pos:\n", 225 | " c = seek(1)\n", 226 | " # do we need to force a split?\n", 227 | " if len(current) >= max_length:\n", 228 | " if len(split_pos) > 0 and len(current) 
> (desired_length / 2):\n", 229 | " # we have at least one sentence and we are over half the desired length, seek back to the last split\n", 230 | " d = pos - split_pos[-1]\n", 231 | " seek(-d)\n", 232 | " else:\n", 233 | " # no full sentences, seek back until we are not in the middle of a word and split there\n", 234 | " while c not in \"!?.\\n \" and pos > 0 and len(current) > desired_length:\n", 235 | " c = seek(-1)\n", 236 | " commit()\n", 237 | " # check for sentence boundaries\n", 238 | " elif not in_quote and (c in \"!?\\n\" or (c == \".\" and peek(1) in \"\\n \")):\n", 239 | " # seek forward if we have consecutive boundary markers but still within the max length\n", 240 | " while (\n", 241 | " pos < len(text) - 1 and len(current) < max_length and peek(1) in \"!?.\"\n", 242 | " ):\n", 243 | " c = seek(1)\n", 244 | " split_pos.append(pos)\n", 245 | " if len(current) >= desired_length:\n", 246 | " commit()\n", 247 | " # treat end of quote as a boundary if its followed by a space or newline\n", 248 | " elif in_quote and peek(1) == '\"' and peek(2) in \"\\n \":\n", 249 | " seek(2)\n", 250 | " split_pos.append(pos)\n", 251 | " rv.append(current)\n", 252 | "\n", 253 | " # clean up, remove lines with only whitespace or punctuation\n", 254 | " rv = [s.strip() for s in rv]\n", 255 | " rv = [s for s in rv if len(s) > 0 and not re.match(r\"^[\\s\\.,;:!?]*$\", s)]\n", 256 | "\n", 257 | " return rv\n", 258 | "\n", 259 | "def generate_with_settings(text_prompt, semantic_temp=0.7, semantic_top_k=50, semantic_top_p=0.95, coarse_temp=0.7, coarse_top_k=50, coarse_top_p=0.95, fine_temp=0.5, voice_name=None, use_semantic_history_prompt=True, use_coarse_history_prompt=True, use_fine_history_prompt=True, output_full=False):\n", 260 | " # generation with more control\n", 261 | " x_semantic = generate_text_semantic(\n", 262 | " text_prompt,\n", 263 | " history_prompt=voice_name if use_semantic_history_prompt else None,\n", 264 | " temp=semantic_temp,\n", 265 | " top_k=semantic_top_k,\n", 266 | " top_p=semantic_top_p,\n", 267 | " )\n", 268 | "\n", 269 | " x_coarse_gen = generate_coarse(\n", 270 | " x_semantic,\n", 271 | " history_prompt=voice_name if use_coarse_history_prompt else None,\n", 272 | " temp=coarse_temp,\n", 273 | " top_k=coarse_top_k,\n", 274 | " top_p=coarse_top_p,\n", 275 | " )\n", 276 | " x_fine_gen = generate_fine(\n", 277 | " x_coarse_gen,\n", 278 | " history_prompt=voice_name if use_fine_history_prompt else None,\n", 279 | " temp=fine_temp,\n", 280 | " )\n", 281 | "\n", 282 | " if output_full:\n", 283 | " full_generation = {\n", 284 | " 'semantic_prompt': x_semantic,\n", 285 | " 'coarse_prompt': x_coarse_gen,\n", 286 | " 'fine_prompt': x_fine_gen,\n", 287 | " }\n", 288 | " return full_generation, codec_decode(x_fine_gen)\n", 289 | " return codec_decode(x_fine_gen)" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": null, 295 | "metadata": {}, 296 | "outputs": [], 297 | "source": [ 298 | "text = \"\"\"The Uncharted Land of Discovery: A Journey Through Time and Space\n", 299 | "[clears throat]\n", 300 | "Chapter 1: The Dawn of Curiosity\n", 301 | "[takes breath]\n", 302 | "Since the dawn of humankind, our species has been driven by a powerful force: curiosity. It is an innate, unquenchable desire to explore, understand, and unravel the mysteries of the world around us. 
This primal urge has led us on countless adventures, pushing us to the farthest reaches of our planet and beyond.\n", 303 | "\n", 304 | "Early humans, huddled around a flickering fire, gazed up at the night sky and wondered what those twinkling lights were. They had no idea that their curiosity would eventually propel us into the vast, uncharted realm of space. As time progressed, our ancestors began to explore their surroundings, venturing beyond their caves and settlements, driven by the need to discover what lay beyond the horizon.\n", 305 | "\n", 306 | "hapter 2: The Age of Exploration\n", 307 | "\n", 308 | "The Age of Exploration marked a turning point in human history, as brave souls took to the seas in search of new lands, wealth, and knowledge. Pioneers like Christopher Columbus, Vasco da Gama, and Ferdinand Magellan set sail on perilous voyages, pushing the boundaries of what was known and understood.\n", 309 | "[clears throat]\n", 310 | "These intrepid explorers discovered new continents, mapped out previously unknown territories, and encountered diverse cultures. They also established trade routes, allowing for the exchange of goods, ideas, and innovations between distant societies. The Age of Exploration was not without its dark moments, however, as conquest, colonization, and exploitation often went hand in hand with discovery.\n", 311 | "[clears throat]\n", 312 | "Chapter 3: The Scientific Revolution\n", 313 | "[laughs]\n", 314 | "The Scientific Revolution was a period of profound change, as humanity began to question long-held beliefs and seek empirical evidence. Pioneers like Galileo Galilei, Isaac Newton, and Johannes Kepler sought to understand the natural world through observation, experimentation, and reason.\n", 315 | "[sighs]\n", 316 | "Their discoveries laid the foundation for modern science, transforming the way we view the universe and our place within it. New technologies, such as the telescope and the microscope, allowed us to peer deeper into the cosmos and the microscopic world, further expanding our understanding of reality.\n", 317 | "[gasps]\n", 318 | "Chapter 4: The Information Age\n", 319 | "\n", 320 | "The Information Age, sometimes referred to as the Digital Age, has revolutionized the way we communicate, learn, and access knowledge. With the advent of the internet and personal computers, information that was once reserved for the privileged few is now available to the masses.\n", 321 | "...\n", 322 | "This democratization of knowledge has led to an explosion of innovation, as ideas and information are shared across borders and cultures at lightning speed. The Information Age has also brought new challenges, as the rapid pace of technological advancements threatens to outpace our ability to adapt and raises questions about the ethical implications of our increasingly interconnected world.\n", 323 | "[laughter]\n", 324 | "Chapter 5: The Final Frontier\n", 325 | "[clears throat]\n", 326 | "As our knowledge of the universe expands, so too does our desire to explore the cosmos. Space exploration has come a long way since the first successful satellite, Sputnik, was launched in 1957. We have landed humans on the moon, sent probes to the far reaches of our solar system, and even glimpsed distant galaxies through powerful telescopes.\n", 327 | "\n", 328 | "The future of space exploration is filled with possibilities, from establishing colonies on Mars to the search for extraterrestrial life. 
As we venture further into the unknown, we continue to be driven by the same curiosity that has propelled us throughout history, always seeking to uncover the secrets of the universe and our place within it.\n", 329 | "...\n", 330 | "In conclusion, the human journey is one of discovery, driven by our innate curiosity and desire to understand the world around us. From the dawn of our species to the present day, we have continued to explore, learn, and adapt, pushing the boundaries of what is known and possible. As we continue to unravel the mysteries of the cosmos, our spirit.\"\"\"" 331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": null, 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "# Chunk the text into smaller pieces then combine the generated audio\n", 340 | "from time import time\n", 341 | "from tqdm.auto import tqdm\n", 342 | "from IPython.display import Audio\n", 343 | "from scipy.io.wavfile import write as write_wav\n", 344 | "import os\n", 345 | "import numpy as np\n", 346 | "\n", 347 | "# generation settings\n", 348 | "voice_name = \"datasets/joe_biden_state_of_union/tokens/257.npz\"\n", 349 | "out_filepath = 'audio/audio.wav'\n", 350 | "\n", 351 | "semantic_temp = 0.7\n", 352 | "semantic_top_k = 100\n", 353 | "semantic_top_p = 0.99\n", 354 | "\n", 355 | "coarse_temp = 0.7\n", 356 | "coarse_top_k = 100\n", 357 | "coarse_top_p = 0.95\n", 358 | "\n", 359 | "fine_temp = 0.7\n", 360 | "\n", 361 | "use_semantic_history_prompt = True\n", 362 | "use_coarse_history_prompt = True\n", 363 | "use_fine_history_prompt = True\n", 364 | "\n", 365 | "use_last_generation_as_history = False\n", 366 | "\n", 367 | "if use_rvc:\n", 368 | " index_rate = 0.75\n", 369 | " f0up_key = -6\n", 370 | " filter_radius = 3\n", 371 | " rms_mix_rate = 0.25\n", 372 | " protect = 0.33\n", 373 | " resample_sr = SAMPLE_RATE\n", 374 | " f0method = \"harvest\" #harvest or pm\n", 375 | "\n", 376 | "texts = split_and_recombine_text(text)\n", 377 | "\n", 378 | "all_parts = []\n", 379 | "for i, text in tqdm(enumerate(texts), total=len(texts)):\n", 380 | " full_generation, audio_array = generate_with_settings(\n", 381 | " text,\n", 382 | " semantic_temp=semantic_temp,\n", 383 | " semantic_top_k=semantic_top_k,\n", 384 | " semantic_top_p=semantic_top_p,\n", 385 | " coarse_temp=coarse_temp,\n", 386 | " coarse_top_k=coarse_top_k,\n", 387 | " coarse_top_p=coarse_top_p,\n", 388 | " fine_temp=fine_temp,\n", 389 | " voice_name=voice_name,\n", 390 | " use_semantic_history_prompt=use_semantic_history_prompt,\n", 391 | " use_coarse_history_prompt=use_coarse_history_prompt,\n", 392 | " use_fine_history_prompt=use_fine_history_prompt,\n", 393 | " output_full=True\n", 394 | " )\n", 395 | " if use_last_generation_as_history:\n", 396 | " # save to npz\n", 397 | " os.makedirs('_temp', exist_ok=True)\n", 398 | " np.savez_compressed(\n", 399 | " '_temp/history.npz',\n", 400 | " semantic_prompt=full_generation['semantic_prompt'],\n", 401 | " coarse_prompt=full_generation['coarse_prompt'],\n", 402 | " fine_prompt=full_generation['fine_prompt'],\n", 403 | " )\n", 404 | " voice_name = '_temp/history.npz'\n", 405 | " write_wav(out_filepath.replace('.wav', f'_{i}') + '.wav', SAMPLE_RATE, audio_array)\n", 406 | "\n", 407 | " if use_rvc:\n", 408 | " try:\n", 409 | " audio_array = vc_single(0,out_filepath.replace('.wav', f'_{i}') + '.wav',f0up_key,None,f0method,index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 410 | " except:\n", 
411 | " audio_array = vc_single(0,out_filepath.replace('.wav', f'_{i}') + '.wav',f0up_key,None,'pm',index_path,index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect)\n", 412 | " write_wav(out_filepath.replace('.wav', f'_{i}') + '.wav', SAMPLE_RATE, audio_array)\n", 413 | " all_parts.append(audio_array)\n", 414 | "\n", 415 | "audio_array = np.concatenate(all_parts, axis=-1)\n", 416 | "\n", 417 | "# save audio\n", 418 | "write_wav(out_filepath, SAMPLE_RATE, audio_array)\n", 419 | "\n", 420 | "# play audio\n", 421 | "Audio(audio_array, rate=SAMPLE_RATE)" 422 | ] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": null, 427 | "metadata": {}, 428 | "outputs": [], 429 | "source": [] 430 | } 431 | ], 432 | "metadata": { 433 | "kernelspec": { 434 | "display_name": "Python 3", 435 | "language": "python", 436 | "name": "python3" 437 | }, 438 | "language_info": { 439 | "codemirror_mode": { 440 | "name": "ipython", 441 | "version": 3 442 | }, 443 | "file_extension": ".py", 444 | "mimetype": "text/x-python", 445 | "name": "python", 446 | "nbconvert_exporter": "python", 447 | "pygments_lexer": "ipython3", 448 | "version": "3.10.8" 449 | }, 450 | "orig_nbformat": 4 451 | }, 452 | "nbformat": 4, 453 | "nbformat_minor": 2 454 | } 455 | -------------------------------------------------------------------------------- /utils/bitsandbytes.py: -------------------------------------------------------------------------------- 1 | # From https://github.com/huggingface/transformers/blob/e45e756d22206ca8fa9fb057c8c3d8fa79bf81c6/src/transformers/utils/bitsandbytes.py 2 | 3 | import warnings 4 | import sys 5 | import importlib.util 6 | from copy import deepcopy 7 | import copy 8 | import json 9 | import os 10 | from dataclasses import dataclass 11 | 12 | from typing import Any, Tuple, Union, Dict 13 | 14 | from packaging import version 15 | 16 | if sys.version_info < (3, 8): 17 | import importlib_metadata 18 | else: 19 | import importlib.metadata as importlib_metadata 20 | 21 | 22 | def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]: 23 | # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version 24 | package_exists = importlib.util.find_spec(pkg_name) is not None 25 | package_version = "N/A" 26 | if package_exists: 27 | try: 28 | package_version = importlib_metadata.version(pkg_name) 29 | package_exists = True 30 | except importlib_metadata.PackageNotFoundError: 31 | package_exists = False 32 | if return_version: 33 | return package_exists, package_version 34 | else: 35 | return package_exists 36 | 37 | _accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True) 38 | _bitsandbytes_available = _is_package_available("bitsandbytes") 39 | _torch_available, _torch_version = _is_package_available("torch", return_version=True) 40 | 41 | def is_accelerate_available(check_partial_state=False): 42 | if check_partial_state: 43 | return _accelerate_available and version.parse(_accelerate_version) >= version.parse("0.19.0") 44 | return _accelerate_available 45 | 46 | def is_bitsandbytes_available(): 47 | return _bitsandbytes_available 48 | 49 | def is_torch_available(): 50 | return _torch_available 51 | 52 | if is_bitsandbytes_available(): 53 | import bitsandbytes as bnb 54 | import torch 55 | import torch.nn as nn 56 | 57 | if is_accelerate_available(): 58 | from accelerate import init_empty_weights 59 | from 
accelerate.utils import find_tied_parameters 60 | 61 | 62 | def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None): 63 | """ 64 | A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing 65 | `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The 66 | function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the 67 | class `Int8Params` from `bitsandbytes`. 68 | 69 | Args: 70 | module (`torch.nn.Module`): 71 | The module in which the tensor we want to move lives. 72 | tensor_name (`str`): 73 | The full name of the parameter/buffer. 74 | device (`int`, `str` or `torch.device`): 75 | The device on which to set the tensor. 76 | value (`torch.Tensor`, *optional*): 77 | The value of the tensor (useful when going from the meta device to any other device). 78 | fp16_statistics (`torch.HalfTensor`, *optional*): 79 | The list of fp16 statistics to set on the module, used for serialization. 80 | """ 81 | # Recurse if needed 82 | if "." in tensor_name: 83 | splits = tensor_name.split(".") 84 | for split in splits[:-1]: 85 | new_module = getattr(module, split) 86 | if new_module is None: 87 | raise ValueError(f"{module} has no attribute {split}.") 88 | module = new_module 89 | tensor_name = splits[-1] 90 | 91 | if tensor_name not in module._parameters and tensor_name not in module._buffers: 92 | raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") 93 | is_buffer = tensor_name in module._buffers 94 | old_value = getattr(module, tensor_name) 95 | 96 | if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: 97 | raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") 98 | 99 | is_4bit = False 100 | is_8bit = False 101 | if is_buffer or not is_bitsandbytes_available(): 102 | is_8bit = False 103 | is_4bit = False 104 | else: 105 | is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit) 106 | is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params) 107 | 108 | if is_8bit or is_4bit: 109 | param = module._parameters[tensor_name] 110 | if param.device.type != "cuda": 111 | if value is None: 112 | new_value = old_value.to(device) 113 | elif isinstance(value, torch.Tensor): 114 | new_value = value.to("cpu") 115 | if value.dtype == torch.int8: 116 | is_8bit_serializable = version.parse(importlib_metadata.version("bitsandbytes")) > version.parse( 117 | "0.37.2" 118 | ) 119 | if not is_8bit_serializable: 120 | raise ValueError( 121 | "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " 122 | "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." 
123 | ) 124 | else: 125 | new_value = torch.tensor(value, device="cpu") 126 | 127 | kwargs = old_value.__dict__ 128 | if is_8bit: 129 | new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device) 130 | elif is_4bit: 131 | new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device) 132 | 133 | module._parameters[tensor_name] = new_value 134 | if fp16_statistics is not None: 135 | setattr(module.weight, "SCB", fp16_statistics.to(device)) 136 | 137 | else: 138 | if value is None: 139 | new_value = old_value.to(device) 140 | elif isinstance(value, torch.Tensor): 141 | new_value = value.to(device) 142 | else: 143 | new_value = torch.tensor(value, device=device) 144 | 145 | if is_buffer: 146 | module._buffers[tensor_name] = new_value 147 | else: 148 | new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad) 149 | module._parameters[tensor_name] = new_value 150 | 151 | 152 | def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None): 153 | """ 154 | A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules from the `bitsandbytes` 155 | library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8(): 156 | 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA 157 | version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ 158 | bitsandbytes` 159 | 160 | The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should 161 | be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no 162 | CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a 163 | matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 164 | (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no 165 | predictive degradation is possible for very large models (>=176B parameters). 166 | 167 | Parameters: 168 | model (`torch.nn.Module`): 169 | Input model or `torch.nn.Module` as the function is run recursively. 170 | modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`): 171 | Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision 172 | for numerical stability reasons. 173 | current_key_name (`List[`str`]`, *optional*): 174 | An array to track the current key of the recursion. This is used to check whether the current key (part of 175 | it) is not in the list of modules to not convert (for instances modules that are offloaded to `cpu` or 176 | `disk`). 
177 | """ 178 | modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert 179 | for name, module in model.named_children(): 180 | if current_key_name is None: 181 | current_key_name = [] 182 | 183 | if isinstance(module, nn.Linear) and name not in modules_to_not_convert: 184 | # Check if the current key is not in the `modules_to_not_convert` 185 | if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): 186 | with init_empty_weights(): 187 | if quantization_config.quantization_method() == "llm_int8": 188 | model._modules[name] = bnb.nn.Linear8bitLt( 189 | module.in_features, 190 | module.out_features, 191 | module.bias is not None, 192 | has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, 193 | threshold=quantization_config.llm_int8_threshold, 194 | ) 195 | else: 196 | if ( 197 | quantization_config.llm_int8_skip_modules is not None 198 | and name in quantization_config.llm_int8_skip_modules 199 | ): 200 | pass 201 | else: 202 | model._modules[name] = bnb.nn.Linear4bit( 203 | module.in_features, 204 | module.out_features, 205 | module.bias is not None, 206 | quantization_config.bnb_4bit_compute_dtype, 207 | compress_statistics=quantization_config.bnb_4bit_use_double_quant, 208 | quant_type=quantization_config.bnb_4bit_quant_type, 209 | ) 210 | # Force requires grad to False to avoid unexpected errors 211 | model._modules[name].requires_grad_(False) 212 | # Remove the last key for recursion 213 | if len(list(module.children())) > 0: 214 | replace_with_bnb_linear( 215 | module, 216 | modules_to_not_convert, 217 | current_key_name, 218 | quantization_config, 219 | ) 220 | return model 221 | 222 | 223 | # For backward compatibility 224 | def replace_8bit_linear(*args, **kwargs): 225 | warnings.warn( 226 | "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", 227 | FutureWarning, 228 | ) 229 | return replace_with_bnb_linear(*args, **kwargs) 230 | 231 | 232 | # For backward compatiblity 233 | def set_module_8bit_tensor_to_device(*args, **kwargs): 234 | warnings.warn( 235 | "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", 236 | FutureWarning, 237 | ) 238 | return set_module_quantized_tensor_to_device(*args, **kwargs) 239 | 240 | 241 | def get_keys_to_not_convert(model): 242 | r""" 243 | An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules 244 | we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want 245 | to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in 246 | int8. 
247 | 248 | Parameters: 249 | model (`torch.nn.Module`): 250 | Input model 251 | """ 252 | # Create a copy of the model and tie the weights, then 253 | # check if it contains tied weights 254 | tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` 255 | tied_model.tie_weights() 256 | 257 | tied_params = find_tied_parameters(tied_model) 258 | # For compatibility with Accelerate < 0.18 259 | if isinstance(tied_params, dict): 260 | tied_keys = list(tied_params.values()) 261 | else: 262 | tied_keys = sum([x[1:] for x in tied_params], []) 263 | has_tied_params = len(tied_keys) > 0 264 | 265 | # Check if it is a base model 266 | is_base_model = not hasattr(model, model.base_model_prefix) 267 | 268 | # Ignore this for base models (BertModel, GPT2Model, etc.) 269 | if (not has_tied_params) and is_base_model: 270 | return [] 271 | 272 | # otherwise they have an attached head 273 | list_modules = list(model.named_parameters()) 274 | list_last_module = [list_modules[-1][0]] 275 | 276 | # add last module together with tied weights 277 | intersection = set(list_last_module) - set(tied_keys) 278 | list_untouched = tied_keys + list(intersection) 279 | 280 | # remove ".weight" from the keys 281 | names_to_remove = [".weight", ".bias"] 282 | filtered_module_names = [] 283 | for name in list_untouched: 284 | for name_to_remove in names_to_remove: 285 | if name_to_remove in name: 286 | name = name.replace(name_to_remove, "") 287 | filtered_module_names.append(name) 288 | 289 | return filtered_module_names 290 | 291 | #!/usr/bin/env python 292 | # coding=utf-8 293 | 294 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved. 295 | # 296 | # Licensed under the Apache License, Version 2.0 (the "License"); 297 | # you may not use this file except in compliance with the License. 298 | # You may obtain a copy of the License at 299 | # 300 | # http://www.apache.org/licenses/LICENSE-2.0 301 | # 302 | # Unless required by applicable law or agreed to in writing, software 303 | # distributed under the License is distributed on an "AS IS" BASIS, 304 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 305 | # See the License for the specific language governing permissions and 306 | # limitations under the License. 307 | 308 | 309 | 310 | if is_torch_available(): 311 | import torch 312 | 313 | 314 | @dataclass 315 | class BitsAndBytesConfig: 316 | """ 317 | This is a wrapper class about all possible attributes and features that you can play with a model that has been 318 | loaded using `bitsandbytes`. 319 | 320 | This replaces `load_in_8bit` or `load_in_4bit`therefore both options are mutually exclusive. 321 | 322 | Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`, 323 | then more arguments will be added to this class. 324 | 325 | Args: 326 | load_in_8bit (`bool`, *optional*, defaults to `False`): 327 | This flag is used to enable 8-bit quantization with LLM.int8(). 328 | load_in_4bit (`bool`, *optional*, defaults to `False`): 329 | This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from 330 | `bitsandbytes`. 
331 | llm_int8_threshold (`float`, *optional*, defaults to 6): 332 | This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix 333 | Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value 334 | that is above this threshold will be considered an outlier and the operation on those values will be done 335 | in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but 336 | there are some exceptional systematic outliers that are very differently distributed for large models. 337 | These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of 338 | magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, 339 | but a lower threshold might be needed for more unstable models (small models, fine-tuning). 340 | llm_int8_skip_modules (`List[str]`, *optional*): 341 | An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as 342 | Jukebox that have several heads in different places and not necessarily at the last position. For example, 343 | for `CausalLM` models, the last `lm_head` is kept in its original `dtype`. 344 | llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`): 345 | This flag is used for advanced use cases and users that are aware of this feature. If you want to split 346 | your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use 347 | this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8 348 | operations will not be run on CPU. 349 | llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`): 350 | This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not 351 | have to be converted back and forth for the backward pass. 352 | bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`): 353 | This sets the computational type, which might be different from the input type. For example, inputs might be 354 | fp32, but computation can be set to bf16 for speedups. 355 | bnb_4bit_quant_type (`str`, {fp4, nf4}, defaults to `fp4`): 356 | This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types, 357 | which are specified by `fp4` or `nf4`. 358 | bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`): 359 | This flag is used for nested quantization where the quantization constants from the first quantization are 360 | quantized again. 361 | kwargs (`Dict[str, Any]`, *optional*): 362 | Additional parameters from which to initialize the configuration object. 
363 | """ 364 | 365 | def __init__( 366 | self, 367 | load_in_8bit=False, 368 | load_in_4bit=False, 369 | llm_int8_threshold=6.0, 370 | llm_int8_skip_modules=None, 371 | llm_int8_enable_fp32_cpu_offload=False, 372 | llm_int8_has_fp16_weight=False, 373 | bnb_4bit_compute_dtype=None, 374 | bnb_4bit_quant_type="fp4", 375 | bnb_4bit_use_double_quant=False, 376 | **kwargs, 377 | ): 378 | self.load_in_8bit = load_in_8bit 379 | self.load_in_4bit = load_in_4bit 380 | self.llm_int8_threshold = llm_int8_threshold 381 | self.llm_int8_skip_modules = llm_int8_skip_modules 382 | self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload 383 | self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight 384 | self.bnb_4bit_quant_type = bnb_4bit_quant_type 385 | self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant 386 | 387 | if bnb_4bit_compute_dtype is None: 388 | self.bnb_4bit_compute_dtype = torch.float32 389 | elif isinstance(bnb_4bit_compute_dtype, str): 390 | self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) 391 | elif isinstance(bnb_4bit_compute_dtype, torch.dtype): 392 | self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype 393 | else: 394 | raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") 395 | 396 | self.post_init() 397 | 398 | def post_init(self): 399 | r""" 400 | Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 401 | """ 402 | if not isinstance(self.llm_int8_threshold, float): 403 | raise ValueError("llm_int8_threshold must be a float") 404 | 405 | if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): 406 | raise ValueError("llm_int8_skip_modules must be a list of strings") 407 | if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): 408 | raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") 409 | 410 | if not isinstance(self.llm_int8_has_fp16_weight, bool): 411 | raise ValueError("llm_int8_has_fp16_weight must be a boolean") 412 | 413 | if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): 414 | raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") 415 | 416 | if not isinstance(self.bnb_4bit_quant_type, str): 417 | raise ValueError("bnb_4bit_quant_type must be a string") 418 | 419 | if not isinstance(self.bnb_4bit_use_double_quant, bool): 420 | raise ValueError("bnb_4bit_use_double_quant must be a boolean") 421 | 422 | if self.load_in_4bit and not version.parse(importlib_metadata.version("bitsandbytes")) >= version.parse( 423 | "0.39.0" 424 | ): 425 | raise ValueError( 426 | "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" 427 | ) 428 | 429 | def is_quantizable(self): 430 | r""" 431 | Returns `True` if the model is quantizable, `False` otherwise. 432 | """ 433 | return self.load_in_8bit or self.load_in_4bit 434 | 435 | def quantization_method(self): 436 | r""" 437 | This method returns the quantization method used for the model. If the model is not quantizable, it returns 438 | `None`. 
439 | """ 440 | if self.load_in_8bit: 441 | return "llm_int8" 442 | elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": 443 | return "fp4" 444 | elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": 445 | return "nf4" 446 | else: 447 | return None 448 | 449 | @classmethod 450 | def from_dict(cls, config_dict, return_unused_kwargs, **kwargs): 451 | """ 452 | Instantiates a [`BitsAndBytesConfig`] from a Python dictionary of parameters. 453 | 454 | Args: 455 | config_dict (`Dict[str, Any]`): 456 | Dictionary that will be used to instantiate the configuration object. 457 | return_unused_kwargs (`bool`): 458 | Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in 459 | `PreTrainedModel`. 460 | kwargs (`Dict[str, Any]`): 461 | Additional parameters from which to initialize the configuration object. 462 | 463 | Returns: 464 | [`BitsAndBytesConfig`]: The configuration object instantiated from those parameters. 465 | """ 466 | 467 | config = cls(**config_dict) 468 | 469 | to_remove = [] 470 | for key, value in kwargs.items(): 471 | if hasattr(config, key): 472 | setattr(config, key, value) 473 | to_remove.append(key) 474 | for key in to_remove: 475 | kwargs.pop(key, None) 476 | 477 | if return_unused_kwargs: 478 | return config, kwargs 479 | else: 480 | return config 481 | 482 | def to_json_file(self, json_file_path: Union[str, os.PathLike]): 483 | """ 484 | Save this instance to a JSON file. 485 | 486 | Args: 487 | json_file_path (`str` or `os.PathLike`): 488 | Path to the JSON file in which this configuration instance's parameters will be saved. 489 | use_diff (`bool`, *optional*, defaults to `True`): 490 | If set to `True`, only the difference between the config instance and the default 491 | `BitsAndBytesConfig()` is serialized to JSON file. 492 | """ 493 | with open(json_file_path, "w", encoding="utf-8") as writer: 494 | config_dict = self.to_dict() 495 | json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" 496 | 497 | writer.write(json_string) 498 | 499 | def to_dict(self) -> Dict[str, Any]: 500 | """ 501 | Serializes this instance to a Python dictionary. Returns: 502 | `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. 
503 | """ 504 | 505 | output = copy.deepcopy(self.__dict__) 506 | output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] 507 | 508 | return output -------------------------------------------------------------------------------- /bark/generation.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import gc 3 | import hashlib 4 | import os 5 | import re 6 | import json 7 | 8 | from encodec import EncodecModel 9 | import funcy 10 | import logging 11 | import numpy as np 12 | from scipy.special import softmax 13 | import torch 14 | import torch.nn.functional as F 15 | import tqdm 16 | from transformers import BertTokenizer 17 | from huggingface_hub import hf_hub_download 18 | 19 | from .model import GPTConfig, GPT 20 | from .model_fine import FineGPT, FineGPTConfig 21 | 22 | if ( 23 | torch.cuda.is_available() and 24 | hasattr(torch.cuda, "amp") and 25 | hasattr(torch.cuda.amp, "autocast") and 26 | hasattr(torch.cuda, "is_bf16_supported") and 27 | torch.cuda.is_bf16_supported() 28 | ): 29 | autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16) 30 | else: 31 | @contextlib.contextmanager 32 | def autocast(): 33 | yield 34 | 35 | 36 | # hold models in global scope to lazy load 37 | global models 38 | models = {} 39 | 40 | global models_devices 41 | models_devices = {} 42 | 43 | 44 | CONTEXT_WINDOW_SIZE = 1024 45 | 46 | SEMANTIC_RATE_HZ = 49.9 47 | SEMANTIC_VOCAB_SIZE = 10_000 48 | 49 | CODEBOOK_SIZE = 1024 50 | N_COARSE_CODEBOOKS = 2 51 | N_FINE_CODEBOOKS = 8 52 | COARSE_RATE_HZ = 75 53 | 54 | SAMPLE_RATE = 24_000 55 | 56 | 57 | logger = logging.getLogger(__name__) 58 | 59 | 60 | CUR_PATH = os.path.dirname(os.path.abspath(__file__)) 61 | 62 | 63 | default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") 64 | CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "serp", "bark_v0") 65 | 66 | 67 | USE_SMALL_MODELS = os.environ.get("SERP_USE_SMALL_MODELS", False) 68 | GLOBAL_ENABLE_MPS = os.environ.get("SERP_ENABLE_MPS", False) 69 | OFFLOAD_CPU = os.environ.get("SERP_OFFLOAD_CPU", False) 70 | 71 | 72 | REMOTE_MODEL_PATHS = { 73 | "text_small": { 74 | "repo_id": "suno/bark", 75 | "file_name": "text.pt", 76 | "checksum": "b3e42bcbab23b688355cd44128c4cdd3", 77 | }, 78 | "coarse_small": { 79 | "repo_id": "suno/bark", 80 | "file_name": "coarse.pt", 81 | "checksum": "5fe964825e3b0321f9d5f3857b89194d", 82 | }, 83 | "fine_small": { 84 | "repo_id": "suno/bark", 85 | "file_name": "fine.pt", 86 | "checksum": "5428d1befe05be2ba32195496e58dc90", 87 | }, 88 | "text": { 89 | "repo_id": "suno/bark", 90 | "file_name": "text_2.pt", 91 | "checksum": "54afa89d65e318d4f5f80e8e8799026a", 92 | }, 93 | "coarse": { 94 | "repo_id": "suno/bark", 95 | "file_name": "coarse_2.pt", 96 | "checksum": "8a98094e5e3a255a5c9c0ab7efe8fd28", 97 | }, 98 | "fine": { 99 | "repo_id": "suno/bark", 100 | "file_name": "fine_2.pt", 101 | "checksum": "59d184ed44e3650774a2f0503a48a97b", 102 | }, 103 | } 104 | 105 | 106 | if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available(): 107 | logger.warning( 108 | "torch version does not support flash attention. You will get faster" + 109 | " inference speed by upgrade torch to newest nightly version." 
110 | ) 111 | 112 | 113 | def _string_md5(s): 114 | m = hashlib.md5() 115 | m.update(s.encode("utf-8")) 116 | return m.hexdigest() 117 | 118 | 119 | def _md5(fname): 120 | hash_md5 = hashlib.md5() 121 | with open(fname, "rb") as f: 122 | for chunk in iter(lambda: f.read(4096), b""): 123 | hash_md5.update(chunk) 124 | return hash_md5.hexdigest() 125 | 126 | 127 | def _get_ckpt_path(model_type, use_small=False, path=None): 128 | model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type 129 | model_name = REMOTE_MODEL_PATHS[model_key]["file_name"] 130 | if path is None: 131 | path = CACHE_DIR 132 | return os.path.join(path, f"{model_name}") 133 | 134 | 135 | def _grab_best_device(use_gpu=True): 136 | if torch.cuda.device_count() > 0 and use_gpu: 137 | device = "cuda" 138 | elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS: 139 | device = "mps" 140 | else: 141 | device = "cpu" 142 | return device 143 | 144 | 145 | def _download(from_hf_path, file_name, to_local_path): 146 | to_local_path = to_local_path.replace("\\", "/") 147 | path = '/'.join(to_local_path.split("/")[:-1]) 148 | os.makedirs(path, exist_ok=True) 149 | hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=path) 150 | os.replace(os.path.join(path, file_name), to_local_path) 151 | 152 | class InferenceContext: 153 | def __init__(self, benchmark=False): 154 | # we can't expect inputs to be the same length, so disable benchmarking by default 155 | self._chosen_cudnn_benchmark = benchmark 156 | self._cudnn_benchmark = None 157 | 158 | def __enter__(self): 159 | self._cudnn_benchmark = torch.backends.cudnn.benchmark 160 | torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark 161 | 162 | def __exit__(self, exc_type, exc_value, exc_traceback): 163 | torch.backends.cudnn.benchmark = self._cudnn_benchmark 164 | 165 | 166 | if torch.cuda.is_available(): 167 | torch.backends.cuda.matmul.allow_tf32 = True 168 | torch.backends.cudnn.allow_tf32 = True 169 | 170 | 171 | @contextlib.contextmanager 172 | def _inference_mode(): 173 | with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast(): 174 | yield 175 | 176 | 177 | def _clear_cuda_cache(): 178 | if torch.cuda.is_available(): 179 | torch.cuda.empty_cache() 180 | torch.cuda.synchronize() 181 | 182 | 183 | def clean_models(model_key=None): 184 | global models 185 | model_keys = [model_key] if model_key is not None else models.keys() 186 | for k in model_keys: 187 | if k in models: 188 | del models[k] 189 | _clear_cuda_cache() 190 | gc.collect() 191 | 192 | 193 | def _load_model(ckpt_path, device, use_small=False, model_type="text"): 194 | if model_type == "text": 195 | ConfigClass = GPTConfig 196 | ModelClass = GPT 197 | elif model_type == "coarse": 198 | ConfigClass = GPTConfig 199 | ModelClass = GPT 200 | elif model_type == "fine": 201 | ConfigClass = FineGPTConfig 202 | ModelClass = FineGPT 203 | else: 204 | raise NotImplementedError() 205 | model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type 206 | model_info = REMOTE_MODEL_PATHS[model_key] 207 | # if ( 208 | # os.path.exists(ckpt_path) and 209 | # _md5(ckpt_path) != model_info["checksum"] 210 | # ): 211 | # logger.warning(f"found outdated {model_type} model, removing.") 212 | # os.remove(ckpt_path) 213 | if not os.path.exists(ckpt_path): 214 | logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.") 215 | _download(model_info["repo_id"], model_info["file_name"], ckpt_path) 216 | checkpoint = 
torch.load(ckpt_path, map_location=device) 217 | # this is a hack 218 | # check if config.json is in the same directory as the checkpoint 219 | # if so, load it 220 | # otherwise, assume it's in the checkpoint 221 | config_path = os.path.join(os.path.dirname(ckpt_path), "config.json") 222 | if os.path.exists(config_path): 223 | with open(config_path, "r") as f: 224 | model_args = json.load(f) 225 | else: 226 | model_args = checkpoint["model_args"] 227 | if "input_vocab_size" not in model_args: 228 | model_args["input_vocab_size"] = model_args["vocab_size"] 229 | model_args["output_vocab_size"] = model_args["vocab_size"] 230 | del model_args["vocab_size"] 231 | gptconf = ConfigClass(**model_args) 232 | model = ModelClass(gptconf) 233 | if checkpoint.get("model", None) is not None: 234 | state_dict = checkpoint["model"] 235 | else: 236 | state_dict = checkpoint 237 | # fixup checkpoint 238 | unwanted_prefix = "_orig_mod." 239 | for k, v in list(state_dict.items()): 240 | if k.startswith(unwanted_prefix): 241 | state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k) 242 | unwanted_suffixes = [ 243 | "lora_right_weight", 244 | "lora_left_weight", 245 | "lora_right_bias", 246 | "lora_left_bias", 247 | ] 248 | for k, v in list(state_dict.items()): 249 | for suffix in unwanted_suffixes: 250 | if k.endswith(suffix): 251 | state_dict.pop(k) 252 | # super hacky - should probably refactor this 253 | if state_dict.get('lm_head.0.weight', None) is not None: 254 | state_dict['lm_head.weight'] = state_dict.pop('lm_head.0.weight') 255 | if state_dict.get('lm_heads.0.0.weight', None) is not None: 256 | state_dict['lm_heads.0.weight'] = state_dict.pop('lm_heads.0.0.weight') 257 | if state_dict.get('lm_heads.1.0.weight', None) is not None: 258 | state_dict['lm_heads.1.weight'] = state_dict.pop('lm_heads.1.0.weight') 259 | if state_dict.get('lm_heads.2.0.weight', None) is not None: 260 | state_dict['lm_heads.2.weight'] = state_dict.pop('lm_heads.2.0.weight') 261 | if state_dict.get('lm_heads.3.0.weight', None) is not None: 262 | state_dict['lm_heads.3.weight'] = state_dict.pop('lm_heads.3.0.weight') 263 | if state_dict.get('lm_heads.4.0.weight', None) is not None: 264 | state_dict['lm_heads.4.weight'] = state_dict.pop('lm_heads.4.0.weight') 265 | if state_dict.get('lm_heads.5.0.weight', None) is not None: 266 | state_dict['lm_heads.5.weight'] = state_dict.pop('lm_heads.5.0.weight') 267 | if state_dict.get('lm_heads.6.0.weight', None) is not None: 268 | state_dict['lm_heads.6.weight'] = state_dict.pop('lm_heads.6.0.weight') 269 | extra_keys = set(state_dict.keys()) - set(model.state_dict().keys()) 270 | extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")]) 271 | missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) 272 | missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")]) 273 | if len(extra_keys) != 0: 274 | print(f"extra keys found: {extra_keys}") 275 | if len(missing_keys) != 0: 276 | raise ValueError(f"missing keys: {missing_keys}") 277 | model.load_state_dict(state_dict, strict=False) 278 | n_params = model.get_num_params() 279 | if checkpoint.get("best_val_loss", None) is not None: 280 | val_loss = checkpoint["best_val_loss"].item() 281 | logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss") 282 | model.eval() 283 | model.to(device) 284 | del checkpoint, state_dict 285 | _clear_cuda_cache() 286 | if model_type == "text": 287 | tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased") 288 | 
return { 289 | "model": model, 290 | "tokenizer": tokenizer, 291 | } 292 | return model 293 | 294 | 295 | def _load_codec_model(device): 296 | model = EncodecModel.encodec_model_24khz() 297 | model.set_target_bandwidth(6.0) 298 | model.eval() 299 | model.to(device) 300 | _clear_cuda_cache() 301 | return model 302 | 303 | 304 | def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text", path=None): 305 | _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small) 306 | if model_type not in ("text", "coarse", "fine"): 307 | raise NotImplementedError() 308 | global models 309 | global models_devices 310 | device = _grab_best_device(use_gpu=use_gpu) 311 | model_key = f"{model_type}" 312 | if OFFLOAD_CPU: 313 | models_devices[model_key] = device 314 | device = "cpu" 315 | if model_key not in models or force_reload: 316 | if path.endswith(".ckpt") or path.endswith(".pt") or path.endswith(".bin"): 317 | ckpt_path = path 318 | else: 319 | ckpt_path = _get_ckpt_path(model_type, use_small=use_small, path=path) 320 | # clean_models(model_key=model_key) 321 | model = _load_model_f(ckpt_path, device) 322 | models[model_key] = model 323 | if model_type == "text": 324 | models[model_key]["model"].to(device) 325 | else: 326 | models[model_key].to(device) 327 | return models[model_key] 328 | 329 | 330 | def load_codec_model(use_gpu=True, force_reload=False): 331 | global models 332 | global models_devices 333 | device = _grab_best_device(use_gpu=use_gpu) 334 | if device == "mps": 335 | # encodec doesn't support mps 336 | device = "cpu" 337 | model_key = "codec" 338 | if OFFLOAD_CPU: 339 | models_devices[model_key] = device 340 | device = "cpu" 341 | if model_key not in models or force_reload: 342 | clean_models(model_key=model_key) 343 | model = _load_codec_model(device) 344 | models[model_key] = model 345 | models[model_key].to(device) 346 | return models[model_key] 347 | 348 | 349 | def preload_models( 350 | text_use_gpu=True, 351 | text_use_small=False, 352 | text_model_path=None, 353 | coarse_use_gpu=True, 354 | coarse_use_small=False, 355 | coarse_model_path=None, 356 | fine_use_gpu=True, 357 | fine_use_small=False, 358 | fine_model_path=None, 359 | codec_use_gpu=True, 360 | force_reload=False, 361 | path=None, 362 | ): 363 | """Load all the necessary models for the pipeline.""" 364 | if _grab_best_device() == "cpu" and ( 365 | text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu 366 | ): 367 | logger.warning("No GPU being used. 
Careful, inference might be very slow!") 368 | _ = load_model( 369 | model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload, path=path if text_model_path is None else text_model_path 370 | ) 371 | _ = load_model( 372 | model_type="coarse", 373 | use_gpu=coarse_use_gpu, 374 | use_small=coarse_use_small, 375 | force_reload=force_reload, 376 | path=path if coarse_model_path is None else coarse_model_path, 377 | ) 378 | _ = load_model( 379 | model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload, path=path if fine_model_path is None else fine_model_path 380 | ) 381 | _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload) 382 | 383 | 384 | #### 385 | # Generation Functionality 386 | #### 387 | 388 | def _tokenize(tokenizer, text): 389 | return tokenizer.encode(text, add_special_tokens=False) 390 | 391 | 392 | def _detokenize(tokenizer, enc_text): 393 | return tokenizer.decode(enc_text) 394 | 395 | 396 | def _normalize_whitespace(text): 397 | return re.sub(r"\s+", " ", text).strip() 398 | 399 | TEXT_ENCODING_OFFSET = 10_048 400 | SEMANTIC_PAD_TOKEN = 10_000 401 | TEXT_PAD_TOKEN = 129_595 402 | SEMANTIC_INFER_TOKEN = 129_599 403 | 404 | 405 | def generate_text_semantic( 406 | text, 407 | history_prompt=None, 408 | temp=0.7, 409 | top_k=None, 410 | top_p=None, 411 | silent=False, 412 | min_eos_p=0.2, 413 | max_gen_duration_s=None, 414 | allow_early_stop=True, 415 | use_kv_caching=False, 416 | ): 417 | """Generate semantic tokens from text.""" 418 | assert isinstance(text, str) 419 | text = _normalize_whitespace(text) 420 | assert len(text.strip()) > 0 421 | if history_prompt is not None: 422 | if history_prompt.endswith(".npz"): 423 | try: 424 | semantic_history = np.load(history_prompt)["semantic_prompt"] 425 | except: 426 | semantic_history = np.load(history_prompt)["semantic"] 427 | else: 428 | semantic_history = np.load( 429 | os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz") 430 | )["semantic_prompt"] 431 | assert ( 432 | isinstance(semantic_history, np.ndarray) 433 | and len(semantic_history.shape) == 1 434 | and len(semantic_history) > 0 435 | and semantic_history.min() >= 0 436 | and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 437 | ) 438 | else: 439 | semantic_history = None 440 | # load models if not yet exist 441 | global models 442 | global models_devices 443 | if "text" not in models: 444 | preload_models() 445 | model_container = models["text"] 446 | model = model_container["model"] 447 | tokenizer = model_container["tokenizer"] 448 | encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET 449 | if OFFLOAD_CPU: 450 | model.to(models_devices["text"]) 451 | device = next(model.parameters()).device 452 | if len(encoded_text) > 256: 453 | p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1) 454 | logger.warning(f"warning, text too long, lopping of last {p}%") 455 | encoded_text = encoded_text[:256] 456 | encoded_text = np.pad( 457 | encoded_text, 458 | (0, 256 - len(encoded_text)), 459 | constant_values=TEXT_PAD_TOKEN, 460 | mode="constant", 461 | ) 462 | if semantic_history is not None: 463 | semantic_history = semantic_history.astype(np.int64) 464 | # lop off if history is too long, pad if needed 465 | semantic_history = semantic_history[-256:] 466 | semantic_history = np.pad( 467 | semantic_history, 468 | (0, 256 - len(semantic_history)), 469 | constant_values=SEMANTIC_PAD_TOKEN, 470 | mode="constant", 471 | ) 472 | else: 473 | 
semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256) 474 | x = torch.from_numpy( 475 | np.hstack([ 476 | encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN]) 477 | ]).astype(np.int64) 478 | )[None] 479 | assert x.shape[1] == 256 + 256 + 1 480 | with _inference_mode(): 481 | x = x.to(device) 482 | n_tot_steps = 768 483 | # custom tqdm updates since we don't know when eos will occur 484 | pbar = tqdm.tqdm(disable=silent, total=100) 485 | pbar_state = 0 486 | tot_generated_duration_s = 0 487 | kv_cache = None 488 | for n in range(n_tot_steps): 489 | if use_kv_caching and kv_cache is not None: 490 | x_input = x[:, [-1]] 491 | else: 492 | x_input = x 493 | logits, kv_cache = model( 494 | x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache 495 | ) 496 | relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE] 497 | if allow_early_stop: 498 | relevant_logits = torch.hstack( 499 | (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]]) # eos 500 | ) 501 | if top_p is not None: 502 | # faster to convert to numpy 503 | original_device = relevant_logits.device 504 | relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() 505 | sorted_indices = np.argsort(relevant_logits)[::-1] 506 | sorted_logits = relevant_logits[sorted_indices] 507 | cumulative_probs = np.cumsum(softmax(sorted_logits)) 508 | sorted_indices_to_remove = cumulative_probs > top_p 509 | sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() 510 | sorted_indices_to_remove[0] = False 511 | relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf 512 | relevant_logits = torch.from_numpy(relevant_logits) 513 | relevant_logits = relevant_logits.to(original_device) 514 | if top_k is not None: 515 | v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) 516 | relevant_logits[relevant_logits < v[-1]] = -float("Inf") 517 | probs = F.softmax(relevant_logits / temp, dim=-1) 518 | # multinomial bugged on mps: shuttle to cpu if necessary 519 | inf_device = probs.device 520 | if probs.device.type == "mps": 521 | probs = probs.to("cpu") 522 | item_next = torch.multinomial(probs, num_samples=1) 523 | probs = probs.to(inf_device) 524 | item_next = item_next.to(inf_device) 525 | if allow_early_stop and ( 526 | item_next == SEMANTIC_VOCAB_SIZE 527 | or (min_eos_p is not None and probs[-1] >= min_eos_p) 528 | ): 529 | # eos found, so break 530 | pbar.update(100 - pbar_state) 531 | break 532 | x = torch.cat((x, item_next[None]), dim=1) 533 | tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ 534 | if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s: 535 | pbar.update(100 - pbar_state) 536 | break 537 | if n == n_tot_steps - 1: 538 | pbar.update(100 - pbar_state) 539 | break 540 | del logits, relevant_logits, probs, item_next 541 | req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))]) 542 | if req_pbar_state > pbar_state: 543 | pbar.update(req_pbar_state - pbar_state) 544 | pbar_state = req_pbar_state 545 | pbar.close() 546 | out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :] 547 | if OFFLOAD_CPU: 548 | model.to("cpu") 549 | assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE) 550 | _clear_cuda_cache() 551 | return out 552 | 553 | 554 | def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE): 555 | assert len(arr.shape) == 2 556 | arr = arr.copy() 557 | if offset_size is not None: 558 | for n in range(1, arr.shape[0]): 559 | arr[n, :] += offset_size * n 560 | flat_arr = arr.ravel("F") 561 | return flat_arr 562 | 563 | 
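# Illustrative sketch (added for this write-up, not part of the original module): what
# `_flatten_codebooks` does to the (N_COARSE_CODEBOOKS, T) coarse history. Each later
# codebook row is shifted by a multiple of `offset_size` so all rows share one vocabulary,
# and the column-major ravel keeps the codes belonging to one time step adjacent. The
# numbers below are made up purely for illustration.
def _demo_flatten_codebooks():
    arr = np.array([[1, 2, 3],    # codebook 0 over three time steps
                    [4, 5, 6]])   # codebook 1 over the same three steps
    flat = _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE)
    # codebook 1 becomes [1028, 1029, 1030]; interleaving column-major gives:
    assert flat.tolist() == [1, 1028, 2, 1029, 3, 1030]
    # generate_coarse() additionally adds SEMANTIC_VOCAB_SIZE to the flattened history so
    # coarse token ids never collide with semantic token ids (0..SEMANTIC_VOCAB_SIZE-1).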
564 | COARSE_SEMANTIC_PAD_TOKEN = 12_048 565 | COARSE_INFER_TOKEN = 12_050 566 | 567 | 568 | def generate_coarse( 569 | x_semantic, 570 | history_prompt=None, 571 | temp=0.7, 572 | top_k=None, 573 | top_p=None, 574 | silent=False, 575 | max_coarse_history=630, # min 60 (faster), max 630 (more context) 576 | sliding_window_len=60, 577 | use_kv_caching=False, 578 | ): 579 | """Generate coarse audio codes from semantic tokens.""" 580 | assert ( 581 | isinstance(x_semantic, np.ndarray) 582 | and len(x_semantic.shape) == 1 583 | and len(x_semantic) > 0 584 | and x_semantic.min() >= 0 585 | and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1 586 | ) 587 | assert 60 <= max_coarse_history <= 630 588 | assert max_coarse_history + sliding_window_len <= 1024 - 256 589 | semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS 590 | max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) 591 | if history_prompt is not None: 592 | if history_prompt.endswith(".npz"): 593 | x_history = np.load(history_prompt) 594 | else: 595 | x_history = np.load( 596 | os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz") 597 | ) 598 | try: 599 | x_semantic_history = x_history["semantic_prompt"] 600 | x_coarse_history = x_history["coarse_prompt"] 601 | except: 602 | x_semantic_history = x_history["semantic"] 603 | x_coarse_history = x_history["coarse"] 604 | assert ( 605 | isinstance(x_semantic_history, np.ndarray) 606 | and len(x_semantic_history.shape) == 1 607 | and len(x_semantic_history) > 0 608 | and x_semantic_history.min() >= 0 609 | and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 610 | and isinstance(x_coarse_history, np.ndarray) 611 | and len(x_coarse_history.shape) == 2 612 | and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS 613 | and x_coarse_history.shape[-1] >= 0 614 | and x_coarse_history.min() >= 0 615 | and x_coarse_history.max() <= CODEBOOK_SIZE - 1 616 | and ( 617 | round(x_coarse_history.shape[-1] / len(x_semantic_history), 1) 618 | == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1) 619 | ) 620 | ) 621 | x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE 622 | # trim histories correctly 623 | n_semantic_hist_provided = np.min( 624 | [ 625 | max_semantic_history, 626 | len(x_semantic_history) - len(x_semantic_history) % 2, 627 | int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)), 628 | ] 629 | ) 630 | n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) 631 | x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32) 632 | x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32) 633 | # TODO: bit of a hack for time alignment (sounds better) 634 | x_coarse_history = x_coarse_history[:-2] 635 | else: 636 | x_semantic_history = np.array([], dtype=np.int32) 637 | x_coarse_history = np.array([], dtype=np.int32) 638 | # load models if not yet exist 639 | global models 640 | global models_devices 641 | if "coarse" not in models: 642 | preload_models() 643 | model = models["coarse"] 644 | if OFFLOAD_CPU: 645 | model.to(models_devices["coarse"]) 646 | device = next(model.parameters()).device 647 | # start loop 648 | n_steps = int( 649 | round( 650 | np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS) 651 | * N_COARSE_CODEBOOKS 652 | ) 653 | ) 654 | assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0 655 | x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32) 656 
| x_coarse = x_coarse_history.astype(np.int32) 657 | base_semantic_idx = len(x_semantic_history) 658 | with _inference_mode(): 659 | x_semantic_in = torch.from_numpy(x_semantic)[None].to(device) 660 | x_coarse_in = torch.from_numpy(x_coarse)[None].to(device) 661 | n_window_steps = int(np.ceil(n_steps / sliding_window_len)) 662 | n_step = 0 663 | for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent): 664 | semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio)) 665 | # pad from right side 666 | x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :] 667 | x_in = x_in[:, :256] 668 | x_in = F.pad( 669 | x_in, 670 | (0, 256 - x_in.shape[-1]), 671 | "constant", 672 | COARSE_SEMANTIC_PAD_TOKEN, 673 | ) 674 | x_in = torch.hstack( 675 | [ 676 | x_in, 677 | torch.tensor([COARSE_INFER_TOKEN])[None].to(device), 678 | x_coarse_in[:, -max_coarse_history:], 679 | ] 680 | ) 681 | kv_cache = None 682 | for _ in range(sliding_window_len): 683 | if n_step >= n_steps: 684 | continue 685 | is_major_step = n_step % N_COARSE_CODEBOOKS == 0 686 | 687 | if use_kv_caching and kv_cache is not None: 688 | x_input = x_in[:, [-1]] 689 | else: 690 | x_input = x_in 691 | 692 | logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache) 693 | logit_start_idx = ( 694 | SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE 695 | ) 696 | logit_end_idx = ( 697 | SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE 698 | ) 699 | relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx] 700 | if top_p is not None: 701 | # faster to convert to numpy 702 | original_device = relevant_logits.device 703 | relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() 704 | sorted_indices = np.argsort(relevant_logits)[::-1] 705 | sorted_logits = relevant_logits[sorted_indices] 706 | cumulative_probs = np.cumsum(softmax(sorted_logits)) 707 | sorted_indices_to_remove = cumulative_probs > top_p 708 | sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() 709 | sorted_indices_to_remove[0] = False 710 | relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf 711 | relevant_logits = torch.from_numpy(relevant_logits) 712 | relevant_logits = relevant_logits.to(original_device) 713 | if top_k is not None: 714 | v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) 715 | relevant_logits[relevant_logits < v[-1]] = -float("Inf") 716 | probs = F.softmax(relevant_logits / temp, dim=-1) 717 | # multinomial bugged on mps: shuttle to cpu if necessary 718 | inf_device = probs.device 719 | if probs.device.type == "mps": 720 | probs = probs.to("cpu") 721 | item_next = torch.multinomial(probs, num_samples=1) 722 | probs = probs.to(inf_device) 723 | item_next = item_next.to(inf_device) 724 | item_next += logit_start_idx 725 | x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1) 726 | x_in = torch.cat((x_in, item_next[None]), dim=1) 727 | del logits, relevant_logits, probs, item_next 728 | n_step += 1 729 | del x_in 730 | del x_semantic_in 731 | if OFFLOAD_CPU: 732 | model.to("cpu") 733 | gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :] 734 | del x_coarse_in 735 | assert len(gen_coarse_arr) == n_steps 736 | gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE 737 | for n in range(1, N_COARSE_CODEBOOKS): 738 | gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE 739 | _clear_cuda_cache() 740 | return 
gen_coarse_audio_arr 741 | 742 | 743 | def generate_fine( 744 | x_coarse_gen, 745 | history_prompt=None, 746 | temp=0.5, 747 | silent=True, 748 | ): 749 | """Generate full audio codes from coarse audio codes.""" 750 | assert ( 751 | isinstance(x_coarse_gen, np.ndarray) 752 | and len(x_coarse_gen.shape) == 2 753 | and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1 754 | and x_coarse_gen.shape[1] > 0 755 | and x_coarse_gen.min() >= 0 756 | and x_coarse_gen.max() <= CODEBOOK_SIZE - 1 757 | ) 758 | if history_prompt is not None: 759 | if history_prompt.endswith(".npz"): 760 | try: 761 | x_fine_history = np.load(history_prompt)["fine_prompt"] 762 | except: 763 | x_fine_history = np.load(history_prompt)["fine"] 764 | else: 765 | x_fine_history = np.load( 766 | os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz") 767 | )["fine_prompt"] 768 | assert ( 769 | isinstance(x_fine_history, np.ndarray) 770 | and len(x_fine_history.shape) == 2 771 | and x_fine_history.shape[0] == N_FINE_CODEBOOKS 772 | and x_fine_history.shape[1] >= 0 773 | and x_fine_history.min() >= 0 774 | and x_fine_history.max() <= CODEBOOK_SIZE - 1 775 | ) 776 | else: 777 | x_fine_history = None 778 | n_coarse = x_coarse_gen.shape[0] 779 | # load models if not yet exist 780 | global models 781 | global models_devices 782 | if "fine" not in models: 783 | preload_models() 784 | model = models["fine"] 785 | if OFFLOAD_CPU: 786 | model.to(models_devices["fine"]) 787 | device = next(model.parameters()).device 788 | # make input arr 789 | in_arr = np.vstack( 790 | [ 791 | x_coarse_gen, 792 | np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1])) 793 | + CODEBOOK_SIZE, # padding 794 | ] 795 | ).astype(np.int32) 796 | # prepend history if available (max 512) 797 | if x_fine_history is not None: 798 | x_fine_history = x_fine_history.astype(np.int32) 799 | in_arr = np.hstack( 800 | [ 801 | x_fine_history[:, -512:].astype(np.int32), 802 | in_arr, 803 | ] 804 | ) 805 | n_history = x_fine_history[:, -512:].shape[1] 806 | else: 807 | n_history = 0 808 | n_remove_from_end = 0 809 | # need to pad if too short (since non-causal model) 810 | if in_arr.shape[1] < 1024: 811 | n_remove_from_end = 1024 - in_arr.shape[1] 812 | in_arr = np.hstack( 813 | [ 814 | in_arr, 815 | np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE, 816 | ] 817 | ) 818 | # we can be lazy about fractional loop and just keep overwriting codebooks 819 | n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1 820 | with _inference_mode(): 821 | in_arr = torch.tensor(in_arr.T).to(device) 822 | for n in tqdm.tqdm(range(n_loops), disable=silent): 823 | start_idx = np.min([n * 512, in_arr.shape[0] - 1024]) 824 | start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512]) 825 | rel_start_fill_idx = start_fill_idx - start_idx 826 | in_buffer = in_arr[start_idx : start_idx + 1024, :][None] 827 | for nn in range(n_coarse, N_FINE_CODEBOOKS): 828 | logits = model(nn, in_buffer) 829 | if temp is None: 830 | relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE] 831 | codebook_preds = torch.argmax(relevant_logits, -1) 832 | else: 833 | relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp 834 | probs = F.softmax(relevant_logits, dim=-1) 835 | # multinomial bugged on mps: shuttle to cpu if necessary 836 | inf_device = probs.device 837 | if probs.device.type == "mps": 838 | probs = probs.to("cpu") 839 | codebook_preds = torch.hstack( 840 | [ 841 | torch.multinomial(probs[nnn], 
num_samples=1).to(inf_device) 842 | for nnn in range(rel_start_fill_idx, 1024) 843 | ] 844 | ) 845 | in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds 846 | del logits, codebook_preds 847 | # transfer over info into model_in and convert to numpy 848 | for nn in range(n_coarse, N_FINE_CODEBOOKS): 849 | in_arr[ 850 | start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn 851 | ] = in_buffer[0, rel_start_fill_idx:, nn] 852 | del in_buffer 853 | gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T 854 | del in_arr 855 | if OFFLOAD_CPU: 856 | model.to("cpu") 857 | gen_fine_arr = gen_fine_arr[:, n_history:] 858 | if n_remove_from_end > 0: 859 | gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end] 860 | assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1] 861 | _clear_cuda_cache() 862 | return gen_fine_arr 863 | 864 | 865 | def codec_decode(fine_tokens): 866 | """Turn quantized audio codes into audio array using encodec.""" 867 | # load models if not yet exist 868 | global models 869 | global models_devices 870 | if "codec" not in models: 871 | preload_models() 872 | model = models["codec"] 873 | if OFFLOAD_CPU: 874 | model.to(models_devices["codec"]) 875 | device = next(model.parameters()).device 876 | arr = torch.from_numpy(fine_tokens)[None] 877 | arr = arr.to(device) 878 | arr = arr.transpose(0, 1) 879 | emb = model.quantizer.decode(arr) 880 | out = model.decoder(emb) 881 | audio_arr = out.detach().cpu().numpy().squeeze() 882 | del arr, emb, out 883 | if OFFLOAD_CPU: 884 | model.to("cpu") 885 | return audio_arr 886 | --------------------------------------------------------------------------------
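A minimal end-to-end sketch of how the functions in `bark/generation.py` compose (illustrative only; it assumes the package is importable as `bark`, that the chosen speaker prompt `.npz` exists under `bark/assets/prompts`, and that `scipy` is available for writing the WAV file; the repo's notebooks and `bark/api.py` offer higher-level wrappers around the same steps):

    from scipy.io.wavfile import write as write_wav
    from bark.generation import (
        CACHE_DIR, SAMPLE_RATE, preload_models,
        generate_text_semantic, generate_coarse, generate_fine, codec_decode,
    )

    # Download/load the text, coarse, fine and codec models. This fork's load_model()
    # expects a concrete `path`, so point it at the default cache directory.
    preload_models(path=CACHE_DIR)

    prompt = "en_speaker_0"  # any prompt .npz bundled under bark/assets/prompts, or a path to a cloned-voice .npz
    semantic = generate_text_semantic("Hello, this is a test.", history_prompt=prompt, temp=0.7)
    coarse = generate_coarse(semantic, history_prompt=prompt, temp=0.7, use_kv_caching=True)
    fine = generate_fine(coarse, history_prompt=prompt, temp=0.5)
    audio = codec_decode(fine)  # float numpy array at SAMPLE_RATE (24 kHz)

    write_wav("bark_output.wav", SAMPLE_RATE, audio)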