├── .gitignore
├── LICENSE
├── README.md
├── bark
├── __init__.py
├── __main__.py
├── api.py
├── assets
│ └── prompts
│ │ ├── announcer.npz
│ │ ├── de_speaker_0.npz
│ │ ├── de_speaker_1.npz
│ │ ├── de_speaker_2.npz
│ │ ├── de_speaker_3.npz
│ │ ├── de_speaker_4.npz
│ │ ├── de_speaker_5.npz
│ │ ├── de_speaker_6.npz
│ │ ├── de_speaker_7.npz
│ │ ├── de_speaker_8.npz
│ │ ├── de_speaker_9.npz
│ │ ├── en_speaker_0.npz
│ │ ├── en_speaker_1.npz
│ │ ├── en_speaker_2.npz
│ │ ├── en_speaker_3.npz
│ │ ├── en_speaker_4.npz
│ │ ├── en_speaker_5.npz
│ │ ├── en_speaker_6.npz
│ │ ├── en_speaker_7.npz
│ │ ├── en_speaker_8.npz
│ │ ├── en_speaker_9.npz
│ │ ├── es_speaker_0.npz
│ │ ├── es_speaker_1.npz
│ │ ├── es_speaker_2.npz
│ │ ├── es_speaker_3.npz
│ │ ├── es_speaker_4.npz
│ │ ├── es_speaker_5.npz
│ │ ├── es_speaker_6.npz
│ │ ├── es_speaker_7.npz
│ │ ├── es_speaker_8.npz
│ │ ├── es_speaker_9.npz
│ │ ├── fr_speaker_0.npz
│ │ ├── fr_speaker_1.npz
│ │ ├── fr_speaker_2.npz
│ │ ├── fr_speaker_3.npz
│ │ ├── fr_speaker_4.npz
│ │ ├── fr_speaker_5.npz
│ │ ├── fr_speaker_6.npz
│ │ ├── fr_speaker_7.npz
│ │ ├── fr_speaker_8.npz
│ │ ├── fr_speaker_9.npz
│ │ ├── hi_speaker_0.npz
│ │ ├── hi_speaker_1.npz
│ │ ├── hi_speaker_2.npz
│ │ ├── hi_speaker_3.npz
│ │ ├── hi_speaker_4.npz
│ │ ├── hi_speaker_5.npz
│ │ ├── hi_speaker_6.npz
│ │ ├── hi_speaker_7.npz
│ │ ├── hi_speaker_8.npz
│ │ ├── hi_speaker_9.npz
│ │ ├── it_speaker_0.npz
│ │ ├── it_speaker_1.npz
│ │ ├── it_speaker_2.npz
│ │ ├── it_speaker_3.npz
│ │ ├── it_speaker_4.npz
│ │ ├── it_speaker_5.npz
│ │ ├── it_speaker_6.npz
│ │ ├── it_speaker_7.npz
│ │ ├── it_speaker_8.npz
│ │ ├── it_speaker_9.npz
│ │ ├── ja_speaker_0.npz
│ │ ├── ja_speaker_1.npz
│ │ ├── ja_speaker_2.npz
│ │ ├── ja_speaker_3.npz
│ │ ├── ja_speaker_4.npz
│ │ ├── ja_speaker_5.npz
│ │ ├── ja_speaker_6.npz
│ │ ├── ja_speaker_7.npz
│ │ ├── ja_speaker_8.npz
│ │ ├── ja_speaker_9.npz
│ │ ├── ko_speaker_0.npz
│ │ ├── ko_speaker_1.npz
│ │ ├── ko_speaker_2.npz
│ │ ├── ko_speaker_3.npz
│ │ ├── ko_speaker_4.npz
│ │ ├── ko_speaker_5.npz
│ │ ├── ko_speaker_6.npz
│ │ ├── ko_speaker_7.npz
│ │ ├── ko_speaker_8.npz
│ │ ├── ko_speaker_9.npz
│ │ ├── pl_speaker_0.npz
│ │ ├── pl_speaker_1.npz
│ │ ├── pl_speaker_2.npz
│ │ ├── pl_speaker_3.npz
│ │ ├── pl_speaker_4.npz
│ │ ├── pl_speaker_5.npz
│ │ ├── pl_speaker_6.npz
│ │ ├── pl_speaker_7.npz
│ │ ├── pl_speaker_8.npz
│ │ ├── pl_speaker_9.npz
│ │ ├── pt_speaker_0.npz
│ │ ├── pt_speaker_1.npz
│ │ ├── pt_speaker_2.npz
│ │ ├── pt_speaker_3.npz
│ │ ├── pt_speaker_4.npz
│ │ ├── pt_speaker_5.npz
│ │ ├── pt_speaker_6.npz
│ │ ├── pt_speaker_7.npz
│ │ ├── pt_speaker_8.npz
│ │ ├── pt_speaker_9.npz
│ │ ├── readme.md
│ │ ├── ru_speaker_0.npz
│ │ ├── ru_speaker_1.npz
│ │ ├── ru_speaker_2.npz
│ │ ├── ru_speaker_3.npz
│ │ ├── ru_speaker_4.npz
│ │ ├── ru_speaker_5.npz
│ │ ├── ru_speaker_6.npz
│ │ ├── ru_speaker_7.npz
│ │ ├── ru_speaker_8.npz
│ │ ├── ru_speaker_9.npz
│ │ ├── speaker_0.npz
│ │ ├── speaker_1.npz
│ │ ├── speaker_2.npz
│ │ ├── speaker_3.npz
│ │ ├── speaker_4.npz
│ │ ├── speaker_5.npz
│ │ ├── speaker_6.npz
│ │ ├── speaker_7.npz
│ │ ├── speaker_8.npz
│ │ ├── speaker_9.npz
│ │ ├── tr_speaker_0.npz
│ │ ├── tr_speaker_1.npz
│ │ ├── tr_speaker_2.npz
│ │ ├── tr_speaker_3.npz
│ │ ├── tr_speaker_4.npz
│ │ ├── tr_speaker_5.npz
│ │ ├── tr_speaker_6.npz
│ │ ├── tr_speaker_7.npz
│ │ ├── tr_speaker_8.npz
│ │ ├── tr_speaker_9.npz
│ │ ├── v2
│ │ ├── de_speaker_0.npz
│ │ ├── de_speaker_1.npz
│ │ ├── de_speaker_2.npz
│ │ ├── de_speaker_3.npz
│ │ ├── de_speaker_4.npz
│ │ ├── de_speaker_5.npz
│ │ ├── de_speaker_6.npz
│ │ ├── de_speaker_7.npz
│ │ ├── de_speaker_8.npz
│ │ ├── de_speaker_9.npz
│ │ ├── en_speaker_0.npz
│ │ ├── en_speaker_1.npz
│ │ ├── en_speaker_2.npz
│ │ ├── en_speaker_3.npz
│ │ ├── en_speaker_4.npz
│ │ ├── en_speaker_5.npz
│ │ ├── en_speaker_6.npz
│ │ ├── en_speaker_7.npz
│ │ ├── en_speaker_8.npz
│ │ ├── en_speaker_9.npz
│ │ ├── es_speaker_0.npz
│ │ ├── es_speaker_1.npz
│ │ ├── es_speaker_2.npz
│ │ ├── es_speaker_3.npz
│ │ ├── es_speaker_4.npz
│ │ ├── es_speaker_5.npz
│ │ ├── es_speaker_6.npz
│ │ ├── es_speaker_7.npz
│ │ ├── es_speaker_8.npz
│ │ ├── es_speaker_9.npz
│ │ ├── fr_speaker_0.npz
│ │ ├── fr_speaker_1.npz
│ │ ├── fr_speaker_2.npz
│ │ ├── fr_speaker_3.npz
│ │ ├── fr_speaker_4.npz
│ │ ├── fr_speaker_5.npz
│ │ ├── fr_speaker_6.npz
│ │ ├── fr_speaker_7.npz
│ │ ├── fr_speaker_8.npz
│ │ ├── fr_speaker_9.npz
│ │ ├── hi_speaker_0.npz
│ │ ├── hi_speaker_1.npz
│ │ ├── hi_speaker_2.npz
│ │ ├── hi_speaker_3.npz
│ │ ├── hi_speaker_4.npz
│ │ ├── hi_speaker_5.npz
│ │ ├── hi_speaker_6.npz
│ │ ├── hi_speaker_7.npz
│ │ ├── hi_speaker_8.npz
│ │ ├── hi_speaker_9.npz
│ │ ├── it_speaker_0.npz
│ │ ├── it_speaker_1.npz
│ │ ├── it_speaker_2.npz
│ │ ├── it_speaker_3.npz
│ │ ├── it_speaker_4.npz
│ │ ├── it_speaker_5.npz
│ │ ├── it_speaker_6.npz
│ │ ├── it_speaker_7.npz
│ │ ├── it_speaker_8.npz
│ │ ├── it_speaker_9.npz
│ │ ├── ja_speaker_0.npz
│ │ ├── ja_speaker_1.npz
│ │ ├── ja_speaker_2.npz
│ │ ├── ja_speaker_3.npz
│ │ ├── ja_speaker_4.npz
│ │ ├── ja_speaker_5.npz
│ │ ├── ja_speaker_6.npz
│ │ ├── ja_speaker_7.npz
│ │ ├── ja_speaker_8.npz
│ │ ├── ja_speaker_9.npz
│ │ ├── ko_speaker_0.npz
│ │ ├── ko_speaker_1.npz
│ │ ├── ko_speaker_2.npz
│ │ ├── ko_speaker_3.npz
│ │ ├── ko_speaker_4.npz
│ │ ├── ko_speaker_5.npz
│ │ ├── ko_speaker_6.npz
│ │ ├── ko_speaker_7.npz
│ │ ├── ko_speaker_8.npz
│ │ ├── ko_speaker_9.npz
│ │ ├── pl_speaker_0.npz
│ │ ├── pl_speaker_1.npz
│ │ ├── pl_speaker_2.npz
│ │ ├── pl_speaker_3.npz
│ │ ├── pl_speaker_4.npz
│ │ ├── pl_speaker_5.npz
│ │ ├── pl_speaker_6.npz
│ │ ├── pl_speaker_7.npz
│ │ ├── pl_speaker_8.npz
│ │ ├── pl_speaker_9.npz
│ │ ├── pt_speaker_0.npz
│ │ ├── pt_speaker_1.npz
│ │ ├── pt_speaker_2.npz
│ │ ├── pt_speaker_3.npz
│ │ ├── pt_speaker_4.npz
│ │ ├── pt_speaker_5.npz
│ │ ├── pt_speaker_6.npz
│ │ ├── pt_speaker_7.npz
│ │ ├── pt_speaker_8.npz
│ │ ├── pt_speaker_9.npz
│ │ ├── ru_speaker_0.npz
│ │ ├── ru_speaker_1.npz
│ │ ├── ru_speaker_2.npz
│ │ ├── ru_speaker_3.npz
│ │ ├── ru_speaker_4.npz
│ │ ├── ru_speaker_5.npz
│ │ ├── ru_speaker_6.npz
│ │ ├── ru_speaker_7.npz
│ │ ├── ru_speaker_8.npz
│ │ ├── ru_speaker_9.npz
│ │ ├── tr_speaker_0.npz
│ │ ├── tr_speaker_1.npz
│ │ ├── tr_speaker_2.npz
│ │ ├── tr_speaker_3.npz
│ │ ├── tr_speaker_4.npz
│ │ ├── tr_speaker_5.npz
│ │ ├── tr_speaker_6.npz
│ │ ├── tr_speaker_7.npz
│ │ ├── tr_speaker_8.npz
│ │ ├── tr_speaker_9.npz
│ │ ├── zh_speaker_0.npz
│ │ ├── zh_speaker_1.npz
│ │ ├── zh_speaker_2.npz
│ │ ├── zh_speaker_3.npz
│ │ ├── zh_speaker_4.npz
│ │ ├── zh_speaker_5.npz
│ │ ├── zh_speaker_6.npz
│ │ ├── zh_speaker_7.npz
│ │ ├── zh_speaker_8.npz
│ │ └── zh_speaker_9.npz
│ │ ├── zh_speaker_0.npz
│ │ ├── zh_speaker_1.npz
│ │ ├── zh_speaker_2.npz
│ │ ├── zh_speaker_3.npz
│ │ ├── zh_speaker_4.npz
│ │ ├── zh_speaker_5.npz
│ │ ├── zh_speaker_6.npz
│ │ ├── zh_speaker_7.npz
│ │ ├── zh_speaker_8.npz
│ │ └── zh_speaker_9.npz
├── cli.py
├── generation.py
├── model.py
└── model_fine.py
├── create_data.py
├── create_wavs.py
├── data.py
├── model-card.md
├── notebooks
├── fake_classifier.ipynb
├── long_form_generation.ipynb
├── memory_profiling_bark.ipynb
└── use_small_models_on_cpu.ipynb
├── pyproject.toml
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | suno_bark.egg-info/
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Suno, Inc
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Setup training data generation (windows venv setup)
2 | > Download or clone this repository to a local folder.
3 | >
4 | > If you have git installed: `git clone https://github.com/gitmylo/bark-data-gen`
5 |
6 | > `py -m venv venv` - create the venv (using `py` here, which uses the latest version, if you have a windows store install, use `python`.)
7 |
8 | > `call venv/Scripts/activate.bat` - activate the venv
9 |
10 | > `pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 --force` - install torch with cuda (with --force to ensure it gets reinstalled)
11 |
12 | > `python create_data.py` - run the script, outputs to `output` (make sure you're in the venv if you're running it again later)
13 |
14 | > `python create_wavs.py` - extra processing to create the wavs for the data. saved in `out_wavs`
15 |
16 | ## ~Currently there's no public training available.~
17 | ~I will release my model when I consider it ready.
18 | Dataset created from shared npy files will be shared on huggingface~
19 |
20 | ## Model, running and training.
21 | [Model](https://huggingface.co/GitMylo/bark-voice-cloning), [Training and running code](https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer), [Dataset used](https://huggingface.co/datasets/GitMylo/bark-semantic-training)
22 |
23 | ### ~Please share your created semantics and associated wavs to help me train~
24 | ~Do NOT rename the files, making a mistake during renaming will pollute the training data. It won't know which wavs fit which semantics.~
25 |
26 | ~Create a zip with your semantics. This is the data I'll need for training. There is still 2 more steps of processing required, but having the semantics helps out a bunch. Thanks.~
27 |
28 | ~Send them in dms to `mylo#6228` on discord, or create an issue on [this github repo](https://github.com/gitmylo/bark-data-gen/issues) with a link to download your semantics.~
29 |
30 | Training has completed.
31 |
32 | ## ------------- Old readme -------------
33 | # 🐶 Bark
34 |
35 | [](https://discord.gg/J2B2vsjKuE)
36 | [](https://twitter.com/OnusFM)
37 |
38 |
39 | > 🔗 [Examples](https://suno-ai.notion.site/Bark-Examples-5edae8b02a604b54a42244ba45ebc2e2) • [Suno Studio Waitlist](https://3os84zs17th.typeform.com/suno-studio) • [Updates](#-updates) • [How to Use](#-usage-in-python) • [Installation](#-installation) • [FAQ](#-faq)
40 |
41 | [//]:
(vertical spaces around image)
42 |
43 |
44 |
45 |
46 |
47 |
48 | Bark is a transformer-based text-to-audio model created by [Suno](https://suno.ai). Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. The model can also produce nonverbal communications like laughing, sighing and crying. To support the research community, we are providing access to pretrained model checkpoints, which are ready for inference and available for commercial use.
49 |
50 | ## ⚠ Disclaimer
51 | Bark was developed for research purposes. It is not a conventional text-to-speech model but instead a fully generative text-to-audio model, which can deviate in unexpected ways from provided prompts. Suno does not take responsibility for any output generated. Use at your own risk, and please act responsibly.
52 |
53 | ## 📖 Quick Index
54 | * [🚀 Updates](#-updates)
55 | * [💻 Installation](#-installation)
56 | * [🐍 Usage](#-usage-in-python)
57 | * [🌀 Live Examples](https://suno-ai.notion.site/Bark-Examples-5edae8b02a604b54a42244ba45ebc2e2)
58 | * [❓ FAQ](#-faq)
59 |
60 | ## 🎧 Demos
61 |
62 | [](https://huggingface.co/spaces/suno/bark)
63 | [](https://replicate.com/suno-ai/bark)
64 | [](https://colab.research.google.com/drive/1eJfA2XUa-mXwdMy7DoYKVYHI1iTd9Vkt?usp=sharing)
65 |
66 | ## 🚀 Updates
67 |
68 | **2023.05.01**
69 | - ©️ Bark is now licensed under the MIT License, meaning it's now available for commercial use!
70 | - ⚡ 2x speed-up on GPU. 10x speed-up on CPU. We also added an option for a smaller version of Bark, which offers additional speed-up with the trade-off of slightly lower quality.
71 | - 📕 [Long-form generation](notebooks/long_form_generation.ipynb), voice consistency enhancements and other examples are now documented in a new [notebooks](./notebooks) section.
72 | - 👥 We created a [voice prompt library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c). We hope this resource helps you find useful prompts for your use cases! You can also join us on [Discord](https://discord.gg/J2B2vsjKuE), where the community actively shares useful prompts in the **#audio-prompts** channel.
73 | - 💬 Growing community support and access to new features here:
74 |
75 | [](https://discord.gg/J2B2vsjKuE)
76 |
77 | - 💾 You can now use Bark with GPUs that have low VRAM (<4GB).
78 |
79 | **2023.04.20**
80 | - 🐶 Bark release!
81 |
82 | ## 🐍 Usage in Python
83 |
84 |
85 | 🪑 Basics
86 |
87 | ```python
88 | from bark import SAMPLE_RATE, generate_audio, preload_models
89 | from scipy.io.wavfile import write as write_wav
90 | from IPython.display import Audio
91 |
92 | # download and load all models
93 | preload_models()
94 |
95 | # generate audio from text
96 | text_prompt = """
97 | Hello, my name is Suno. And, uh — and I like pizza. [laughs]
98 | But I also have other interests such as playing tic tac toe.
99 | """
100 | audio_array = generate_audio(text_prompt)
101 |
102 | # save audio to disk
103 | write_wav("bark_generation.wav", SAMPLE_RATE, audio_array)
104 |
105 | # play text in notebook
106 | Audio(audio_array, rate=SAMPLE_RATE)
107 | ```
108 |
109 | [pizza1.webm](https://user-images.githubusercontent.com/34592747/cfa98e54-721c-4b9c-b962-688e09db684f.webm)
110 |
111 |
112 |
113 |
114 | 🌎 Foreign Language
115 |
116 | Bark supports various languages out-of-the-box and automatically determines language from input text. When prompted with code-switched text, Bark will attempt to employ the native accent for the respective languages. English quality is best for the time being, and we expect other languages to further improve with scaling.
117 |
118 |
119 |
120 | ```python
121 |
122 | text_prompt = """
123 | 추석은 내가 가장 좋아하는 명절이다. 나는 며칠 동안 휴식을 취하고 친구 및 가족과 시간을 보낼 수 있습니다.
124 | """
125 | audio_array = generate_audio(text_prompt)
126 | ```
127 | [suno_korean.webm](https://user-images.githubusercontent.com/32879321/235313033-dc4477b9-2da0-4b94-9c8b-a8c2d8f5bb5e.webm)
128 |
129 | *Note: since Bark recognizes languages automatically from input text, it is possible to use, for example, a german history prompt with english text. This usually leads to english audio with a german accent.*
130 | ```python
131 | text_prompt = """
132 | Der Dreißigjährige Krieg (1618-1648) war ein verheerender Konflikt, der Europa stark geprägt hat.
133 | This is a beginning of the history. If you want to hear more, please continue.
134 | """
135 | audio_array = generate_audio(text_prompt)
136 | ```
137 | [suno_german_accent.webm](https://user-images.githubusercontent.com/34592747/3f96ab3e-02ec-49cb-97a6-cf5af0b3524a.webm)
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 | 🎶 Music
146 | Bark can generate all types of audio, and, in principle, doesn't see a difference between speech and music. Sometimes Bark chooses to generate text as music, but you can help it out by adding music notes around your lyrics.
147 |
148 |
149 |
150 | ```python
151 | text_prompt = """
152 | ♪ In the jungle, the mighty jungle, the lion barks tonight ♪
153 | """
154 | audio_array = generate_audio(text_prompt)
155 | ```
156 | [lion.webm](https://user-images.githubusercontent.com/5068315/230684766-97f5ea23-ad99-473c-924b-66b6fab24289.webm)
157 |
158 |
159 |
160 | 🎤 Voice Presets
161 |
162 | Bark supports 100+ speaker presets across [supported languages](#supported-languages). You can browse the library of supported voice presets [HERE](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c), or in the [code](bark/assets/prompts). The community also often shares presets in [Discord](https://discord.gg/J2B2vsjKuE).
163 |
164 | > Bark tries to match the tone, pitch, emotion and prosody of a given preset, but does not currently support custom voice cloning. The model also attempts to preserve music, ambient noise, etc.
165 |
166 | ```python
167 | text_prompt = """
168 | I have a silky smooth voice, and today I will tell you about
169 | the exercise regimen of the common sloth.
170 | """
171 | audio_array = generate_audio(text_prompt, history_prompt="v2/en_speaker_1")
172 | ```
173 |
174 | [sloth.webm](https://user-images.githubusercontent.com/5068315/230684883-a344c619-a560-4ff5-8b99-b4463a34487b.webm)
175 |
176 |
177 | ### 📃 Generating Longer Audio
178 |
179 | By default, `generate_audio` works well with around 13 seconds of spoken text. For an example of how to do long-form generation, see 👉 **[Notebook](notebooks/long_form_generation.ipynb)** 👈
180 |
181 |
182 | Click to toggle example long-form generations (from the example notebook)
183 |
184 | [dialog.webm](https://user-images.githubusercontent.com/2565833/235463539-f57608da-e4cb-4062-8771-148e29512b01.webm)
185 |
186 | [longform_advanced.webm](https://user-images.githubusercontent.com/2565833/235463547-1c0d8744-269b-43fe-9630-897ea5731652.webm)
187 |
188 | [longform_basic.webm](https://user-images.githubusercontent.com/2565833/235463559-87efe9f8-a2db-4d59-b764-57db83f95270.webm)
189 |
190 |
191 |
192 |
193 | ## Command line
194 | ```commandline
195 | python -m bark --text "Hello, my name is Suno." --output_filename "example.wav"
196 | ```
197 |
198 | ## 💻 Installation
199 | *‼️ CAUTION ‼️ Do NOT use `pip install bark`. It installs a different package, which is not managed by Suno.*
200 | ```bash
201 | pip install git+https://github.com/suno-ai/bark.git
202 | ```
203 |
204 | or
205 |
206 | ```bash
207 | git clone https://github.com/suno-ai/bark
208 | cd bark && pip install .
209 | ```
210 |
211 |
212 | ## 🛠️ Hardware and Inference Speed
213 |
214 | Bark has been tested and works on both CPU and GPU (`pytorch 2.0+`, CUDA 11.7 and CUDA 12.0).
215 |
216 | On enterprise GPUs and PyTorch nightly, Bark can generate audio in roughly real-time. On older GPUs, default colab, or CPU, inference time might be significantly slower. For older GPUs or CPU you might want to consider using smaller models. Details can be found in our tutorial sections here.
217 |
218 | The full version of Bark requires around 12GB of VRAM to hold everything on GPU at the same time.
219 | To use a smaller version of the models, which should fit into 8GB VRAM, set the environment flag `SUNO_USE_SMALL_MODELS=True`.
220 |
221 | If you don't have hardware available or if you want to play with bigger versions of our models, you can also sign up for early access to our model playground [here](https://3os84zs17th.typeform.com/suno-studio).
222 |
223 | ## ⚙️ Details
224 |
225 | Bark is a fully generative text-to-audio model developed for research and demo purposes. It follows a GPT style architecture similar to [AudioLM](https://arxiv.org/abs/2209.03143) and [Vall-E](https://arxiv.org/abs/2301.02111) and a quantized Audio representation from [EnCodec](https://github.com/facebookresearch/encodec). It is not a conventional TTS model, but instead a fully generative text-to-audio model capable of deviating in unexpected ways from any given script. Different to previous approaches, the input text prompt is converted directly to audio without the intermediate use of phonemes. It can therefore generalize to arbitrary instructions beyond speech such as music lyrics, sound effects or other non-speech sounds.
226 |
227 | Below is a list of some known non-speech sounds, but we are finding more every day. Please let us know if you find patterns that work particularly well on [Discord](https://discord.gg/J2B2vsjKuE)!
228 |
229 | - `[laughter]`
230 | - `[laughs]`
231 | - `[sighs]`
232 | - `[music]`
233 | - `[gasps]`
234 | - `[clears throat]`
235 | - `—` or `...` for hesitations
236 | - `♪` for song lyrics
237 | - CAPITALIZATION for emphasis of a word
238 | - `[MAN]` and `[WOMAN]` to bias Bark toward male and female speakers, respectively
239 |
240 | ### Supported Languages
241 |
242 | | Language | Status |
243 | | --- | :---: |
244 | | English (en) | ✅ |
245 | | German (de) | ✅ |
246 | | Spanish (es) | ✅ |
247 | | French (fr) | ✅ |
248 | | Hindi (hi) | ✅ |
249 | | Italian (it) | ✅ |
250 | | Japanese (ja) | ✅ |
251 | | Korean (ko) | ✅ |
252 | | Polish (pl) | ✅ |
253 | | Portuguese (pt) | ✅ |
254 | | Russian (ru) | ✅ |
255 | | Turkish (tr) | ✅ |
256 | | Chinese, simplified (zh) | ✅ |
257 |
258 | Requests for future language support [here](https://github.com/suno-ai/bark/discussions/111) or in the **#forums** channel on [Discord](https://discord.com/invite/J2B2vsjKuE).
259 |
260 | ## 🙏 Appreciation
261 |
262 | - [nanoGPT](https://github.com/karpathy/nanoGPT) for a dead-simple and blazing fast implementation of GPT-style models
263 | - [EnCodec](https://github.com/facebookresearch/encodec) for a state-of-the-art implementation of a fantastic audio codec
264 | - [AudioLM](https://github.com/lucidrains/audiolm-pytorch) for related training and inference code
265 | - [Vall-E](https://arxiv.org/abs/2301.02111), [AudioLM](https://arxiv.org/abs/2209.03143) and many other ground-breaking papers that enabled the development of Bark
266 |
267 | ## © License
268 |
269 | Bark is licensed under the MIT License.
270 |
271 | Please contact us at 📧 [bark@suno.ai](mailto:bark@suno.ai) to request access to a larger version of the model.
272 |
273 | ## 📱 Community
274 |
275 | - [Twitter](https://twitter.com/OnusFM)
276 | - [Discord](https://discord.gg/J2B2vsjKuE)
277 |
278 | ## 🎧 Suno Studio (Early Access)
279 |
280 | We’re developing a playground for our models, including Bark.
281 |
282 | If you are interested, you can sign up for early access [here](https://3os84zs17th.typeform.com/suno-studio).
283 |
284 | ## ❓ FAQ
285 |
286 | #### How do I specify where models are downloaded and cached?
287 | * Bark uses Hugging Face to download and store models. You can find more info [here](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhome).
288 |
289 |
290 | #### Bark's generations sometimes differ from my prompts. What's happening?
291 | * Bark is a GPT-style model. As such, it may take some creative liberties in its generations, resulting in higher-variance model outputs than traditional text-to-speech approaches.
292 |
293 | #### What voices are supported by Bark?
294 | * Bark supports 100+ speaker presets across [supported languages](#supported-languages). You can browse the library of speaker presets [here](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c). The community also shares presets in [Discord](https://discord.gg/J2B2vsjKuE). Bark also supports generating unique random voices that fit the input text. Bark does not currently support custom voice cloning.
295 |
296 | #### Why is the output limited to ~13-14 seconds?
297 | * Bark is a GPT-style model, and its architecture/context window is optimized to output generations with roughly this length.
298 |
299 | #### How much VRAM do I need?
300 | * The full version of Bark requires around 12Gb of memory to hold everything on GPU at the same time. However, even smaller cards down to ~2Gb work with some additional settings. Simply add the following code snippet before your generation:
301 |
302 | ```python
303 | import os
304 | os.environ["SUNO_OFFLOAD_CPU"] = "True"
305 | os.environ["SUNO_USE_SMALL_MODELS"] = "True"
306 | ```
307 |
308 | #### My generated audio sounds like a 1980s phone call. What's happening?
309 | * Bark generates audio from scratch. It is not meant to create only high-fidelity, studio-quality speech. Rather, outputs could be anything from perfect speech to multiple people arguing at a baseball game recorded with bad microphones.
310 |
--------------------------------------------------------------------------------
/bark/__init__.py:
--------------------------------------------------------------------------------
1 | from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt
2 | from .generation import SAMPLE_RATE, preload_models
3 |
--------------------------------------------------------------------------------
/bark/__main__.py:
--------------------------------------------------------------------------------
1 | from .cli import cli
2 |
3 | cli()
4 |
--------------------------------------------------------------------------------
/bark/api.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Optional, Union
2 |
3 | import numpy as np
4 |
5 | from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic
6 |
7 |
def text_to_semantic(
    text: str,
    history_prompt: Optional[Union[Dict, str]] = None,
    temp: float = 0.7,
    silent: bool = False,
):
    """Convert input text into a semantic token array.

    Args:
        text: text to be turned into audio
        history_prompt: history choice for audio cloning
        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar

    Returns:
        numpy semantic array to be fed into `semantic_to_waveform`
    """
    # KV caching is always enabled here for faster autoregressive decoding.
    return generate_text_semantic(
        text,
        history_prompt=history_prompt,
        temp=temp,
        silent=silent,
        use_kv_caching=True,
    )
33 |
34 |
def semantic_to_waveform(
    semantic_tokens: np.ndarray,
    history_prompt: Optional[Union[Dict, str]] = None,
    temp: float = 0.7,
    silent: bool = False,
    output_full: bool = False,
):
    """Generate audio array from semantic input.

    Args:
        semantic_tokens: semantic token output from `text_to_semantic`
        history_prompt: history choice for audio cloning
        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar
        output_full: return full generation to be used as a history prompt

    Returns:
        numpy audio array at sample frequency 24khz
    """
    # Two-stage token generation: coarse codes first, then fine codes.
    coarse_tokens = generate_coarse(
        semantic_tokens,
        history_prompt=history_prompt,
        temp=temp,
        silent=silent,
        use_kv_caching=True,
    )
    # Fine-stage temperature is fixed at 0.5 regardless of `temp`.
    fine_tokens = generate_fine(
        coarse_tokens,
        history_prompt=history_prompt,
        temp=0.5,
    )
    audio_arr = codec_decode(fine_tokens)
    if not output_full:
        return audio_arr
    # Bundle every intermediate stage so it can be reused as a history prompt.
    full_generation = {
        "semantic_prompt": semantic_tokens,
        "coarse_prompt": coarse_tokens,
        "fine_prompt": fine_tokens,
    }
    return full_generation, audio_arr
75 |
76 |
def save_as_prompt(filepath, full_generation):
    """Persist a full generation as a reusable .npz history prompt.

    Args:
        filepath: destination path; must end in ".npz"
        full_generation: dict containing "semantic_prompt", "coarse_prompt"
            and "fine_prompt" arrays (as returned by `generate_audio` or
            `semantic_to_waveform` with `output_full=True`)

    Raises:
        ValueError: if `filepath` does not end in ".npz" or a required key
            is missing from `full_generation`
        TypeError: if `full_generation` is not a dict
    """
    # Validate with real exceptions rather than `assert`: asserts are
    # stripped under `python -O`, which would silently skip all checks.
    if not filepath.endswith(".npz"):
        raise ValueError("filepath must end with '.npz'")
    if not isinstance(full_generation, dict):
        raise TypeError("full_generation must be a dict")
    missing = [
        key
        for key in ("semantic_prompt", "coarse_prompt", "fine_prompt")
        if key not in full_generation
    ]
    if missing:
        raise ValueError(f"full_generation is missing keys: {missing}")
    np.savez(filepath, **full_generation)
84 |
85 |
def generate_audio(
    text: str,
    history_prompt: Optional[Union[Dict, str]] = None,
    text_temp: float = 0.7,
    waveform_temp: float = 0.7,
    silent: bool = False,
    output_full: bool = False,
):
    """Generate audio array from input text.

    Args:
        text: text to be turned into audio
        history_prompt: history choice for audio cloning
        text_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar
        output_full: return full generation to be used as a history prompt

    Returns:
        numpy audio array at sample frequency 24khz
    """
    semantic_tokens = text_to_semantic(
        text,
        history_prompt=history_prompt,
        temp=text_temp,
        silent=silent,
    )
    # `semantic_to_waveform` already returns either the bare audio array or
    # the (full_generation, audio_arr) pair depending on `output_full`,
    # which matches this function's contract exactly — pass it through.
    return semantic_to_waveform(
        semantic_tokens,
        history_prompt=history_prompt,
        temp=waveform_temp,
        silent=silent,
        output_full=output_full,
    )
126 |
--------------------------------------------------------------------------------
/bark/assets/prompts/announcer.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/announcer.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/de_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/de_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/en_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/en_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/es_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/es_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/fr_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/fr_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/hi_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/hi_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/it_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/it_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ja_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ja_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ko_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ko_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pl_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pl_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/pt_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/pt_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/readme.md:
--------------------------------------------------------------------------------
1 | # Example Prompts Data
2 |
3 | ## Version Two
4 | The `v2` prompts are better engineered to follow text with a consistent voice.
5 | To use them, simply include `v2` in the prompt. For example:
6 | ```python
7 | from bark import generate_audio
8 | text_prompt = "madam I'm adam"
9 | audio_array = generate_audio(text_prompt, history_prompt="v2/en_speaker_1")
10 | ```
11 |
12 | ## Prompt Format
13 | The provided data is in the `.npz` format, NumPy's archive format for storing multiple arrays in a single file. The data contains three arrays: semantic_prompt, coarse_prompt, and fine_prompt.
14 |
15 | ```semantic_prompt```
16 |
17 | The semantic_prompt array contains a sequence of token IDs generated by the BERT tokenizer from Hugging Face. These tokens encode the text input and are used as input to generate the audio output. The shape of this array is (n,), where n is the number of tokens in the input text.
18 |
19 | `coarse_prompt`
20 |
21 | The coarse_prompt array is an intermediate output of the text-to-speech pipeline, and contains token IDs generated by the first two codebooks of the EnCodec Codec from Facebook. This step converts the semantic tokens into a different representation that is better suited for the subsequent step. The shape of this array is (2, m), where m is the number of tokens after conversion by the EnCodec Codec.
22 |
23 | `fine_prompt`
24 |
25 | The fine_prompt array is a further processed output of the pipeline, and contains 8 codebooks from the EnCodec Codec. These codebooks represent the final stage of tokenization, and the resulting tokens are used to generate the audio output. The shape of this array is (8, p), where p is the number of tokens after further processing by the EnCodec Codec.
26 |
27 | Overall, these arrays represent different stages of a text-to-speech pipeline that converts text input into synthesized audio output. The semantic_prompt array represents the input text, while coarse_prompt and fine_prompt represent intermediate and final stages of tokenization, respectively.
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/ru_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/ru_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/tr_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/tr_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/de_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/de_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/en_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/en_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/es_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/es_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/fr_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/fr_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/hi_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/hi_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/it_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/it_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ja_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ja_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ko_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ko_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pl_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pl_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/pt_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/pt_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/ru_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/ru_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/tr_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/tr_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/v2/zh_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/v2/zh_speaker_9.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_0.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_1.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_2.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_3.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_3.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_4.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_4.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_5.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_5.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_6.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_6.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_7.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_7.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_8.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_8.npz
--------------------------------------------------------------------------------
/bark/assets/prompts/zh_speaker_9.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gitmylo/bark-data-gen/8f676db0be07be8c0e16577c5b99eea2157f3e9b/bark/assets/prompts/zh_speaker_9.npz
--------------------------------------------------------------------------------
/bark/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from typing import Dict, Optional, Union
3 | import os
4 |
5 | from scipy.io.wavfile import write as write_wav
6 | from .api import generate_audio
7 | from .generation import SAMPLE_RATE
8 |
9 |
def cli():
    """Commandline interface: turn --text into audio and save it as a WAV file.

    Parses CLI flags, runs Bark's ``generate_audio``, and writes the result to
    ``<output_dir>/<output_filename>`` at Bark's native sample rate.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--text", type=str, help="text to be turned into audio")
    parser.add_argument(
        "--output_filename",
        type=str,
        default="bark_generation.wav",
        help="output audio file name",
    )
    parser.add_argument("--output_dir", type=str, default=".", help="directory to save the outputs")
    parser.add_argument(
        "--history_prompt",
        # FIX: argparse's ``type`` must be a callable converter. The original
        # passed ``Optional[Union[Dict, str]]`` (a typing construct), which
        # raised as soon as the flag was actually supplied. From the CLI a
        # history prompt can only arrive as a string (speaker name or path).
        type=str,
        default=None,
        help="history choice for audio cloning",
    )
    parser.add_argument(
        "--text_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    parser.add_argument(
        "--waveform_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    # FIX: ``type=bool`` converts ANY non-empty string (including "False") to
    # True; ``store_true`` gives the intended on/off flag semantics.
    parser.add_argument("--silent", default=False, action="store_true", help="disable progress bar")
    parser.add_argument(
        "--output_full",
        default=False,
        action="store_true",
        help="return full generation to be used as a history prompt",
    )

    args = parser.parse_args()
    if args.text is None:
        # Without text there is nothing to synthesize; fail with a proper
        # usage error instead of passing None downstream.
        parser.error("--text is required")

    try:
        os.makedirs(args.output_dir, exist_ok=True)
        result = generate_audio(
            args.text,
            history_prompt=args.history_prompt,
            text_temp=args.text_temp,
            waveform_temp=args.waveform_temp,
            silent=args.silent,
            output_full=args.output_full,
        )
        # FIX: with output_full=True, generate_audio returns
        # (full_generation, audio_array); only the audio array can be written
        # to the WAV file. The original passed the tuple straight to write_wav.
        generated_audio = result[1] if args.output_full else result
        output_file_path = os.path.join(args.output_dir, args.output_filename)
        write_wav(output_file_path, SAMPLE_RATE, generated_audio)
        print(f"Done! Output audio file is saved at: '{output_file_path}'")
    except Exception as e:
        # Best-effort CLI: report the failure instead of dumping a traceback.
        print(f"Oops, an error occurred: {e}")
--------------------------------------------------------------------------------
/bark/generation.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import gc
3 | import os
4 | import re
5 |
6 | from encodec import EncodecModel
7 | import funcy
8 | import logging
9 | import numpy as np
10 | from scipy.special import softmax
11 | import torch
12 | import torch.nn.functional as F
13 | import tqdm
14 | from transformers import BertTokenizer
15 | from huggingface_hub import hf_hub_download
16 |
17 | from .model import GPTConfig, GPT
18 | from .model_fine import FineGPT, FineGPTConfig
19 |
# Use bfloat16 autocast on CUDA when both the torch build and the GPU support
# it; otherwise install a no-op context manager under the same name so call
# sites can write ``with autocast():`` unconditionally.
if (
    torch.cuda.is_available() and
    hasattr(torch.cuda, "amp") and
    hasattr(torch.cuda.amp, "autocast") and
    hasattr(torch.cuda, "is_bf16_supported") and
    torch.cuda.is_bf16_supported()
):
    autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16)
else:
    @contextlib.contextmanager
    def autocast():
        # No-op stand-in with the same context-manager interface.
        yield
32 |
33 |
# Hold models in module scope so they are lazily loaded once and reused.
# NOTE: the original prefixed these with module-level ``global`` statements,
# which are no-ops outside a function body and were therefore removed.
models = {}

# Device each loaded model should live on (used when offloading to CPU).
models_devices = {}


CONTEXT_WINDOW_SIZE = 1024

SEMANTIC_RATE_HZ = 49.9
SEMANTIC_VOCAB_SIZE = 10_000

CODEBOOK_SIZE = 1024
N_COARSE_CODEBOOKS = 2
N_FINE_CODEBOOKS = 8
COARSE_RATE_HZ = 75

SAMPLE_RATE = 24_000


# (display name, code) for every language Bark ships speaker prompts for.
SUPPORTED_LANGS = [
    ("English", "en"),
    ("German", "de"),
    ("Spanish", "es"),
    ("French", "fr"),
    ("Hindi", "hi"),
    ("Italian", "it"),
    ("Japanese", "ja"),
    ("Korean", "ko"),
    ("Polish", "pl"),
    ("Portuguese", "pt"),
    ("Russian", "ru"),
    ("Turkish", "tr"),
    ("Chinese", "zh"),
]

# Every valid built-in prompt name: "announcer" plus "<lang>_speaker_<0-9>"
# and "v2<sep><lang>_speaker_<0-9>" for each supported language.
ALLOWED_PROMPTS = {"announcer"} | {
    f"{prefix}{lang}_speaker_{n}"
    for _, lang in SUPPORTED_LANGS
    for prefix in ("", f"v2{os.path.sep}")
    for n in range(10)
}
76 |
77 |
# Module-level logger for this package.
logger = logging.getLogger(__name__)


# Absolute directory containing this source file (used to locate bundled assets).
CUR_PATH = os.path.dirname(os.path.abspath(__file__))


# Checkpoints are cached under $XDG_CACHE_HOME/suno/bark_v0, defaulting to
# ~/.cache/suno/bark_v0 when XDG_CACHE_HOME is unset.
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
86 |
87 |
88 | def _cast_bool_env_var(s):
89 | return s.lower() in ('true', '1', 't')
90 |
91 |
# Feature flags, read once from the environment at import time.
USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False"))  # prefer "_small" checkpoints
GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False"))  # allow Apple MPS as a device
OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False"))  # move idle models off the GPU
95 |
96 |
# HuggingFace Hub location of each checkpoint; the "_small" keys are the
# reduced-size variants selected via SUNO_USE_SMALL_MODELS / use_small.
REMOTE_MODEL_PATHS = {
    key: {"repo_id": "suno/bark", "file_name": file_name}
    for key, file_name in [
        ("text_small", "text.pt"),
        ("coarse_small", "coarse.pt"),
        ("fine_small", "fine.pt"),
        ("text", "text_2.pt"),
        ("coarse", "coarse_2.pt"),
        ("fine", "fine_2.pt"),
    ]
}
123 |
124 |
# warn once at import time if this torch build lacks fused scaled-dot-product
# attention (only worth mentioning when a GPU is present)
if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available():
    logger.warning(
        "torch version does not support flash attention. You will get faster" +
        " inference speed by upgrade torch to newest nightly version."
    )
130 |
131 |
132 | def _grab_best_device(use_gpu=True):
133 | if torch.cuda.device_count() > 0 and use_gpu:
134 | device = "cuda"
135 | elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS:
136 | device = "mps"
137 | else:
138 | device = "cpu"
139 | return device
140 |
141 |
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path where this model type's checkpoint lives.

    Args:
        model_type: "text", "coarse", or "fine".
        use_small: select the small checkpoint variant (also forced on by
            the SUNO_USE_SMALL_MODELS env flag).
    """
    wants_small = use_small or USE_SMALL_MODELS
    key = f"{model_type}_small" if wants_small else model_type
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
147 |
148 |
def _download(from_hf_path, file_name):
    """Download `file_name` from the given HuggingFace Hub repo into CACHE_DIR."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
152 |
153 |
class InferenceContext:
    """Temporarily apply a cudnn-benchmark setting, restoring the previous
    value on exit.

    Generation inputs vary in length, so kernel autotuning
    (``benchmark=True``) is disabled by default.
    """

    def __init__(self, benchmark=False):
        self._target_benchmark = benchmark  # value applied on enter
        self._saved_benchmark = None  # previous value, restored on exit

    def __enter__(self):
        self._saved_benchmark = torch.backends.cudnn.benchmark
        torch.backends.cudnn.benchmark = self._target_benchmark

    def __exit__(self, exc_type, exc_value, exc_traceback):
        torch.backends.cudnn.benchmark = self._saved_benchmark
166 |
167 |
# allow TF32 matmuls/convolutions when CUDA is present (faster on Ampere+,
# slightly reduced float32 precision)
if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
171 |
172 |
@contextlib.contextmanager
def _inference_mode():
    # Bundle every inference-time context: cudnn-benchmark override, autograd
    # disabled (inference_mode + no_grad), and bf16 autocast when supported.
    with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast():
        yield
177 |
178 |
179 | def _clear_cuda_cache():
180 | if torch.cuda.is_available():
181 | torch.cuda.empty_cache()
182 | torch.cuda.synchronize()
183 |
184 |
def clean_models(model_key=None):
    """Drop one model (or all of them) from the global cache and reclaim memory.

    Args:
        model_key: key in the global `models` dict, or None to clear everything.
    """
    global models
    # Materialize the key list: deleting from `models` while iterating its
    # live keys() view raises "dictionary changed size during iteration".
    model_keys = [model_key] if model_key is not None else list(models.keys())
    for k in model_keys:
        if k in models:
            del models[k]
    _clear_cuda_cache()
    gc.collect()
193 |
194 |
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Build a GPT/FineGPT from a checkpoint, downloading it first if needed.

    Args:
        ckpt_path: local checkpoint path (downloaded into CACHE_DIR if absent).
        device: torch device string for `map_location` and the final `.to()`.
        use_small: select the small checkpoint variant.
        model_type: "text", "coarse" (both plain GPT), or "fine" (FineGPT).

    Returns:
        The loaded eval-mode model; for "text", a dict with keys
        "model" and "tokenizer" (multilingual BERT tokenizer).

    Raises:
        NotImplementedError: unknown model_type.
        ValueError: checkpoint state dict has unexpected extra/missing keys.
    """
    if model_type == "text":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "coarse":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "fine":
        ConfigClass = FineGPTConfig
        ModelClass = FineGPT
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints store a single "vocab_size"; the
    # config classes expect separate input/output vocab sizes
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    gptconf = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(gptconf)
    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile wrapper prefix from key names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
    # ".attn.bias" is the (non-learned) causal-mask buffer, so mismatches on
    # it are expected and ignored
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")])
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")])
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False because the mask buffers validated above may be absent
    model.load_state_dict(state_dict, strict=False)
    n_params = model.get_num_params()
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    _clear_cuda_cache()
    if model_type == "text":
        tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
        return {
            "model": model,
            "tokenizer": tokenizer,
        }
    return model
250 |
251 |
def _load_codec_model(device):
    """Instantiate the 24 kHz EnCodec codec in eval mode on `device`."""
    model = EncodecModel.encodec_model_24khz()
    # 6 kbps target bandwidth; selects how many codebooks the codec uses
    model.set_target_bandwidth(6.0)
    model.eval()
    model.to(device)
    _clear_cuda_cache()
    return model
259 |
260 |
def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"):
    """Load one of the GPT models, caching it in the global `models` dict.

    Args:
        use_gpu: prefer cuda/mps over cpu when available.
        use_small: use the small checkpoint variant.
        force_reload: reload even if a model is already cached under this key.
        model_type: "text", "coarse", or "fine".

    Returns:
        The cached model; for "text", a {"model", "tokenizer"} dict.

    Raises:
        NotImplementedError: unknown model_type.
    """
    _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small)
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    global models
    global models_devices
    device = _grab_best_device(use_gpu=use_gpu)
    print('Using device', device)
    model_key = f"{model_type}"
    if OFFLOAD_CPU:
        # remember the compute device for generation time, but park the model
        # on cpu between calls; generation code moves it over on demand
        models_devices[model_key] = device
        device = "cpu"
    if model_key not in models or force_reload:
        ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
        clean_models(model_key=model_key)
        model = _load_model_f(ckpt_path, device)
        models[model_key] = model
    if model_type == "text":
        models[model_key]["model"].to(device)
    else:
        models[model_key].to(device)
    return models[model_key]
283 |
284 |
def load_codec_model(use_gpu=True, force_reload=False):
    """Load the EnCodec codec model, caching it in the global `models` dict.

    Args:
        use_gpu: prefer a gpu device when available.
        force_reload: reload even if already cached.

    Returns:
        The cached codec model.
    """
    global models
    global models_devices
    device = _grab_best_device(use_gpu=use_gpu)
    if device == "mps":
        # encodec doesn't support mps
        device = "cpu"
    model_key = "codec"
    if OFFLOAD_CPU:
        # remember the compute device; park the model on cpu between calls
        models_devices[model_key] = device
        device = "cpu"
    if model_key not in models or force_reload:
        clean_models(model_key=model_key)
        model = _load_codec_model(device)
        models[model_key] = model
    models[model_key].to(device)
    return models[model_key]
302 |
303 |
def preload_models(
    text_use_gpu=True,
    text_use_small=False,
    coarse_use_gpu=True,
    coarse_use_small=False,
    fine_use_gpu=True,
    fine_use_small=False,
    codec_use_gpu=True,
    force_reload=False,
):
    """Load all the necessary models for the pipeline.

    Loads the text->semantic, semantic->coarse, coarse->fine, and codec
    models into the global cache, with per-model gpu/small-variant switches.
    Warns when a gpu was requested but only cpu is available.
    """
    if _grab_best_device() == "cpu" and (
        text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu
    ):
        logger.warning("No GPU being used. Careful, inference might be very slow!")
    _ = load_model(
        model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload
    )
    _ = load_model(
        model_type="coarse",
        use_gpu=coarse_use_gpu,
        use_small=coarse_use_small,
        force_reload=force_reload,
    )
    _ = load_model(
        model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload
    )
    _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload)
332 |
333 |
334 | ####
335 | # Generation Functionality
336 | ####
337 |
338 |
339 | def _tokenize(tokenizer, text):
340 | return tokenizer.encode(text, add_special_tokens=False)
341 |
342 |
343 | def _detokenize(tokenizer, enc_text):
344 | return tokenizer.decode(enc_text)
345 |
346 |
347 | def _normalize_whitespace(text):
348 | return re.sub(r"\s+", " ", text).strip()
349 |
350 |
# token-space layout for the text->semantic model
TEXT_ENCODING_OFFSET = 10_048  # added to text token ids to move them past the semantic ids
SEMANTIC_PAD_TOKEN = 10_000  # pad id; its logit is also read as the eos score
TEXT_PAD_TOKEN = 129_595
SEMANTIC_INFER_TOKEN = 129_599  # separator appended after the text+history context
355 |
356 |
357 | def _load_history_prompt(history_prompt_input):
358 | if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"):
359 | history_prompt = np.load(history_prompt_input)
360 | elif isinstance(history_prompt_input, str):
361 | # make sure this works on non-ubuntu
362 | history_prompt_input = os.path.join(*history_prompt_input.split("/"))
363 | if history_prompt_input not in ALLOWED_PROMPTS:
364 | raise ValueError("history prompt not found")
365 | history_prompt = np.load(
366 | os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz")
367 | )
368 | elif isinstance(history_prompt_input, dict):
369 | assert("semantic_prompt" in history_prompt_input)
370 | assert("coarse_prompt" in history_prompt_input)
371 | assert("fine_prompt" in history_prompt_input)
372 | history_prompt = history_prompt_input
373 | else:
374 | raise ValueError("history prompt format unrecognized")
375 | return history_prompt
376 |
377 |
def generate_text_semantic(
    text,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    silent=False,
    min_eos_p=0.2,
    max_gen_duration_s=None,
    allow_early_stop=True,
    use_kv_caching=False,
):
    """Generate semantic tokens from text.

    Args:
        text: input text; whitespace is normalized and the tokenized form is
            truncated to 256 tokens.
        history_prompt: optional voice prompt (.npz path, built-in prompt
            name, or dict) whose "semantic_prompt" conditions generation.
        temp: softmax sampling temperature.
        top_k: if set, sample only from the k most likely tokens.
        top_p: if set, nucleus-sampling probability mass threshold.
        silent: disable the progress bar.
        min_eos_p: stop early once the eos probability reaches this value
            (only when allow_early_stop).
        max_gen_duration_s: hard cap on generated duration, in seconds.
        allow_early_stop: permit generation to end before the step limit.
        use_kv_caching: reuse attention key/value caches between steps.

    Returns:
        1-D np.ndarray of semantic token ids in [0, SEMANTIC_VOCAB_SIZE).
    """
    assert isinstance(text, str)
    text = _normalize_whitespace(text)
    assert len(text.strip()) > 0
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        semantic_history = history_prompt["semantic_prompt"]
        assert (
            isinstance(semantic_history, np.ndarray)
            and len(semantic_history.shape) == 1
            and len(semantic_history) > 0
            and semantic_history.min() >= 0
            and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
        )
    else:
        semantic_history = None
    # load models if not yet exist
    global models
    global models_devices
    if "text" not in models:
        preload_models()
    model_container = models["text"]
    model = model_container["model"]
    tokenizer = model_container["tokenizer"]
    # shift text ids past the semantic-token range so the two don't collide
    encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET
    if OFFLOAD_CPU:
        model.to(models_devices["text"])
    device = next(model.parameters()).device
    # truncate/pad the text context to exactly 256 tokens
    if len(encoded_text) > 256:
        p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)
        logger.warning(f"warning, text too long, lopping of last {p}%")
        encoded_text = encoded_text[:256]
    encoded_text = np.pad(
        encoded_text,
        (0, 256 - len(encoded_text)),
        constant_values=TEXT_PAD_TOKEN,
        mode="constant",
    )
    if semantic_history is not None:
        semantic_history = semantic_history.astype(np.int64)
        # lop off if history is too long, pad if needed
        semantic_history = semantic_history[-256:]
        semantic_history = np.pad(
            semantic_history,
            (0, 256 - len(semantic_history)),
            constant_values=SEMANTIC_PAD_TOKEN,
            mode="constant",
        )
    else:
        semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256)
    # full context: 256 text tokens + 256 history tokens + the infer separator
    x = torch.from_numpy(
        np.hstack([
            encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN])
        ]).astype(np.int64)
    )[None]
    assert x.shape[1] == 256 + 256 + 1
    with _inference_mode():
        x = x.to(device)
        n_tot_steps = 768
        # custom tqdm updates since we don't know when eos will occur
        pbar = tqdm.tqdm(disable=silent, total=100)
        pbar_state = 0
        tot_generated_duration_s = 0
        kv_cache = None
        for n in range(n_tot_steps):
            # with a warm kv cache only the newest token needs to be fed in
            if use_kv_caching and kv_cache is not None:
                x_input = x[:, [-1]]
            else:
                x_input = x
            logits, kv_cache = model(
                x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache
            )
            relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE]
            if allow_early_stop:
                relevant_logits = torch.hstack(
                    (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]])  # eos
                )
            if top_p is not None:
                # faster to convert to numpy
                original_device = relevant_logits.device
                relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                sorted_indices = np.argsort(relevant_logits)[::-1]
                sorted_logits = relevant_logits[sorted_indices]
                cumulative_probs = np.cumsum(softmax(sorted_logits))
                sorted_indices_to_remove = cumulative_probs > top_p
                # keep at least the single most likely token
                sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                sorted_indices_to_remove[0] = False
                relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                relevant_logits = torch.from_numpy(relevant_logits)
                relevant_logits = relevant_logits.to(original_device)
            if top_k is not None:
                v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                relevant_logits[relevant_logits < v[-1]] = -float("Inf")
            probs = F.softmax(relevant_logits / temp, dim=-1)
            # multinomial bugged on mps: shuttle to cpu if necessary
            inf_device = probs.device
            if probs.device.type == "mps":
                probs = probs.to("cpu")
            item_next = torch.multinomial(probs, num_samples=1)
            probs = probs.to(inf_device)
            item_next = item_next.to(inf_device)
            if allow_early_stop and (
                item_next == SEMANTIC_VOCAB_SIZE
                or (min_eos_p is not None and probs[-1] >= min_eos_p)
            ):
                # eos found, so break
                pbar.update(100 - pbar_state)
                break
            x = torch.cat((x, item_next[None]), dim=1)
            tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ
            if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:
                pbar.update(100 - pbar_state)
                break
            if n == n_tot_steps - 1:
                pbar.update(100 - pbar_state)
                break
            del logits, relevant_logits, probs, item_next
            req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))])
            if req_pbar_state > pbar_state:
                pbar.update(req_pbar_state - pbar_state)
                pbar_state = req_pbar_state
        pbar.close()
        # strip the 513-token conditioning context, keep only generated tokens
        out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]
    if OFFLOAD_CPU:
        model.to("cpu")
    assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE)
    _clear_cuda_cache()
    return out
518 |
519 |
def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE):
    """Interleave a (n_codebooks, T) code array column-major into 1-D,
    shifting codebook row n by n * offset_size so token ids don't collide.

    Args:
        arr: 2-D array of codes, one row per codebook.
        offset_size: per-row id offset; None disables the shift.

    Returns:
        1-D array of length n_codebooks * T, time-major interleaved.
    """
    assert arr.ndim == 2
    shifted = arr.copy()
    if offset_size is not None:
        for row in range(1, shifted.shape[0]):
            shifted[row, :] += offset_size * row
    return shifted.ravel("F")
528 |
529 |
# special tokens in the coarse model's input space
COARSE_SEMANTIC_PAD_TOKEN = 12_048  # pads the semantic context window
COARSE_INFER_TOKEN = 12_050  # separator between semantic context and coarse codes
532 |
533 |
def generate_coarse(
    x_semantic,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    silent=False,
    max_coarse_history=630,  # min 60 (faster), max 630 (more context)
    sliding_window_len=60,
    use_kv_caching=False,
):
    """Generate coarse audio codes from semantic tokens.

    Args:
        x_semantic: 1-D np.ndarray of semantic token ids.
        history_prompt: optional voice prompt; its "semantic_prompt" and
            "coarse_prompt" arrays condition generation.
        temp: softmax sampling temperature.
        top_k: if set, sample only from the k most likely tokens.
        top_p: if set, nucleus-sampling probability mass threshold.
        silent: disable the progress bar.
        max_coarse_history: how many coarse tokens of context to keep.
        sliding_window_len: coarse tokens generated per window before the
            context is rebuilt.
        use_kv_caching: reuse attention key/value caches between steps.

    Returns:
        (N_COARSE_CODEBOOKS, T) np.ndarray of codes in [0, CODEBOOK_SIZE).
    """
    assert (
        isinstance(x_semantic, np.ndarray)
        and len(x_semantic.shape) == 1
        and len(x_semantic) > 0
        and x_semantic.min() >= 0
        and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1
    )
    assert 60 <= max_coarse_history <= 630
    assert max_coarse_history + sliding_window_len <= 1024 - 256
    # how many (flattened) coarse tokens correspond to one semantic token
    semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS
    max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        x_semantic_history = history_prompt["semantic_prompt"]
        x_coarse_history = history_prompt["coarse_prompt"]
        assert (
            isinstance(x_semantic_history, np.ndarray)
            and len(x_semantic_history.shape) == 1
            and len(x_semantic_history) > 0
            and x_semantic_history.min() >= 0
            and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
            and isinstance(x_coarse_history, np.ndarray)
            and len(x_coarse_history.shape) == 2
            and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS
            and x_coarse_history.shape[-1] >= 0
            and x_coarse_history.min() >= 0
            and x_coarse_history.max() <= CODEBOOK_SIZE - 1
            and (
                round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)
                == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1)
            )
        )
        # flatten codebooks and shift past the semantic id range
        x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE
        # trim histories correctly
        n_semantic_hist_provided = np.min(
            [
                max_semantic_history,
                len(x_semantic_history) - len(x_semantic_history) % 2,
                int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),
            ]
        )
        n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
        x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)
        x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)
        # TODO: bit of a hack for time alignment (sounds better)
        x_coarse_history = x_coarse_history[:-2]
    else:
        x_semantic_history = np.array([], dtype=np.int32)
        x_coarse_history = np.array([], dtype=np.int32)
    # load models if not yet exist
    global models
    global models_devices
    if "coarse" not in models:
        preload_models()
    model = models["coarse"]
    if OFFLOAD_CPU:
        model.to(models_devices["coarse"])
    device = next(model.parameters()).device
    # start loop
    n_steps = int(
        round(
            np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS)
            * N_COARSE_CODEBOOKS
        )
    )
    assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0
    x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)
    x_coarse = x_coarse_history.astype(np.int32)
    base_semantic_idx = len(x_semantic_history)
    with _inference_mode():
        x_semantic_in = torch.from_numpy(x_semantic)[None].to(device)
        x_coarse_in = torch.from_numpy(x_coarse)[None].to(device)
        n_window_steps = int(np.ceil(n_steps / sliding_window_len))
        n_step = 0
        for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):
            # rebuild the model context for this window: 256 semantic tokens,
            # the infer separator, then the trailing coarse history
            semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))
            # pad from right side
            x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]
            x_in = x_in[:, :256]
            x_in = F.pad(
                x_in,
                (0, 256 - x_in.shape[-1]),
                "constant",
                COARSE_SEMANTIC_PAD_TOKEN,
            )
            x_in = torch.hstack(
                [
                    x_in,
                    torch.tensor([COARSE_INFER_TOKEN])[None].to(device),
                    x_coarse_in[:, -max_coarse_history:],
                ]
            )
            kv_cache = None
            for _ in range(sliding_window_len):
                if n_step >= n_steps:
                    continue
                # even steps emit codebook 0, odd steps emit codebook 1
                is_major_step = n_step % N_COARSE_CODEBOOKS == 0

                if use_kv_caching and kv_cache is not None:
                    x_input = x_in[:, [-1]]
                else:
                    x_input = x_in

                logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache)
                # restrict sampling to the logit slice of the current codebook
                logit_start_idx = (
                    SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE
                )
                logit_end_idx = (
                    SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE
                )
                relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]
                if top_p is not None:
                    # faster to convert to numpy
                    original_device = relevant_logits.device
                    relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                    sorted_indices = np.argsort(relevant_logits)[::-1]
                    sorted_logits = relevant_logits[sorted_indices]
                    cumulative_probs = np.cumsum(softmax(sorted_logits))
                    sorted_indices_to_remove = cumulative_probs > top_p
                    sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                    sorted_indices_to_remove[0] = False
                    relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                    relevant_logits = torch.from_numpy(relevant_logits)
                    relevant_logits = relevant_logits.to(original_device)
                if top_k is not None:
                    v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                    relevant_logits[relevant_logits < v[-1]] = -float("Inf")
                probs = F.softmax(relevant_logits / temp, dim=-1)
                # multinomial bugged on mps: shuttle to cpu if necessary
                inf_device = probs.device
                if probs.device.type == "mps":
                    probs = probs.to("cpu")
                item_next = torch.multinomial(probs, num_samples=1)
                probs = probs.to(inf_device)
                item_next = item_next.to(inf_device)
                item_next += logit_start_idx
                x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)
                x_in = torch.cat((x_in, item_next[None]), dim=1)
                del logits, relevant_logits, probs, item_next
                n_step += 1
            del x_in
        del x_semantic_in
    if OFFLOAD_CPU:
        model.to("cpu")
    gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]
    del x_coarse_in
    assert len(gen_coarse_arr) == n_steps
    # de-interleave back to (N_COARSE_CODEBOOKS, T) and undo the id offsets
    gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE
    for n in range(1, N_COARSE_CODEBOOKS):
        gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE
    _clear_cuda_cache()
    return gen_coarse_audio_arr
698 |
699 |
def generate_fine(
    x_coarse_gen,
    history_prompt=None,
    temp=0.5,
    silent=True,
):
    """Generate full audio codes from coarse audio codes.

    Fills in the remaining fine codebooks on top of the coarse ones by
    sliding a 1024-wide window over the sequence in 512-step hops.

    Args:
        x_coarse_gen: (n_coarse, T) np.ndarray of coarse codes,
            1 <= n_coarse < N_FINE_CODEBOOKS.
        history_prompt: optional voice prompt; its "fine_prompt" array
            (last 512 columns) is prepended as conditioning.
        temp: sampling temperature; None means greedy argmax.
        silent: disable the progress bar.

    Returns:
        (N_FINE_CODEBOOKS, T) np.ndarray of codes, same length as the input.
    """
    assert (
        isinstance(x_coarse_gen, np.ndarray)
        and len(x_coarse_gen.shape) == 2
        and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1
        and x_coarse_gen.shape[1] > 0
        and x_coarse_gen.min() >= 0
        and x_coarse_gen.max() <= CODEBOOK_SIZE - 1
    )
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        x_fine_history = history_prompt["fine_prompt"]
        assert (
            isinstance(x_fine_history, np.ndarray)
            and len(x_fine_history.shape) == 2
            and x_fine_history.shape[0] == N_FINE_CODEBOOKS
            and x_fine_history.shape[1] >= 0
            and x_fine_history.min() >= 0
            and x_fine_history.max() <= CODEBOOK_SIZE - 1
        )
    else:
        x_fine_history = None
    n_coarse = x_coarse_gen.shape[0]
    # load models if not yet exist
    global models
    global models_devices
    if "fine" not in models:
        preload_models()
    model = models["fine"]
    if OFFLOAD_CPU:
        model.to(models_devices["fine"])
    device = next(model.parameters()).device
    # make input arr: coarse rows on top, missing fine rows filled with the
    # out-of-range CODEBOOK_SIZE value as padding
    in_arr = np.vstack(
        [
            x_coarse_gen,
            np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))
            + CODEBOOK_SIZE,  # padding
        ]
    ).astype(np.int32)
    # prepend history if available (max 512)
    if x_fine_history is not None:
        x_fine_history = x_fine_history.astype(np.int32)
        in_arr = np.hstack(
            [
                x_fine_history[:, -512:].astype(np.int32),
                in_arr,
            ]
        )
        n_history = x_fine_history[:, -512:].shape[1]
    else:
        n_history = 0
    n_remove_from_end = 0
    # need to pad if too short (since non-causal model)
    if in_arr.shape[1] < 1024:
        n_remove_from_end = 1024 - in_arr.shape[1]
        in_arr = np.hstack(
            [
                in_arr,
                np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE,
            ]
        )
    # we can be lazy about fractional loop and just keep overwriting codebooks
    n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1
    with _inference_mode():
        in_arr = torch.tensor(in_arr.T).to(device)  # (time, codebooks) from here on
        for n in tqdm.tqdm(range(n_loops), disable=silent):
            start_idx = np.min([n * 512, in_arr.shape[0] - 1024])
            start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512])
            rel_start_fill_idx = start_fill_idx - start_idx
            in_buffer = in_arr[start_idx : start_idx + 1024, :][None]
            # predict each missing codebook in turn, conditioning on the ones
            # already filled in
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                logits = model(nn, in_buffer)
                if temp is None:
                    relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE]
                    codebook_preds = torch.argmax(relevant_logits, -1)
                else:
                    relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp
                    probs = F.softmax(relevant_logits, dim=-1)
                    # multinomial bugged on mps: shuttle to cpu if necessary
                    inf_device = probs.device
                    if probs.device.type == "mps":
                        probs = probs.to("cpu")
                    codebook_preds = torch.hstack(
                        [
                            torch.multinomial(probs[nnn], num_samples=1).to(inf_device)
                            for nnn in range(rel_start_fill_idx, 1024)
                        ]
                    )
                in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds
                del logits, codebook_preds
            # transfer over info into model_in and convert to numpy
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                in_arr[
                    start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn
                ] = in_buffer[0, rel_start_fill_idx:, nn]
            del in_buffer
    gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T
    del in_arr
    if OFFLOAD_CPU:
        model.to("cpu")
    # strip the prepended history and any right-side padding
    gen_fine_arr = gen_fine_arr[:, n_history:]
    if n_remove_from_end > 0:
        gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]
    assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]
    _clear_cuda_cache()
    return gen_fine_arr
813 |
814 |
def codec_decode(fine_tokens):
    """Turn quantized audio codes into audio array using encodec.

    Args:
        fine_tokens: (N_FINE_CODEBOOKS, T) int array of codec codes.

    Returns:
        1-D numpy waveform (the decoder output, squeezed).
    """
    # load models if not yet exist
    global models
    global models_devices
    if "codec" not in models:
        preload_models()
    model = models["codec"]
    if OFFLOAD_CPU:
        # model is parked on cpu between calls; move to its compute device
        model.to(models_devices["codec"])
    device = next(model.parameters()).device
    arr = torch.from_numpy(fine_tokens)[None]
    arr = arr.to(device)
    arr = arr.transpose(0, 1)  # (1, n_codebooks, T) -> (n_codebooks, 1, T)
    emb = model.quantizer.decode(arr)
    out = model.decoder(emb)
    audio_arr = out.detach().cpu().numpy().squeeze()
    del arr, emb, out
    if OFFLOAD_CPU:
        model.to("cpu")
    return audio_arr
836 |
--------------------------------------------------------------------------------
/bark/model.py:
--------------------------------------------------------------------------------
1 | """
2 | Much of this code is adapted from Andrej Karpathy's NanoGPT
3 | (https://github.com/karpathy/nanoGPT)
4 | """
5 | import math
6 | from dataclasses import dataclass
7 |
8 | import torch
9 | import torch.nn as nn
10 | from torch.nn import functional as F
11 |
class LayerNorm(nn.Module):
    """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """

    def __init__(self, ndim, bias):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        # storing None makes F.layer_norm skip the bias term entirely
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, input):
        return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
22 |
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with an optional key/value cache for
    incremental decoding. Uses torch's fused scaled_dot_product_attention
    when this build provides it, otherwise a manual masked implementation."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            # print("WARNING: using slow attention. Flash Attention atm needs PyTorch nightly and dropout=0.0")
            # causal mask to ensure that attention is only applied to the left in the input sequence
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                        .view(1, 1, config.block_size, config.block_size))

    def forward(self, x, past_kv=None, use_cache=False):
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k ,v  = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # prepend the cached keys/values from earlier decoding steps
        if past_kv is not None:
            past_key = past_kv[0]
            past_value = past_kv[1]
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        FULL_T = k.shape[-2]  # total sequence length including the cache

        if use_cache is True:
            present = (k, v)
        else:
            present = None

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            if past_kv is not None:
                # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains
                # the query for the last token. scaled_dot_product_attention interprets this as the first token in the
                # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so
                # to work around this we set is_causal=False.
                is_causal = False
            else:
                is_causal = True

            y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal)
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return (y, present)
93 |
class MLP(nn.Module):
    """Transformer feed-forward block: expand 4x, GELU, project back, dropout."""

    def __init__(self, config):
        super().__init__()
        # attribute names match the checkpoint's state_dict keys
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)
        self.gelu = nn.GELU()

    def forward(self, x):
        hidden = self.gelu(self.c_fc(x))
        return self.dropout(self.c_proj(hidden))
109 |
class Block(nn.Module):
    """Pre-norm transformer block: causal self-attention and MLP, each wrapped
    in a residual connection; forwards the attention KV cache."""

    def __init__(self, config, layer_idx):
        super().__init__()
        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)
        self.layer_idx = layer_idx  # position of this block in the stack

    def forward(self, x, past_kv=None, use_cache=False):
        # prev_kvs is the (k, v) cache tuple when use_cache, else None
        attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache)
        x = x + attn_output
        x = x + self.mlp(self.ln_2(x))
        return (x, prev_kvs)
125 |
@dataclass
class GPTConfig:
    # Maximum sequence length supported by the position-embedding table.
    block_size: int = 1024
    # Vocabulary size of the input token embedding.
    input_vocab_size: int = 10_048
    # Vocabulary size of the output logits (lm_head).
    output_vocab_size: int = 10_048
    # Number of transformer layers.
    n_layer: int = 12
    # Number of attention heads per layer.
    n_head: int = 12
    # Embedding / hidden dimension.
    n_embd: int = 768
    # Dropout probability used throughout the model.
    dropout: float = 0.0
    bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
136 |
class GPT(nn.Module):
    """GPT-style causal transformer language model with optional KV caching.

    Supports separate input and output vocabularies, and a `merge_context`
    mode in forward() that element-wise sums two leading 256-token prompt
    segments into a single 256-token context.
    """

    def __init__(self, config):
        super().__init__()
        assert config.input_vocab_size is not None
        assert config.output_vocab_size is not None
        assert config.block_size is not None
        self.config = config

        # Token/position embeddings, dropout, transformer stack, final layer norm.
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.input_vocab_size, config.n_embd),
            wpe = nn.Embedding(config.block_size, config.n_embd),
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
            ln_f = LayerNorm(config.n_embd, bias=config.bias),
        ))
        # Projection from hidden states to output-vocabulary logits.
        self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wte.weight.numel()
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False):
        """Run the model on token ids `idx` of shape (b, t); return (logits, new_kv).

        Logits are produced only for the last position. When `past_kv` is
        given, incremental decoding is assumed and `idx` must hold exactly
        one new token. When `merge_context` is True, the first two 256-token
        segments of `idx` are summed into one 256-token context
        (presumably text prompt + semantic prompt — confirm against callers).
        """
        device = idx.device
        b, t = idx.size()
        if past_kv is not None:
            # Incremental decoding: only the single newest token is embedded.
            assert t == 1
            tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
        else:
            if merge_context:
                assert(idx.shape[1] >= 256+256+1)
                # Effective sequence length after the two 256-token segments merge.
                t = idx.shape[1] - 256
            else:
                assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"

            # forward the GPT model itself
            if merge_context:
                # Sum the embeddings of the two leading segments; keep the tail as-is.
                tok_emb = torch.cat([
                    self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]),
                    self.transformer.wte(idx[:,256+256:])
                ], dim=1)
            else:
                tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)

        if past_kv is None:
            past_length = 0
            # One `None` placeholder per layer so the zip() below is uniform.
            past_kv = tuple([None] * len(self.transformer.h))
        else:
            # Length of the cached prefix, read from the first layer's key tensor.
            past_length = past_kv[0][0].size(-2)

        if position_ids is None:
            # Positions continue directly after the cached prefix.
            position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0) # shape (1, t)
            assert position_ids.shape == (1, t)

        pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd)

        x = self.transformer.drop(tok_emb + pos_emb)

        new_kv = () if use_cache else None

        # Run every transformer block, threading each layer's cached kv through.
        for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)):
            x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache)

            if use_cache:
                new_kv = new_kv + (kv,)

        x = self.transformer.ln_f(x)

        # inference-time mini-optimization: only forward the lm_head on the very last position
        logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim

        return (logits, new_kv)
219 |
--------------------------------------------------------------------------------
/bark/model_fine.py:
--------------------------------------------------------------------------------
1 | """
2 | Much of this code is adapted from Andrej Karpathy's NanoGPT
3 | (https://github.com/karpathy/nanoGPT)
4 | """
5 | from dataclasses import dataclass
6 | import math
7 |
8 | import torch
9 | import torch.nn as nn
10 | from torch.nn import functional as F
11 |
12 | from .model import GPT, GPTConfig, MLP
13 |
14 |
class NonCausalSelfAttention(nn.Module):
    """Multi-head self-attention without a causal mask (every position attends to all)."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # Fused projection producing query, key and value in a single matmul.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # Projection applied after the heads are merged back together.
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # Dropout on attention weights and on the residual output.
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # Use the fused SDPA kernel when available and no dropout is configured.
        self.flash = (
            hasattr(torch.nn.functional, "scaled_dot_product_attention") and self.dropout == 0.0
        )

    def forward(self, x):
        B, T, C = x.size()  # batch, sequence length, embedding dim
        head_dim = C // self.n_head

        # Project once, then split into per-head query/key/value: (B, nh, T, hs).
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        q = q.view(B, T, self.n_head, head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_head, head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_head, head_dim).transpose(1, 2)

        if self.flash:
            # Fused kernel; is_causal=False because this attention is bidirectional.
            out = torch.nn.functional.scaled_dot_product_attention(
                q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=False
            )
        else:
            # Reference implementation: scaled dot-product, softmax, weighted sum.
            scores = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            weights = self.attn_dropout(F.softmax(scores, dim=-1))
            out = weights @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)

        # Re-assemble all head outputs side by side.
        out = out.transpose(1, 2).contiguous().view(B, T, C)

        # Output projection plus residual dropout.
        return self.resid_dropout(self.c_proj(out))
62 |
63 |
class FineBlock(nn.Module):
    """Transformer layer for the fine model: pre-norm non-causal attention + pre-norm MLP."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = NonCausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # Residual attention sub-layer followed by residual feed-forward sub-layer.
        x = x + self.attn(self.ln_1(x))
        return x + self.mlp(self.ln_2(x))
76 |
77 |
class FineGPT(GPT):
    """Non-causal GPT that predicts one EnCodec codebook from all earlier codebooks.

    Replaces the base model's single embedding/head with one token-embedding
    table per codebook and one output head per *predicted* codebook.
    """

    def __init__(self, config):
        super().__init__(config)
        del self.lm_head  # replaced by one head per predicted codebook
        self.config = config
        self.n_codes_total = config.n_codes_total
        self.transformer = nn.ModuleDict(
            dict(
                # One token-embedding table per codebook channel.
                wtes=nn.ModuleList(
                    [
                        nn.Embedding(config.input_vocab_size, config.n_embd)
                        for _ in range(config.n_codes_total)
                    ]
                ),
                wpe=nn.Embedding(config.block_size, config.n_embd),
                drop=nn.Dropout(config.dropout),
                h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]),
                ln_f=nn.LayerNorm(config.n_embd),
            )
        )
        # One output head for every codebook that must be predicted;
        # codebooks 0..n_codes_given-1 are conditioning inputs only.
        self.lm_heads = nn.ModuleList(
            [
                nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
                for _ in range(config.n_codes_given, self.n_codes_total)
            ]
        )
        # Tie each predicted codebook's input embedding to its output head.
        # Fix: the original used `wtes[i + 1]`, which is only correct when
        # n_codes_given == 1; using the configured offset generalizes it
        # (identical behavior for the default config).
        for i in range(self.n_codes_total - config.n_codes_given):
            self.transformer.wtes[i + config.n_codes_given].weight = self.lm_heads[i].weight

    def forward(self, pred_idx, idx):
        """Return logits for codebook `pred_idx`.

        Args:
            pred_idx: index (> 0) of the codebook to predict.
            idx: LongTensor of codes with shape (b, t, n_codes_total).

        Returns:
            Logits of shape (b, t, output_vocab_size).
        """
        device = idx.device
        b, t, codes = idx.size()
        assert (
            t <= self.config.block_size
        ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        assert pred_idx > 0, "cannot predict 0th codebook"
        assert codes == self.n_codes_total, (b, t, codes)
        pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0)  # shape (1, t)

        # Embed every codebook channel separately; each entry is (b, t, n_embd, 1).
        tok_embs = [
            wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes)
        ]
        tok_emb = torch.cat(tok_embs, dim=-1)
        pos_emb = self.transformer.wpe(pos)  # position embeddings of shape (1, t, n_embd)
        # Sum only the channels up to and including the one being predicted.
        x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1)
        x = self.transformer.drop(x + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        # Head index is offset by the number of given (non-predicted) codebooks.
        logits = self.lm_heads[pred_idx - self.config.n_codes_given](x)
        return logits

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            for wte in self.transformer.wtes:
                n_params -= wte.weight.numel()
            n_params -= self.transformer.wpe.weight.numel()
        return n_params
144 |
145 |
@dataclass
class FineGPTConfig(GPTConfig):
    # Total number of EnCodec codebooks handled by the fine model.
    n_codes_total: int = 8
    # Number of codebooks provided as conditioning (not predicted).
    n_codes_given: int = 1
150 |
--------------------------------------------------------------------------------
/create_data.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import random
3 | import uuid
4 |
5 | import numpy
6 |
7 | from bark import text_to_semantic
8 | from bark.generation import load_model
9 |
10 | from data import load_books, random_split_chunk
11 |
# Build a training set: repeatedly pick a random chunk of book text and save
# its semantic-token encoding as a .npy file under `output/`. Runs forever;
# stop with Ctrl-C.
loaded_data = load_books()

print('Loading semantics model')
load_model(use_gpu=True, use_small=False, force_reload=False, model_type='text')

output = 'output'

if not os.path.isdir(output):
    os.mkdir(output)

while True:
    # Fresh random file name per sample.
    file_name = os.path.join(output, uuid.uuid4().hex + '.npy')
    text = ''
    while not text:  # retry until a non-empty chunk is drawn
        text = random_split_chunk(loaded_data).strip()  # obtain a short chunk of text
    print('Generating semantics for text:', text)
    # Random temperature in [0.6, 0.8] adds variety to the generated samples.
    semantics = text_to_semantic(text, temp=round(random.uniform(0.6, 0.8), ndigits=2))
    numpy.save(file_name, semantics)
32 |
--------------------------------------------------------------------------------
/create_wavs.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 |
4 | import numpy
5 | from scipy.io import wavfile
6 |
7 | from bark.generation import load_model, SAMPLE_RATE
8 | from bark.api import semantic_to_waveform
9 |
# Convert every semantic-token .npy file in `output/` to a .wav in `output_wav/`.
output = 'output'
output_wav = 'output_wav'

if not os.path.isdir(output):
    raise Exception('No \'output\' folder, make sure you run create_data.py first!')
if not os.path.isdir(output_wav):
    os.mkdir(output_wav)

print('Loading coarse model')
load_model(use_gpu=True, use_small=False, force_reload=False, model_type='coarse')
print('Loading fine model')
load_model(use_gpu=True, use_small=False, force_reload=False, model_type='fine')

for f in os.listdir(output):
    # Fix: os.path.splitext handles extensionless names correctly, whereas
    # '.'.join(f.split('.')[:-1]) collapsed them to an empty string.
    real_name = os.path.splitext(f)[0]  # file name without its extension
    file_name = os.path.join(output, f)
    out_file = os.path.join(output_wav, f'{real_name}.wav')
    if not os.path.isfile(out_file) and os.path.isfile(file_name):  # Don't process files that have already been processed
        print(f'Processing {f}')
        wav = semantic_to_waveform(numpy.load(file_name), temp=round(random.uniform(0.6, 0.8), ndigits=2))
        wavfile.write(out_file, SAMPLE_RATE, wav)

print('Done!')
33 |
--------------------------------------------------------------------------------
/data.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import requests
4 |
5 |
# Public-domain source texts (Project Gutenberg) used as the training corpus.
books = [
    'https://www.gutenberg.org/cache/epub/1513/pg1513.txt',
    'https://www.gutenberg.org/files/2701/2701-0.txt',
    'https://www.gutenberg.org/cache/epub/84/pg84.txt',
    'https://www.gutenberg.org/cache/epub/2641/pg2641.txt',
    'https://www.gutenberg.org/cache/epub/1342/pg1342.txt',
    'https://www.gutenberg.org/cache/epub/100/pg100.txt'
]
14 |
# Characters permitted in the training text; everything else is stripped.
allowed_chars = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~\n\\'

# Frozen-set view of `allowed_chars` for O(1) per-character membership tests
# (a `char in str` scan is O(len(allowed_chars)) per character).
_allowed_set = frozenset(allowed_chars)


def download_book(book):
    """Download one book from `book` (a URL) and return its text decoded as UTF-8."""
    return requests.get(book).content.decode('utf-8')


def filter_data(data):
    """Return `data` with every character outside the allowed set removed."""
    print('Filtering data')
    return ''.join([char for char in data if char in _allowed_set])
25 |
26 |
def load_books():
    """Download, sanitize and concatenate all books into one big string."""
    print(f'Loading {len(books)} books into ram')
    corpus = [filter_data(str(download_book(book))) for book in books]
    print('Loaded books')
    return ' '.join(corpus)
34 |
35 |
def random_split_chunk(data, size=14):
    """Return up to `size` consecutive space-separated words from a random spot in `data`."""
    words = data.split(' ')
    start = random.randrange(0, len(words))
    # Slicing past the end simply yields a shorter final chunk.
    return ' '.join(words[start:start + size])
40 |
--------------------------------------------------------------------------------
/model-card.md:
--------------------------------------------------------------------------------
1 | # Model Card: Bark
2 |
3 | This is the official codebase for running the text to audio model, from Suno.ai.
4 |
5 | The following is additional information about the models released here.
6 |
7 | ## Model Details
8 |
9 | Bark is a series of three transformer models that turn text into audio.
10 | ### Text to semantic tokens
11 | - Input: text, tokenized with [BERT tokenizer from Hugging Face](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertTokenizer)
12 | - Output: semantic tokens that encode the audio to be generated
13 |
14 | ### Semantic to coarse tokens
15 | - Input: semantic tokens
16 | - Output: tokens from the first two codebooks of the [EnCodec Codec](https://github.com/facebookresearch/encodec) from facebook
17 |
18 | ### Coarse to fine tokens
19 | - Input: the first two codebooks from EnCodec
20 | - Output: 8 codebooks from EnCodec
21 |
22 | ### Architecture
23 | | Model | Parameters | Attention | Output Vocab size |
24 | |:-------------------------:|:----------:|------------|:-----------------:|
25 | | Text to semantic tokens | 80 M | Causal | 10,000 |
26 | | Semantic to coarse tokens | 80 M | Causal | 2x 1,024 |
27 | | Coarse to fine tokens | 80 M | Non-causal | 6x 1,024 |
28 |
29 |
30 | ### Release date
31 | April 2023
32 |
33 | ## Broader Implications
34 | We anticipate that this model's text to audio capabilities can be used to improve accessibility tools in a variety of languages.
35 | Straightforward improvements will allow models to run faster than realtime, rendering them useful for applications such as virtual assistants.
36 |
37 | While we hope that this release will enable users to express their creativity and build applications that are a force
38 | for good, we acknowledge that any text to audio model has the potential for dual use. While it is not straightforward
39 | to voice clone known people with Bark, it can still be used for nefarious purposes. To further reduce the chances of unintended use of Bark,
40 | we also release a simple classifier to detect Bark-generated audio with high accuracy (see notebooks section of the main repository).
41 |
--------------------------------------------------------------------------------
/notebooks/long_form_generation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "id": "39ea4bed",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import os\n",
11 | "\n",
12 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
13 | "\n",
14 | "\n",
15 | "from IPython.display import Audio\n",
16 | "import nltk # we'll use this to split into sentences\n",
17 | "import numpy as np\n",
18 | "\n",
19 | "from bark.generation import (\n",
20 | " generate_text_semantic,\n",
21 | " preload_models,\n",
22 | ")\n",
23 | "from bark.api import semantic_to_waveform\n",
24 | "from bark import generate_audio, SAMPLE_RATE"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 29,
30 | "id": "776964b6",
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "preload_models()"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "id": "1d03f4d2",
41 | "metadata": {},
42 | "outputs": [],
43 | "source": []
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "id": "74a025a4",
48 | "metadata": {},
49 | "source": [
50 | "# Simple Long-Form Generation\n",
51 | "We split longer text into sentences using `nltk` and generate the sentences one by one."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 33,
57 | "id": "57b06e2a",
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "script = \"\"\"\n",
62 | "Hey, have you heard about this new text-to-audio model called \"Bark\"? \n",
63 | "Apparently, it's the most realistic and natural-sounding text-to-audio model \n",
64 | "out there right now. People are saying it sounds just like a real person speaking. \n",
65 | "I think it uses advanced machine learning algorithms to analyze and understand the \n",
66 | "nuances of human speech, and then replicates those nuances in its own speech output. \n",
67 | "It's pretty impressive, and I bet it could be used for things like audiobooks or podcasts. \n",
68 | "In fact, I heard that some publishers are already starting to use Bark to create audiobooks. \n",
69 | "It would be like having your own personal voiceover artist. I really think Bark is going to \n",
70 | "be a game-changer in the world of text-to-audio technology.\n",
71 | "\"\"\".replace(\"\\n\", \" \").strip()"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 34,
77 | "id": "f747f804",
78 | "metadata": {},
79 | "outputs": [],
80 | "source": [
81 | "sentences = nltk.sent_tokenize(script)"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 35,
87 | "id": "17400a9b",
88 | "metadata": {
89 | "scrolled": true
90 | },
91 | "outputs": [
92 | {
93 | "name": "stderr",
94 | "output_type": "stream",
95 | "text": [
96 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 43.03it/s]\n",
97 | "100%|████████████████████████████████████████████████████████████████████████| 17/17 [00:06<00:00, 2.45it/s]\n",
98 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 22.73it/s]\n",
99 | "100%|████████████████████████████████████████████████████████████████████████| 33/33 [00:13<00:00, 2.52it/s]\n",
100 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 66.30it/s]\n",
101 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.46it/s]\n",
102 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.99it/s]\n",
103 | "100%|████████████████████████████████████████████████████████████████████████| 35/35 [00:14<00:00, 2.46it/s]\n",
104 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 25.63it/s]\n",
105 | "100%|████████████████████████████████████████████████████████████████████████| 29/29 [00:11<00:00, 2.50it/s]\n",
106 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 23.90it/s]\n",
107 | "100%|████████████████████████████████████████████████████████████████████████| 30/30 [00:12<00:00, 2.46it/s]\n",
108 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 53.24it/s]\n",
109 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.51it/s]\n",
110 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 50.63it/s]\n",
111 | "100%|████████████████████████████████████████████████████████████████████████| 15/15 [00:05<00:00, 2.57it/s]\n"
112 | ]
113 | }
114 | ],
115 | "source": [
116 | "SPEAKER = \"v2/en_speaker_6\"\n",
117 | "silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence\n",
118 | "\n",
119 | "pieces = []\n",
120 | "for sentence in sentences:\n",
121 | " audio_array = generate_audio(sentence, history_prompt=SPEAKER)\n",
122 | " pieces += [audio_array, silence.copy()]\n"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "id": "04cf77f9",
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "Audio(np.concatenate(pieces), rate=SAMPLE_RATE)"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "id": "ac2d4625",
139 | "metadata": {},
140 | "outputs": [],
141 | "source": []
142 | },
143 | {
144 | "cell_type": "markdown",
145 | "id": "6d13249b",
146 | "metadata": {},
147 | "source": [
148 | "# $ \\\\ $"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "id": "cdfc8bf5",
154 | "metadata": {},
155 | "source": [
156 | "# Advanced Long-Form Generation\n",
157 |     "Somtimes Bark will hallucinate a little extra audio at the end of the prompt.\n",
158 |     "We can solve this issue by lowering the threshold for Bark to stop generating text. \n",
159 | "We use the `min_eos_p` kwarg in `generate_text_semantic`"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": 37,
165 | "id": "62807fd0",
166 | "metadata": {},
167 | "outputs": [
168 | {
169 | "name": "stderr",
170 | "output_type": "stream",
171 | "text": [
172 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 38.05it/s]\n",
173 | "100%|████████████████████████████████████████████████████████████████████████| 18/18 [00:07<00:00, 2.46it/s]\n",
174 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 32.28it/s]\n",
175 | "100%|████████████████████████████████████████████████████████████████████████| 21/21 [00:08<00:00, 2.54it/s]\n",
176 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 55.78it/s]\n",
177 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.57it/s]\n",
178 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:06<00:00, 14.73it/s]\n",
179 | "100%|████████████████████████████████████████████████████████████████████████| 35/35 [00:14<00:00, 2.47it/s]\n",
180 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 40.29it/s]\n",
181 | "100%|████████████████████████████████████████████████████████████████████████| 18/18 [00:07<00:00, 2.56it/s]\n",
182 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 32.92it/s]\n",
183 | "100%|████████████████████████████████████████████████████████████████████████| 20/20 [00:08<00:00, 2.47it/s]\n",
184 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 68.87it/s]\n",
185 | "100%|████████████████████████████████████████████████████████████████████████| 12/12 [00:04<00:00, 2.62it/s]\n",
186 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 47.64it/s]\n",
187 | "100%|████████████████████████████████████████████████████████████████████████| 15/15 [00:06<00:00, 2.46it/s]\n"
188 | ]
189 | }
190 | ],
191 | "source": [
192 | "GEN_TEMP = 0.6\n",
193 | "SPEAKER = \"v2/en_speaker_6\"\n",
194 | "silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence\n",
195 | "\n",
196 | "pieces = []\n",
197 | "for sentence in sentences:\n",
198 | " semantic_tokens = generate_text_semantic(\n",
199 | " sentence,\n",
200 | " history_prompt=SPEAKER,\n",
201 | " temp=GEN_TEMP,\n",
202 | " min_eos_p=0.05, # this controls how likely the generation is to end\n",
203 | " )\n",
204 | "\n",
205 | " audio_array = semantic_to_waveform(semantic_tokens, history_prompt=SPEAKER,)\n",
206 | " pieces += [audio_array, silence.copy()]\n",
207 | "\n"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": null,
213 | "id": "133fec46",
214 | "metadata": {},
215 | "outputs": [],
216 | "source": [
217 | "Audio(np.concatenate(pieces), rate=SAMPLE_RATE)"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "id": "6eee9f5a",
224 | "metadata": {},
225 | "outputs": [],
226 | "source": []
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "id": "be8e125e",
231 | "metadata": {},
232 | "source": [
233 | "# $ \\\\ $"
234 | ]
235 | },
236 | {
237 | "cell_type": "markdown",
238 | "id": "03a16c1b",
239 | "metadata": {},
240 | "source": [
241 | "# Make a Long-Form Dialog with Bark"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "id": "06c5eff8",
247 | "metadata": {},
248 | "source": [
249 | "### Step 1: Format a script and speaker lookup"
250 | ]
251 | },
252 | {
253 | "cell_type": "code",
254 | "execution_count": 14,
255 | "id": "5238b297",
256 | "metadata": {},
257 | "outputs": [
258 | {
259 | "data": {
260 | "text/plain": [
261 | "['Samantha: Hey, have you heard about this new text-to-audio model called \"Bark\"?',\n",
262 | " \"John: No, I haven't. What's so special about it?\",\n",
263 | " \"Samantha: Well, apparently it's the most realistic and natural-sounding text-to-audio model out there right now. People are saying it sounds just like a real person speaking.\",\n",
264 | " 'John: Wow, that sounds amazing. How does it work?',\n",
265 | " 'Samantha: I think it uses advanced machine learning algorithms to analyze and understand the nuances of human speech, and then replicates those nuances in its own speech output.',\n",
266 | " \"John: That's pretty impressive. Do you think it could be used for things like audiobooks or podcasts?\",\n",
267 | " 'Samantha: Definitely! In fact, I heard that some publishers are already starting to use Bark to create audiobooks. And I bet it would be great for podcasts too.',\n",
268 | " 'John: I can imagine. It would be like having your own personal voiceover artist.',\n",
269 | " 'Samantha: Exactly! I think Bark is going to be a game-changer in the world of text-to-audio technology.']"
270 | ]
271 | },
272 | "execution_count": 14,
273 | "metadata": {},
274 | "output_type": "execute_result"
275 | }
276 | ],
277 | "source": [
278 | "speaker_lookup = {\"Samantha\": \"v2/en_speaker_9\", \"John\": \"v2/en_speaker_2\"}\n",
279 | "\n",
280 | "# Script generated by chat GPT\n",
281 | "script = \"\"\"\n",
282 | "Samantha: Hey, have you heard about this new text-to-audio model called \"Bark\"?\n",
283 | "\n",
284 | "John: No, I haven't. What's so special about it?\n",
285 | "\n",
286 | "Samantha: Well, apparently it's the most realistic and natural-sounding text-to-audio model out there right now. People are saying it sounds just like a real person speaking.\n",
287 | "\n",
288 | "John: Wow, that sounds amazing. How does it work?\n",
289 | "\n",
290 | "Samantha: I think it uses advanced machine learning algorithms to analyze and understand the nuances of human speech, and then replicates those nuances in its own speech output.\n",
291 | "\n",
292 | "John: That's pretty impressive. Do you think it could be used for things like audiobooks or podcasts?\n",
293 | "\n",
294 | "Samantha: Definitely! In fact, I heard that some publishers are already starting to use Bark to create audiobooks. And I bet it would be great for podcasts too.\n",
295 | "\n",
296 | "John: I can imagine. It would be like having your own personal voiceover artist.\n",
297 | "\n",
298 | "Samantha: Exactly! I think Bark is going to be a game-changer in the world of text-to-audio technology.\"\"\"\n",
299 | "script = script.strip().split(\"\\n\")\n",
300 | "script = [s.strip() for s in script if s]\n",
301 | "script"
302 | ]
303 | },
304 | {
305 | "cell_type": "markdown",
306 | "id": "ee547efd",
307 | "metadata": {},
308 | "source": [
309 | "### Step 2: Generate the audio for every speaker turn"
310 | ]
311 | },
312 | {
313 | "cell_type": "code",
314 | "execution_count": 15,
315 | "id": "203e5081",
316 | "metadata": {},
317 | "outputs": [
318 | {
319 | "name": "stderr",
320 | "output_type": "stream",
321 | "text": [
322 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 34.03it/s]\n",
323 | "100%|████████████████████████████████████████████████████████████████████████| 22/22 [00:08<00:00, 2.55it/s]\n",
324 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 71.58it/s]\n",
325 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.65it/s]\n",
326 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 22.75it/s]\n",
327 | "100%|████████████████████████████████████████████████████████████████████████| 33/33 [00:13<00:00, 2.53it/s]\n",
328 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 70.76it/s]\n",
329 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.63it/s]\n",
330 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.46it/s]\n",
331 | "100%|████████████████████████████████████████████████████████████████████████| 36/36 [00:14<00:00, 2.47it/s]\n",
332 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.18it/s]\n",
333 | "100%|████████████████████████████████████████████████████████████████████████| 37/37 [00:14<00:00, 2.51it/s]\n",
334 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 23.04it/s]\n",
335 | "100%|████████████████████████████████████████████████████████████████████████| 32/32 [00:12<00:00, 2.48it/s]\n",
336 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 54.64it/s]\n",
337 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.58it/s]\n",
338 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 31.71it/s]\n",
339 | "100%|████████████████████████████████████████████████████████████████████████| 24/24 [00:09<00:00, 2.56it/s]\n"
340 | ]
341 | }
342 | ],
343 | "source": [
344 | "pieces = []\n",
345 | "silence = np.zeros(int(0.5*SAMPLE_RATE))\n",
346 | "for line in script:\n",
347 | " speaker, text = line.split(\": \")\n",
348 | " audio_array = generate_audio(text, history_prompt=speaker_lookup[speaker], )\n",
349 | " pieces += [audio_array, silence.copy()]"
350 | ]
351 | },
352 | {
353 | "cell_type": "markdown",
354 | "id": "7c54bada",
355 | "metadata": {},
356 | "source": [
357 | "### Step 3: Concatenate all of the audio and play it"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "id": "27a56842",
364 | "metadata": {},
365 | "outputs": [],
366 | "source": [
367 | "Audio(np.concatenate(pieces), rate=SAMPLE_RATE)"
368 | ]
369 | },
370 | {
371 | "cell_type": "code",
372 | "execution_count": null,
373 | "id": "a1bc5877",
374 | "metadata": {},
375 | "outputs": [],
376 | "source": []
377 | }
378 | ],
379 | "metadata": {
380 | "kernelspec": {
381 | "display_name": "Python 3 (ipykernel)",
382 | "language": "python",
383 | "name": "python3"
384 | },
385 | "language_info": {
386 | "codemirror_mode": {
387 | "name": "ipython",
388 | "version": 3
389 | },
390 | "file_extension": ".py",
391 | "mimetype": "text/x-python",
392 | "name": "python",
393 | "nbconvert_exporter": "python",
394 | "pygments_lexer": "ipython3",
395 | "version": "3.9.16"
396 | }
397 | },
398 | "nbformat": 4,
399 | "nbformat_minor": 5
400 | }
401 |
--------------------------------------------------------------------------------
/notebooks/memory_profiling_bark.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "90641144",
6 | "metadata": {},
7 | "source": [
8 | "# Bark Memory Profiling\n",
9 | "Bark has two ways to reduce GPU memory: \n",
10 | " - Small models: a smaller version of the model. This can be set by using the environment variable `SUNO_USE_SMALL_MODELS`\n",
11 | " - offloading models to CPU: Holding only one model at a time on the GPU, and shuttling the models to the CPU in between generations. \n",
12 | "\n",
13 | "# $ \\\\ $\n",
14 | "## First, we'll use the most memory efficient configuration"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "id": "39ea4bed",
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "import os\n",
25 | "\n",
26 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
27 | "os.environ[\"SUNO_USE_SMALL_MODELS\"] = \"1\"\n",
28 | "os.environ[\"SUNO_OFFLOAD_CPU\"] = \"1\"\n",
29 | "\n",
30 | "from bark.generation import (\n",
31 | " generate_text_semantic,\n",
32 | " preload_models,\n",
33 | ")\n",
34 | "from bark import generate_audio, SAMPLE_RATE\n",
35 | "\n",
36 | "import torch"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 2,
42 | "id": "66b0c006",
43 | "metadata": {},
44 | "outputs": [
45 | {
46 | "name": "stderr",
47 | "output_type": "stream",
48 | "text": [
49 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 62.17it/s]\n",
50 | "100%|████████████████████████████████████████████████████████████████████████| 10/10 [00:03<00:00, 2.74it/s]\n"
51 | ]
52 | },
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "max memory usage = 2396MB\n"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "torch.cuda.reset_peak_memory_stats()\n",
63 | "preload_models()\n",
64 | "audio_array = generate_audio(\"madam I'm adam\", history_prompt=\"v2/en_speaker_5\")\n",
65 | "max_utilization = torch.cuda.max_memory_allocated()\n",
66 | "print(f\"max memory usage = {max_utilization / 1024 / 1024:.0f}MB\")"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "id": "9922dd2d",
73 | "metadata": {},
74 | "outputs": [],
75 | "source": []
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "id": "bdbe578e",
81 | "metadata": {},
82 | "outputs": [],
83 | "source": []
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "id": "213d1b5b",
88 | "metadata": {},
89 | "source": [
90 | "# Memory Profiling:\n",
91 | "We can profile the memory consumption of 4 scenarios\n",
92 | " - Small models, offloading to CPU\n",
93 | " - Large models, offloading to CPU\n",
94 | " - Small models, not offloading to CPU\n",
95 | " - Large models, not offloading to CPU"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": 1,
101 | "id": "417d5e9c",
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "import os\n",
106 | "\n",
107 | "from bark.generation import (\n",
108 | " generate_text_semantic,\n",
109 | " preload_models,\n",
110 | " models,\n",
111 | ")\n",
112 | "import bark.generation\n",
113 | "\n",
114 | "from bark.api import semantic_to_waveform\n",
115 | "from bark import generate_audio, SAMPLE_RATE\n",
116 | "\n",
117 | "import torch\n",
118 | "import time"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": 2,
124 | "id": "cd83b45d",
125 | "metadata": {},
126 | "outputs": [
127 | {
128 | "name": "stdout",
129 | "output_type": "stream",
130 | "text": [
131 | "Small models True, offloading to CPU: True\n",
132 | "\tmax memory usage = 967MB, time 4s\n",
133 | "\n",
134 | "Small models False, offloading to CPU: True\n",
135 | "\tmax memory usage = 2407MB, time 8s\n",
136 | "\n",
137 | "Small models True, offloading to CPU: False\n",
138 | "\tmax memory usage = 2970MB, time 3s\n",
139 | "\n",
140 | "Small models False, offloading to CPU: False\n",
141 | "\tmax memory usage = 7824MB, time 6s\n",
142 | "\n"
143 | ]
144 | }
145 | ],
146 | "source": [
147 | "global models\n",
148 | "\n",
149 | "for offload_models in (True, False):\n",
150 | "    # this setattr is needed to change this setting on the fly\n",
151 | " # the easier way to do this is with `os.environ[\"SUNO_OFFLOAD_CPU\"] = \"1\"`\n",
152 | " setattr(bark.generation, \"OFFLOAD_CPU\", offload_models)\n",
153 | " for use_small_models in (True, False):\n",
154 | " models = {}\n",
155 | " torch.cuda.empty_cache()\n",
156 | " torch.cuda.reset_peak_memory_stats()\n",
157 | " preload_models(\n",
158 | " text_use_small=use_small_models,\n",
159 | " coarse_use_small=use_small_models,\n",
160 | " fine_use_small=use_small_models,\n",
161 | " force_reload=True,\n",
162 | " )\n",
163 | " t0 = time.time()\n",
164 | " audio_array = generate_audio(\"madam I'm adam\", history_prompt=\"v2/en_speaker_5\", silent=True)\n",
165 | " dur = time.time() - t0\n",
166 | " max_utilization = torch.cuda.max_memory_allocated()\n",
167 | " print(f\"Small models {use_small_models}, offloading to CPU: {offload_models}\")\n",
168 | " print(f\"\\tmax memory usage = {max_utilization / 1024 / 1024:.0f}MB, time {dur:.0f}s\\n\")"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "id": "bfe5fa06",
175 | "metadata": {},
176 | "outputs": [],
177 | "source": []
178 | }
179 | ],
180 | "metadata": {
181 | "kernelspec": {
182 | "display_name": "Python 3 (ipykernel)",
183 | "language": "python",
184 | "name": "python3"
185 | },
186 | "language_info": {
187 | "codemirror_mode": {
188 | "name": "ipython",
189 | "version": 3
190 | },
191 | "file_extension": ".py",
192 | "mimetype": "text/x-python",
193 | "name": "python",
194 | "nbconvert_exporter": "python",
195 | "pygments_lexer": "ipython3",
196 | "version": "3.9.16"
197 | }
198 | },
199 | "nbformat": 4,
200 | "nbformat_minor": 5
201 | }
202 |
--------------------------------------------------------------------------------
/notebooks/use_small_models_on_cpu.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "6a682b61",
6 | "metadata": {},
7 | "source": [
8 | "# Benchmarking small models on CPU\n",
9 | " - We can enable small models with the `SUNO_USE_SMALL_MODELS` environment variable"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "id": "9500dd93",
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "import os\n",
20 | "\n",
21 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n",
22 | "os.environ[\"SUNO_USE_SMALL_MODELS\"] = \"1\"\n",
23 | "\n",
24 | "from IPython.display import Audio\n",
25 | "import numpy as np\n",
26 | "\n",
27 | "from bark import generate_audio, preload_models, SAMPLE_RATE\n",
28 | "\n",
29 | "import time"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 2,
35 | "id": "4e3454b6",
36 | "metadata": {},
37 | "outputs": [
38 | {
39 | "name": "stderr",
40 | "output_type": "stream",
41 | "text": [
42 | "No GPU being used. Careful, inference might be very slow!\n"
43 | ]
44 | },
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "CPU times: user 5.52 s, sys: 2.34 s, total: 7.86 s\n",
50 | "Wall time: 4.33 s\n"
51 | ]
52 | }
53 | ],
54 | "source": [
55 | "%%time\n",
56 | "preload_models()"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 3,
62 | "id": "f6024e5f",
63 | "metadata": {},
64 | "outputs": [
65 | {
66 | "name": "stderr",
67 | "output_type": "stream",
68 | "text": [
69 | "100%|████████████████████████████████████████████████████████| 100/100 [00:10<00:00, 9.89it/s]\n",
70 | "100%|██████████████████████████████████████████████████████████| 15/15 [00:43<00:00, 2.90s/it]\n"
71 | ]
72 | },
73 | {
74 | "name": "stdout",
75 | "output_type": "stream",
76 | "text": [
77 | "took 62s to generate 6s of audio\n"
78 | ]
79 | }
80 | ],
81 | "source": [
82 | "t0 = time.time()\n",
83 | "text = \"In the light of the moon, a little egg lay on a leaf\"\n",
84 | "audio_array = generate_audio(text)\n",
85 | "generation_duration_s = time.time() - t0\n",
86 | "audio_duration_s = audio_array.shape[0] / SAMPLE_RATE\n",
87 | "\n",
88 | "print(f\"took {generation_duration_s:.0f}s to generate {audio_duration_s:.0f}s of audio\")"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 4,
94 | "id": "2dcce86c",
95 | "metadata": {},
96 | "outputs": [
97 | {
98 | "data": {
99 | "text/plain": [
100 | "10"
101 | ]
102 | },
103 | "execution_count": 4,
104 | "metadata": {},
105 | "output_type": "execute_result"
106 | }
107 | ],
108 | "source": [
109 | "os.cpu_count()"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "id": "3046eddb",
116 | "metadata": {},
117 | "outputs": [],
118 | "source": []
119 | }
120 | ],
121 | "metadata": {
122 | "kernelspec": {
123 | "display_name": "Python 3 (ipykernel)",
124 | "language": "python",
125 | "name": "python3"
126 | },
127 | "language_info": {
128 | "codemirror_mode": {
129 | "name": "ipython",
130 | "version": 3
131 | },
132 | "file_extension": ".py",
133 | "mimetype": "text/x-python",
134 | "name": "python",
135 | "nbconvert_exporter": "python",
136 | "pygments_lexer": "ipython3",
137 | "version": "3.9.16"
138 | }
139 | },
140 | "nbformat": 4,
141 | "nbformat_minor": 5
142 | }
143 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "suno-bark"
7 | version = "0.0.1a"
8 | description = "Bark text to audio model"
9 | readme = "README.md"
10 | requires-python = ">=3.8"
11 | authors = [
12 | {name = "Suno Inc", email = "hello@suno.ai"},
13 | ]
14 | # Apache 2.0
15 | license = {file = "LICENSE"}
16 |
17 | dependencies = [
18 | "boto3",
19 | "encodec",
20 | "funcy",
21 | "huggingface-hub>=0.14.1",
22 | "numpy",
23 | "scipy",
24 | "tokenizers",
25 | "torch",
26 | "tqdm",
27 | "transformers",
28 | ]
29 |
30 | [project.urls]
31 | source = "https://github.com/suno-ai/bark"
32 |
33 | [project.optional-dependencies]
34 | dev = [
35 | "bandit",
36 | "black",
37 | "codecov",
38 | "flake8",
39 | "hypothesis>=6.14,<7",
40 | "isort>=5.0.0,<6",
41 | "jupyter",
42 | "mypy",
43 | "nbconvert",
44 | "nbformat",
45 | "pydocstyle",
46 | "pylint",
47 | "pytest",
48 | "pytest-cov",
49 | ]
50 |
51 | [tool.setuptools]
52 | packages = ["bark"]
53 |
54 | [tool.setuptools.package-data]
55 | bark = ["assets/prompts/*.npz", "assets/prompts/v2/*.npz"]
56 |
57 |
58 | [tool.black]
59 | line-length = 100
60 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | 
3 | setup()  # intentionally empty: all package metadata is declared in pyproject.toml (PEP 621)
4 |
--------------------------------------------------------------------------------