├── .editorconfig ├── .eslintrc.json ├── .gitignore ├── .nvmrc ├── .prettierrc.json ├── LICENSE ├── README.md ├── bark ├── __init__.py ├── __main__.py ├── api.py ├── assets │ └── prompts │ │ ├── announcer.npz │ │ ├── de_speaker_0.npz │ │ ├── de_speaker_1.npz │ │ ├── de_speaker_2.npz │ │ ├── de_speaker_3.npz │ │ ├── de_speaker_4.npz │ │ ├── de_speaker_5.npz │ │ ├── de_speaker_6.npz │ │ ├── de_speaker_7.npz │ │ ├── de_speaker_8.npz │ │ ├── de_speaker_9.npz │ │ ├── en_speaker_0.npz │ │ ├── en_speaker_1.npz │ │ ├── en_speaker_2.npz │ │ ├── en_speaker_3.npz │ │ ├── en_speaker_4.npz │ │ ├── en_speaker_5.npz │ │ ├── en_speaker_6.npz │ │ ├── en_speaker_7.npz │ │ ├── en_speaker_8.npz │ │ ├── en_speaker_9.npz │ │ ├── es_speaker_0.npz │ │ ├── es_speaker_1.npz │ │ ├── es_speaker_2.npz │ │ ├── es_speaker_3.npz │ │ ├── es_speaker_4.npz │ │ ├── es_speaker_5.npz │ │ ├── es_speaker_6.npz │ │ ├── es_speaker_7.npz │ │ ├── es_speaker_8.npz │ │ ├── es_speaker_9.npz │ │ ├── fr_speaker_0.npz │ │ ├── fr_speaker_1.npz │ │ ├── fr_speaker_2.npz │ │ ├── fr_speaker_3.npz │ │ ├── fr_speaker_4.npz │ │ ├── fr_speaker_5.npz │ │ ├── fr_speaker_6.npz │ │ ├── fr_speaker_7.npz │ │ ├── fr_speaker_8.npz │ │ ├── fr_speaker_9.npz │ │ ├── hi_speaker_0.npz │ │ ├── hi_speaker_1.npz │ │ ├── hi_speaker_2.npz │ │ ├── hi_speaker_3.npz │ │ ├── hi_speaker_4.npz │ │ ├── hi_speaker_5.npz │ │ ├── hi_speaker_6.npz │ │ ├── hi_speaker_7.npz │ │ ├── hi_speaker_8.npz │ │ ├── hi_speaker_9.npz │ │ ├── it_speaker_0.npz │ │ ├── it_speaker_1.npz │ │ ├── it_speaker_2.npz │ │ ├── it_speaker_3.npz │ │ ├── it_speaker_4.npz │ │ ├── it_speaker_5.npz │ │ ├── it_speaker_6.npz │ │ ├── it_speaker_7.npz │ │ ├── it_speaker_8.npz │ │ ├── it_speaker_9.npz │ │ ├── ja_speaker_0.npz │ │ ├── ja_speaker_1.npz │ │ ├── ja_speaker_2.npz │ │ ├── ja_speaker_3.npz │ │ ├── ja_speaker_4.npz │ │ ├── ja_speaker_5.npz │ │ ├── ja_speaker_6.npz │ │ ├── ja_speaker_7.npz │ │ ├── ja_speaker_8.npz │ │ ├── ja_speaker_9.npz │ │ ├── ko_speaker_0.npz │ │ ├── 
ko_speaker_1.npz │ │ ├── ko_speaker_2.npz │ │ ├── ko_speaker_3.npz │ │ ├── ko_speaker_4.npz │ │ ├── ko_speaker_5.npz │ │ ├── ko_speaker_6.npz │ │ ├── ko_speaker_7.npz │ │ ├── ko_speaker_8.npz │ │ ├── ko_speaker_9.npz │ │ ├── pl_speaker_0.npz │ │ ├── pl_speaker_1.npz │ │ ├── pl_speaker_2.npz │ │ ├── pl_speaker_3.npz │ │ ├── pl_speaker_4.npz │ │ ├── pl_speaker_5.npz │ │ ├── pl_speaker_6.npz │ │ ├── pl_speaker_7.npz │ │ ├── pl_speaker_8.npz │ │ ├── pl_speaker_9.npz │ │ ├── pt_speaker_0.npz │ │ ├── pt_speaker_1.npz │ │ ├── pt_speaker_2.npz │ │ ├── pt_speaker_3.npz │ │ ├── pt_speaker_4.npz │ │ ├── pt_speaker_5.npz │ │ ├── pt_speaker_6.npz │ │ ├── pt_speaker_7.npz │ │ ├── pt_speaker_8.npz │ │ ├── pt_speaker_9.npz │ │ ├── readme.md │ │ ├── ru_speaker_0.npz │ │ ├── ru_speaker_1.npz │ │ ├── ru_speaker_2.npz │ │ ├── ru_speaker_3.npz │ │ ├── ru_speaker_4.npz │ │ ├── ru_speaker_5.npz │ │ ├── ru_speaker_6.npz │ │ ├── ru_speaker_7.npz │ │ ├── ru_speaker_8.npz │ │ ├── ru_speaker_9.npz │ │ ├── speaker_0.npz │ │ ├── speaker_1.npz │ │ ├── speaker_2.npz │ │ ├── speaker_3.npz │ │ ├── speaker_4.npz │ │ ├── speaker_5.npz │ │ ├── speaker_6.npz │ │ ├── speaker_7.npz │ │ ├── speaker_8.npz │ │ ├── speaker_9.npz │ │ ├── tr_speaker_0.npz │ │ ├── tr_speaker_1.npz │ │ ├── tr_speaker_2.npz │ │ ├── tr_speaker_3.npz │ │ ├── tr_speaker_4.npz │ │ ├── tr_speaker_5.npz │ │ ├── tr_speaker_6.npz │ │ ├── tr_speaker_7.npz │ │ ├── tr_speaker_8.npz │ │ ├── tr_speaker_9.npz │ │ ├── v2 │ │ ├── de_speaker_0.npz │ │ ├── de_speaker_1.npz │ │ ├── de_speaker_2.npz │ │ ├── de_speaker_3.npz │ │ ├── de_speaker_4.npz │ │ ├── de_speaker_5.npz │ │ ├── de_speaker_6.npz │ │ ├── de_speaker_7.npz │ │ ├── de_speaker_8.npz │ │ ├── de_speaker_9.npz │ │ ├── en_speaker_0.npz │ │ ├── en_speaker_1.npz │ │ ├── en_speaker_2.npz │ │ ├── en_speaker_3.npz │ │ ├── en_speaker_4.npz │ │ ├── en_speaker_5.npz │ │ ├── en_speaker_6.npz │ │ ├── en_speaker_7.npz │ │ ├── en_speaker_8.npz │ │ ├── en_speaker_9.npz │ │ ├── es_speaker_0.npz │ │ ├── 
es_speaker_1.npz │ │ ├── es_speaker_2.npz │ │ ├── es_speaker_3.npz │ │ ├── es_speaker_4.npz │ │ ├── es_speaker_5.npz │ │ ├── es_speaker_6.npz │ │ ├── es_speaker_7.npz │ │ ├── es_speaker_8.npz │ │ ├── es_speaker_9.npz │ │ ├── fr_speaker_0.npz │ │ ├── fr_speaker_1.npz │ │ ├── fr_speaker_2.npz │ │ ├── fr_speaker_3.npz │ │ ├── fr_speaker_4.npz │ │ ├── fr_speaker_5.npz │ │ ├── fr_speaker_6.npz │ │ ├── fr_speaker_7.npz │ │ ├── fr_speaker_8.npz │ │ ├── fr_speaker_9.npz │ │ ├── hi_speaker_0.npz │ │ ├── hi_speaker_1.npz │ │ ├── hi_speaker_2.npz │ │ ├── hi_speaker_3.npz │ │ ├── hi_speaker_4.npz │ │ ├── hi_speaker_5.npz │ │ ├── hi_speaker_6.npz │ │ ├── hi_speaker_7.npz │ │ ├── hi_speaker_8.npz │ │ ├── hi_speaker_9.npz │ │ ├── it_speaker_0.npz │ │ ├── it_speaker_1.npz │ │ ├── it_speaker_2.npz │ │ ├── it_speaker_3.npz │ │ ├── it_speaker_4.npz │ │ ├── it_speaker_5.npz │ │ ├── it_speaker_6.npz │ │ ├── it_speaker_7.npz │ │ ├── it_speaker_8.npz │ │ ├── it_speaker_9.npz │ │ ├── ja_speaker_0.npz │ │ ├── ja_speaker_1.npz │ │ ├── ja_speaker_2.npz │ │ ├── ja_speaker_3.npz │ │ ├── ja_speaker_4.npz │ │ ├── ja_speaker_5.npz │ │ ├── ja_speaker_6.npz │ │ ├── ja_speaker_7.npz │ │ ├── ja_speaker_8.npz │ │ ├── ja_speaker_9.npz │ │ ├── ko_speaker_0.npz │ │ ├── ko_speaker_1.npz │ │ ├── ko_speaker_2.npz │ │ ├── ko_speaker_3.npz │ │ ├── ko_speaker_4.npz │ │ ├── ko_speaker_5.npz │ │ ├── ko_speaker_6.npz │ │ ├── ko_speaker_7.npz │ │ ├── ko_speaker_8.npz │ │ ├── ko_speaker_9.npz │ │ ├── pl_speaker_0.npz │ │ ├── pl_speaker_1.npz │ │ ├── pl_speaker_2.npz │ │ ├── pl_speaker_3.npz │ │ ├── pl_speaker_4.npz │ │ ├── pl_speaker_5.npz │ │ ├── pl_speaker_6.npz │ │ ├── pl_speaker_7.npz │ │ ├── pl_speaker_8.npz │ │ ├── pl_speaker_9.npz │ │ ├── pt_speaker_0.npz │ │ ├── pt_speaker_1.npz │ │ ├── pt_speaker_2.npz │ │ ├── pt_speaker_3.npz │ │ ├── pt_speaker_4.npz │ │ ├── pt_speaker_5.npz │ │ ├── pt_speaker_6.npz │ │ ├── pt_speaker_7.npz │ │ ├── pt_speaker_8.npz │ │ ├── pt_speaker_9.npz │ │ ├── ru_speaker_0.npz │ │ ├── 
ru_speaker_1.npz │ │ ├── ru_speaker_2.npz │ │ ├── ru_speaker_3.npz │ │ ├── ru_speaker_4.npz │ │ ├── ru_speaker_5.npz │ │ ├── ru_speaker_6.npz │ │ ├── ru_speaker_7.npz │ │ ├── ru_speaker_8.npz │ │ ├── ru_speaker_9.npz │ │ ├── tr_speaker_0.npz │ │ ├── tr_speaker_1.npz │ │ ├── tr_speaker_2.npz │ │ ├── tr_speaker_3.npz │ │ ├── tr_speaker_4.npz │ │ ├── tr_speaker_5.npz │ │ ├── tr_speaker_6.npz │ │ ├── tr_speaker_7.npz │ │ ├── tr_speaker_8.npz │ │ ├── tr_speaker_9.npz │ │ ├── zh_speaker_0.npz │ │ ├── zh_speaker_1.npz │ │ ├── zh_speaker_2.npz │ │ ├── zh_speaker_3.npz │ │ ├── zh_speaker_4.npz │ │ ├── zh_speaker_5.npz │ │ ├── zh_speaker_6.npz │ │ ├── zh_speaker_7.npz │ │ ├── zh_speaker_8.npz │ │ └── zh_speaker_9.npz │ │ ├── zh_speaker_0.npz │ │ ├── zh_speaker_1.npz │ │ ├── zh_speaker_2.npz │ │ ├── zh_speaker_3.npz │ │ ├── zh_speaker_4.npz │ │ ├── zh_speaker_5.npz │ │ ├── zh_speaker_6.npz │ │ ├── zh_speaker_7.npz │ │ ├── zh_speaker_8.npz │ │ └── zh_speaker_9.npz ├── cli.py ├── generation.py ├── model.py └── model_fine.py ├── model-card.md ├── next-env.d.ts ├── next.config.js ├── notebooks ├── fake_classifier.ipynb ├── long_form_generation.ipynb ├── memory_profiling_bark.ipynb └── use_small_models_on_cpu.ipynb ├── package-lock.json ├── package.json ├── public ├── favicon.ico └── voices │ ├── announcer.png │ └── default.png ├── pyproject.toml ├── setup.py ├── src ├── ions │ ├── constants.ts │ ├── createEmotionCache.ts │ └── theme.ts ├── organisms │ └── MusicPlayer.tsx ├── pages │ ├── _app.tsx │ ├── _document.tsx │ ├── api │ │ ├── generate.ts │ │ ├── uploads │ │ │ └── [...args].ts │ │ └── voices │ │ │ └── [...args].ts │ └── index.tsx ├── templates │ └── base.tsx └── types │ ├── app.ts │ └── common.ts └── tsconfig.json /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | indent_style = tab 6 | indent_size = 4 7 | max_line_length = 100 8 | end_of_line = lf 9 | trim_trailing_whitespace 
= true 10 | insert_final_newline = true 11 | 12 | # trailing spaces in markdown indicate word wrap 13 | [*.md] 14 | trim_trailing_whitespace = false 15 | 16 | 17 | [{*.json,*.md,*.yml,.*rc,.*config}] 18 | indent_style = space 19 | 20 | [{*.json,*.yml,.*rc,.*config}] 21 | indent_size = 2 22 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parser": "@typescript-eslint/parser", 4 | "extends": ["typescript", "eslint:recommended", "xo", "plugin:prettier/recommended", "next/core-web-vitals"], 5 | "env": { 6 | "browser": true, 7 | "es2022": true, 8 | "jest": true, 9 | "node": true 10 | }, 11 | "globals": { 12 | "BufferEncoding": "readonly" 13 | }, 14 | "plugins": ["@typescript-eslint", "unicorn", "unused-imports", "import", "jest", "prettier"], 15 | "ignorePatterns": ["*.d.ts"], 16 | "rules": { 17 | "import/order": [ 18 | "error", 19 | { 20 | "alphabetize": { 21 | "order": "asc", 22 | "caseInsensitive": true 23 | }, 24 | "newlines-between": "always" 25 | } 26 | ], 27 | "no-unused-vars": "off", 28 | "@typescript-eslint/no-unused-vars": "off", 29 | "unused-imports/no-unused-imports": "error", 30 | "unused-imports/no-unused-vars": [ 31 | "warn", 32 | { "vars": "all", "varsIgnorePattern": "^_", "args": "after-used", "argsIgnorePattern": "^_" } 33 | ], 34 | "no-nested-ternary": "error", 35 | "no-multiple-empty-lines": "error", 36 | "object-curly-spacing": ["error", "always"], 37 | "prettier/prettier": "error", 38 | "arrow-body-style": ["error", "as-needed"], 39 | "@typescript-eslint/consistent-type-imports": "warn" 40 | }, 41 | "overrides": [ 42 | { 43 | "files": "types.ts", 44 | "rules": { 45 | "no-unused-vars": "off" 46 | } 47 | } 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | __pycache__/ 2 | suno_bark.egg-info/ 3 | 4 | # Generated 5 | /public/uploads 6 | /public/voices/*.png 7 | !/public/voices/default.png 8 | !/public/voices/announcer.png 9 | /bark_generation.wav 10 | /build/lib/bark 11 | 12 | # dependencies 13 | /node_modules 14 | /.pnp 15 | .pnp.js 16 | 17 | # testing 18 | /coverage 19 | 20 | # next.js 21 | /.next/ 22 | /out/ 23 | 24 | # production 25 | /build 26 | 27 | # misc 28 | .DS_Store 29 | *.pem 30 | 31 | # debug 32 | npm-debug.log* 33 | yarn-debug.log* 34 | yarn-error.log* 35 | .pnpm-debug.log* 36 | 37 | # local env files 38 | .env*.local 39 | 40 | # vercel 41 | .vercel 42 | 43 | # Intellij 44 | .idea 45 | 46 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | v18.16.0 2 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "trailingComma": "es5", 3 | "semi": true, 4 | "singleQuote": false, 5 | "quoteProps": "as-needed", 6 | "jsxSingleQuote": false, 7 | "bracketSpacing": true, 8 | "arrowParens": "avoid", 9 | "proseWrap": "always" 10 | } 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🐶 Bark WEB UI 2 | 3 | A Next.js Frontend for [Bark](https://github.com/suno-ai/bark). Please refer to the original docs for up to date 4 | information on Bark by [Suno](https://www.suno.ai/). 5 | 6 | ## Notice 7 | 8 | Bark is limited to ~13 seconds of output. This demo does not (yet) handle longer texts, but we already have prototypes that handle it, so this will be rather easy to add. 
**stay tuned** 9 | 10 | This project (currently) assumes that you have used npm before and are comfortable setting up your 11 | environment. 12 | 13 | ## User Interface 14 | 15 | ![image](https://github.com/failfa-st/bark-web-ui/assets/1148334/0843b43c-233a-402b-a519-705fb30af981) 16 | 17 | 18 | ## Setup 19 | 20 | Certain steps (to get the miniconda running with GPU support) were boldly copied from [this Gradio Web-ui for Bark](https://github.com/Fictiverse/bark). 21 | TBH, I have no idea what I did here but it might just work :). If you know your way around python and setting this up manually, feel free to contribute. 22 | I am no Python developer and was only able to adjust things based on trial and error, (adjusting types in the python code) 23 | 24 | ### Windows 25 | 26 | You can try the [one click installer](https://github.com/failfa-st/bark-web-ui/releases/download/v0.1.0/bark-ui-windows.zip). 27 | Simply download it, extract it and double-click `run.bat` (and hope that it works). 28 | If you want to update the project, you can run the `update.bat` file. 29 | If it doesn't work feel free to open an issue, so that we can look into fixing any issues that might occur. 30 | 31 | Thank you 32 | 33 | ### MacOS & Linux 34 | 35 | 1. Clone this repository 36 | 2. Ensure that you have `python` installed 37 | 3. Ensure that you have `node@18` installed (you can run `nvm use` if you use [NVM](https://github.com/nvm-sh/nvm)) 38 | 4. run `npm install` to install npm dependencies 39 | 5. run `pip install .` to install python requirements 40 | 6. (once) run `npm run download:model` to download the model (also executes [bark with a test generation](#download)) 41 | 7. 
import numpy as np
from typing import Optional

from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic


def text_to_semantic(
    text: str,
    history_prompt: Optional[str] = None,
    temp: float = 0.7,
    silent: bool = False,
):
    """Generate a semantic token array from text.

    Args:
        text: text to be turned into audio
        history_prompt: optional history choice for audio cloning
        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar

    Returns:
        numpy semantic array to be fed into `semantic_to_waveform`
    """
    return generate_text_semantic(
        text,
        history_prompt=history_prompt,
        temp=temp,
        silent=silent,
        use_kv_caching=True,
    )


def semantic_to_waveform(
    semantic_tokens: np.ndarray,
    history_prompt: Optional[str] = None,
    temp: float = 0.7,
    silent: bool = False,
    output_full: bool = False,
):
    """Generate an audio array from semantic input.

    Args:
        semantic_tokens: semantic token output from `text_to_semantic`
        history_prompt: optional history choice for audio cloning
        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar
        output_full: also return the full generation to be reused as a history prompt

    Returns:
        numpy audio array at sample frequency 24khz, or a
        (full_generation, audio_arr) tuple when `output_full` is True
    """
    coarse_tokens = generate_coarse(
        semantic_tokens,
        history_prompt=history_prompt,
        temp=temp,
        silent=silent,
        use_kv_caching=True,
    )
    # NOTE: the fine stage uses a fixed temperature of 0.5; `temp` only drives
    # the coarse stage.
    fine_tokens = generate_fine(
        coarse_tokens,
        history_prompt=history_prompt,
        temp=0.5,
    )
    audio_arr = codec_decode(fine_tokens)
    if output_full:
        full_generation = {
            "semantic_prompt": semantic_tokens,
            "coarse_prompt": coarse_tokens,
            "fine_prompt": fine_tokens,
        }
        return full_generation, audio_arr
    return audio_arr


def save_as_prompt(filepath: str, full_generation: dict) -> None:
    """Persist a full generation dict as an `.npz` voice-prompt file.

    Args:
        filepath: destination path; must end with ".npz"
        full_generation: dict with "semantic_prompt", "coarse_prompt" and
            "fine_prompt" keys, as produced by
            `semantic_to_waveform(..., output_full=True)`

    Raises:
        AssertionError: if the path or dict does not match the expected format
    """
    assert filepath.endswith(".npz")
    assert isinstance(full_generation, dict)
    assert "semantic_prompt" in full_generation
    assert "coarse_prompt" in full_generation
    assert "fine_prompt" in full_generation
    np.savez(filepath, **full_generation)


def generate_audio(
    text: str,
    history_prompt: Optional[str] = None,
    text_temp: float = 0.7,
    waveform_temp: float = 0.7,
    silent: bool = False,
    output_full: bool = False,
):
    """Generate an audio array from input text.

    Args:
        text: text to be turned into audio
        history_prompt: optional history choice for audio cloning
        text_temp: semantic-stage temperature (1.0 more diverse, 0.0 more conservative)
        waveform_temp: waveform-stage temperature (1.0 more diverse, 0.0 more conservative)
        silent: disable progress bar
        output_full: also return the full generation to be reused as a history prompt

    Returns:
        numpy audio array at sample frequency 24khz, or a
        (full_generation, audio_arr) tuple when `output_full` is True
    """
    semantic_tokens = text_to_semantic(
        text,
        history_prompt=history_prompt,
        temp=text_temp,
        silent=silent,
    )
    # `semantic_to_waveform` already returns either the bare audio array or the
    # (full_generation, audio) tuple depending on `output_full`, so its result
    # can be passed straight through.
    return semantic_to_waveform(
        semantic_tokens,
        history_prompt=history_prompt,
        temp=waveform_temp,
        silent=silent,
        output_full=output_full,
    )
-------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/de_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/de_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_2.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_8.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/en_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/en_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_4.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/es_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/es_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_0.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/fr_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/fr_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_2.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_8.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/hi_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/hi_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_4.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/it_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/it_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_0.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ja_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ja_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_2.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_8.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/ko_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ko_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_4.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pl_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pl_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_0.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/pt_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/pt_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/readme.md: -------------------------------------------------------------------------------- 1 | # Example Prompts Data 2 | 3 | ## Version Two 4 | The `v2` prompts are better engineered to follow text with a consistent voice. 5 | To use them, simply include `v2` in the prompt. For example 6 | ```python 7 | from bark import generate_audio 8 | text_prompt = "madam I'm adam" 9 | audio_array = generate_audio(text_prompt, history_prompt="v2/en_speaker_1") 10 | ``` 11 | 12 | ## Prompt Format 13 | The provided data is in the .npz format, which is a file format used in Python for storing arrays and data. The data contains three arrays: semantic_prompt, coarse_prompt, and fine_prompt. 14 | 15 | ```semantic_prompt``` 16 | 17 | The semantic_prompt array contains a sequence of token IDs generated by the BERT tokenizer from Hugging Face. 
These tokens encode the text input and are used as an input to generate the audio output. The shape of this array is (n,), where n is the number of tokens in the input text. 18 | 19 | ```coarse_prompt``` 20 | 21 | The coarse_prompt array is an intermediate output of the text-to-speech pipeline, and contains token IDs generated by the first two codebooks of the EnCodec Codec from Facebook. This step converts the semantic tokens into a different representation that is better suited for the subsequent step. The shape of this array is (2, m), where m is the number of tokens after conversion by the EnCodec Codec. 22 | 23 | ```fine_prompt``` 24 | 25 | The fine_prompt array is a further processed output of the pipeline, and contains 8 codebooks from the EnCodec Codec. These codebooks represent the final stage of tokenization, and the resulting tokens are used to generate the audio output. The shape of this array is (8, p), where p is the number of tokens after further processing by the EnCodec Codec. 26 | 27 | Overall, these arrays represent different stages of a text-to-speech pipeline that converts text input into synthesized audio output. The semantic_prompt array represents the input text, while coarse_prompt and fine_prompt represent intermediate and final stages of tokenization, respectively. 
28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_5.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/ru_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/ru_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_1.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_7.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_3.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/tr_speaker_9.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/tr_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_5.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/de_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/de_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_0.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/en_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_6.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/en_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/en_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_2.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_8.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/es_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/es_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_3.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/fr_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/fr_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/fr_speaker_9.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_5.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/hi_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/hi_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_1.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_6.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/it_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/it_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/it_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_2.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_8.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ja_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ja_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_4.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ko_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ko_speaker_9.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/pl_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_5.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pl_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pl_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_1.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_7.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/pt_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/pt_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_2.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/ru_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_8.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/v2/ru_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/ru_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_4.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/tr_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/tr_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_0.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_1.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_5.npz -------------------------------------------------------------------------------- 
/bark/assets/prompts/v2/zh_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_7.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/v2/zh_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/v2/zh_speaker_9.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_0.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_1.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_2.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_3.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_4.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_4.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_5.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_6.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_7.npz 
-------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_8.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_8.npz -------------------------------------------------------------------------------- /bark/assets/prompts/zh_speaker_9.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/bark/assets/prompts/zh_speaker_9.npz -------------------------------------------------------------------------------- /bark/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | from scipy.io.wavfile import write as write_wav 5 | from .api import generate_audio 6 | from .generation import SAMPLE_RATE 7 | 8 | 9 | def cli(): 10 | """Commandline interface.""" 11 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 12 | parser.add_argument("--text", type=str, help="text to be turned into audio") 13 | parser.add_argument( 14 | "--output_filename", 15 | type=str, 16 | default="bark_generation.wav", 17 | help="output audio file name", 18 | ) 19 | parser.add_argument("--output_dir", type=str, default=".", help="directory to save the outputs") 20 | parser.add_argument( 21 | "--history_prompt", 22 | type=str, 23 | default=None, 24 | help="history choice for audio cloning", 25 | ) 26 | parser.add_argument( 27 | "--text_temp", 28 | default=0.7, 29 | type=float, 30 | help="generation temperature (1.0 more diverse, 0.0 more conservative)", 31 | ) 32 | parser.add_argument( 33 | "--waveform_temp", 34 | default=0.7, 35 | type=float, 36 | help="generation temperature (1.0 more diverse, 0.0 more conservative)", 37 | ) 38 | parser.add_argument("--silent", 
default=False, type=bool, help="disable progress bar") 39 | parser.add_argument( 40 | "--output_full", 41 | default=False, 42 | type=bool, 43 | help="return full generation to be used as a history prompt", 44 | ) 45 | 46 | args = vars(parser.parse_args()) 47 | input_text: str = args.get("text") 48 | output_filename: str = args.get("output_filename") 49 | output_dir: str = args.get("output_dir") 50 | history_prompt: str = args.get("history_prompt") 51 | text_temp: float = args.get("text_temp") 52 | waveform_temp: float = args.get("waveform_temp") 53 | silent: bool = args.get("silent") 54 | output_full: bool = args.get("output_full") 55 | 56 | try: 57 | os.makedirs(output_dir, exist_ok=True) 58 | generated_audio = generate_audio( 59 | input_text, 60 | history_prompt=history_prompt, 61 | text_temp=text_temp, 62 | waveform_temp=waveform_temp, 63 | silent=silent, 64 | output_full=output_full, 65 | ) 66 | output_file_path = os.path.join(output_dir, output_filename) 67 | write_wav(output_file_path, SAMPLE_RATE, generated_audio) 68 | print(f"Done! 
Output audio file is saved at: '{output_file_path}'") 69 | except Exception as e: 70 | print(f"Oops, an error occurred: {e}") 71 | -------------------------------------------------------------------------------- /bark/generation.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import gc 3 | import os 4 | import re 5 | 6 | from encodec import EncodecModel 7 | import funcy 8 | import logging 9 | import numpy as np 10 | from scipy.special import softmax 11 | import torch 12 | import torch.nn.functional as F 13 | import tqdm 14 | from transformers import BertTokenizer 15 | from huggingface_hub import hf_hub_download 16 | 17 | from .model import GPTConfig, GPT 18 | from .model_fine import FineGPT, FineGPTConfig 19 | 20 | if ( 21 | torch.cuda.is_available() and 22 | hasattr(torch.cuda, "amp") and 23 | hasattr(torch.cuda.amp, "autocast") and 24 | hasattr(torch.cuda, "is_bf16_supported") and 25 | torch.cuda.is_bf16_supported() 26 | ): 27 | autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16) 28 | else: 29 | @contextlib.contextmanager 30 | def autocast(): 31 | yield 32 | 33 | 34 | # hold models in global scope to lazy load 35 | global models 36 | models = {} 37 | 38 | global models_devices 39 | models_devices = {} 40 | 41 | 42 | CONTEXT_WINDOW_SIZE = 1024 43 | 44 | SEMANTIC_RATE_HZ = 49.9 45 | SEMANTIC_VOCAB_SIZE = 10_000 46 | 47 | CODEBOOK_SIZE = 1024 48 | N_COARSE_CODEBOOKS = 2 49 | N_FINE_CODEBOOKS = 8 50 | COARSE_RATE_HZ = 75 51 | 52 | SAMPLE_RATE = 24_000 53 | 54 | 55 | SUPPORTED_LANGS = [ 56 | ("English", "en"), 57 | ("German", "de"), 58 | ("Spanish", "es"), 59 | ("French", "fr"), 60 | ("Hindi", "hi"), 61 | ("Italian", "it"), 62 | ("Japanese", "ja"), 63 | ("Korean", "ko"), 64 | ("Polish", "pl"), 65 | ("Portuguese", "pt"), 66 | ("Russian", "ru"), 67 | ("Turkish", "tr"), 68 | ("Chinese", "zh"), 69 | ] 70 | 71 | ALLOWED_PROMPTS = {"announcer"} 72 | for _, lang in SUPPORTED_LANGS: 73 | for 
prefix in ("", f"v2{os.path.sep}"): 74 | for n in range(10): 75 | ALLOWED_PROMPTS.add(f"{prefix}{lang}_speaker_{n}") 76 | 77 | 78 | logger = logging.getLogger(__name__) 79 | 80 | 81 | CUR_PATH = os.path.dirname(os.path.abspath(__file__)) 82 | 83 | 84 | default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") 85 | CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") 86 | 87 | 88 | def _cast_bool_env_var(s): 89 | return s.lower() in ('true', '1', 't') 90 | 91 | 92 | USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False")) 93 | GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False")) 94 | OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False")) 95 | 96 | 97 | REMOTE_MODEL_PATHS = { 98 | "text_small": { 99 | "repo_id": "suno/bark", 100 | "file_name": "text.pt", 101 | }, 102 | "coarse_small": { 103 | "repo_id": "suno/bark", 104 | "file_name": "coarse.pt", 105 | }, 106 | "fine_small": { 107 | "repo_id": "suno/bark", 108 | "file_name": "fine.pt", 109 | }, 110 | "text": { 111 | "repo_id": "suno/bark", 112 | "file_name": "text_2.pt", 113 | }, 114 | "coarse": { 115 | "repo_id": "suno/bark", 116 | "file_name": "coarse_2.pt", 117 | }, 118 | "fine": { 119 | "repo_id": "suno/bark", 120 | "file_name": "fine_2.pt", 121 | }, 122 | } 123 | 124 | 125 | if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available(): 126 | logger.warning( 127 | "torch version does not support flash attention. You will get faster" + 128 | " inference speed by upgrade torch to newest nightly version." 
129 | ) 130 | 131 | 132 | def _grab_best_device(use_gpu=True): 133 | if torch.cuda.device_count() > 0 and use_gpu: 134 | device = "cuda" 135 | elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS: 136 | device = "mps" 137 | else: 138 | device = "cpu" 139 | return device 140 | 141 | 142 | def _get_ckpt_path(model_type, use_small=False): 143 | key = model_type 144 | if use_small or USE_SMALL_MODELS: 145 | key += "_small" 146 | return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"]) 147 | 148 | 149 | def _download(from_hf_path, file_name): 150 | os.makedirs(CACHE_DIR, exist_ok=True) 151 | hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR) 152 | 153 | 154 | class InferenceContext: 155 | def __init__(self, benchmark=False): 156 | # we can't expect inputs to be the same length, so disable benchmarking by default 157 | self._chosen_cudnn_benchmark = benchmark 158 | self._cudnn_benchmark = None 159 | 160 | def __enter__(self): 161 | self._cudnn_benchmark = torch.backends.cudnn.benchmark 162 | torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark 163 | 164 | def __exit__(self, exc_type, exc_value, exc_traceback): 165 | torch.backends.cudnn.benchmark = self._cudnn_benchmark 166 | 167 | 168 | if torch.cuda.is_available(): 169 | torch.backends.cuda.matmul.allow_tf32 = True 170 | torch.backends.cudnn.allow_tf32 = True 171 | 172 | 173 | @contextlib.contextmanager 174 | def _inference_mode(): 175 | with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast(): 176 | yield 177 | 178 | 179 | def _clear_cuda_cache(): 180 | if torch.cuda.is_available(): 181 | torch.cuda.empty_cache() 182 | torch.cuda.synchronize() 183 | 184 | 185 | def clean_models(model_key=None): 186 | global models 187 | model_keys = [model_key] if model_key is not None else models.keys() 188 | for k in model_keys: 189 | if k in models: 190 | del models[k] 191 | _clear_cuda_cache() 192 | gc.collect() 193 | 194 | 195 | def 
_load_model(ckpt_path, device, use_small=False, model_type="text"): 196 | if model_type == "text": 197 | ConfigClass = GPTConfig 198 | ModelClass = GPT 199 | elif model_type == "coarse": 200 | ConfigClass = GPTConfig 201 | ModelClass = GPT 202 | elif model_type == "fine": 203 | ConfigClass = FineGPTConfig 204 | ModelClass = FineGPT 205 | else: 206 | raise NotImplementedError() 207 | model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type 208 | model_info = REMOTE_MODEL_PATHS[model_key] 209 | if not os.path.exists(ckpt_path): 210 | logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.") 211 | _download(model_info["repo_id"], model_info["file_name"]) 212 | checkpoint = torch.load(ckpt_path, map_location=device) 213 | # this is a hack 214 | model_args = checkpoint["model_args"] 215 | if "input_vocab_size" not in model_args: 216 | model_args["input_vocab_size"] = model_args["vocab_size"] 217 | model_args["output_vocab_size"] = model_args["vocab_size"] 218 | del model_args["vocab_size"] 219 | gptconf = ConfigClass(**checkpoint["model_args"]) 220 | model = ModelClass(gptconf) 221 | state_dict = checkpoint["model"] 222 | # fixup checkpoint 223 | unwanted_prefix = "_orig_mod." 
224 | for k, v in list(state_dict.items()): 225 | if k.startswith(unwanted_prefix): 226 | state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k) 227 | extra_keys = set(state_dict.keys()) - set(model.state_dict().keys()) 228 | extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")]) 229 | missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) 230 | missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")]) 231 | if len(extra_keys) != 0: 232 | raise ValueError(f"extra keys found: {extra_keys}") 233 | if len(missing_keys) != 0: 234 | raise ValueError(f"missing keys: {missing_keys}") 235 | model.load_state_dict(state_dict, strict=False) 236 | n_params = model.get_num_params() 237 | val_loss = checkpoint["best_val_loss"].item() 238 | logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss") 239 | model.eval() 240 | model.to(device) 241 | del checkpoint, state_dict 242 | _clear_cuda_cache() 243 | if model_type == "text": 244 | tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased") 245 | return { 246 | "model": model, 247 | "tokenizer": tokenizer, 248 | } 249 | return model 250 | 251 | 252 | def _load_codec_model(device): 253 | model = EncodecModel.encodec_model_24khz() 254 | model.set_target_bandwidth(6.0) 255 | model.eval() 256 | model.to(device) 257 | _clear_cuda_cache() 258 | return model 259 | 260 | 261 | def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"): 262 | _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small) 263 | if model_type not in ("text", "coarse", "fine"): 264 | raise NotImplementedError() 265 | global models 266 | global models_devices 267 | device = _grab_best_device(use_gpu=use_gpu) 268 | model_key = f"{model_type}" 269 | if OFFLOAD_CPU: 270 | models_devices[model_key] = device 271 | device = "cpu" 272 | if model_key not in models or force_reload: 273 | ckpt_path = 
_get_ckpt_path(model_type, use_small=use_small) 274 | clean_models(model_key=model_key) 275 | model = _load_model_f(ckpt_path, device) 276 | models[model_key] = model 277 | if model_type == "text": 278 | models[model_key]["model"].to(device) 279 | else: 280 | models[model_key].to(device) 281 | return models[model_key] 282 | 283 | 284 | def load_codec_model(use_gpu=True, force_reload=False): 285 | global models 286 | global models_devices 287 | device = _grab_best_device(use_gpu=use_gpu) 288 | if device == "mps": 289 | # encodec doesn't support mps 290 | device = "cpu" 291 | model_key = "codec" 292 | if OFFLOAD_CPU: 293 | models_devices[model_key] = device 294 | device = "cpu" 295 | if model_key not in models or force_reload: 296 | clean_models(model_key=model_key) 297 | model = _load_codec_model(device) 298 | models[model_key] = model 299 | models[model_key].to(device) 300 | return models[model_key] 301 | 302 | 303 | def preload_models( 304 | text_use_gpu=True, 305 | text_use_small=False, 306 | coarse_use_gpu=True, 307 | coarse_use_small=False, 308 | fine_use_gpu=True, 309 | fine_use_small=False, 310 | codec_use_gpu=True, 311 | force_reload=False, 312 | ): 313 | """Load all the necessary models for the pipeline.""" 314 | if _grab_best_device() == "cpu" and ( 315 | text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu 316 | ): 317 | logger.warning("No GPU being used. 
Careful, inference might be very slow!") 318 | _ = load_model( 319 | model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload 320 | ) 321 | _ = load_model( 322 | model_type="coarse", 323 | use_gpu=coarse_use_gpu, 324 | use_small=coarse_use_small, 325 | force_reload=force_reload, 326 | ) 327 | _ = load_model( 328 | model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload 329 | ) 330 | _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload) 331 | 332 | 333 | #### 334 | # Generation Functionality 335 | #### 336 | 337 | 338 | def _tokenize(tokenizer, text): 339 | return tokenizer.encode(text, add_special_tokens=False) 340 | 341 | 342 | def _detokenize(tokenizer, enc_text): 343 | return tokenizer.decode(enc_text) 344 | 345 | 346 | def _normalize_whitespace(text): 347 | return re.sub(r"\s+", " ", text).strip() 348 | 349 | 350 | TEXT_ENCODING_OFFSET = 10_048 351 | SEMANTIC_PAD_TOKEN = 10_000 352 | TEXT_PAD_TOKEN = 129_595 353 | SEMANTIC_INFER_TOKEN = 129_599 354 | 355 | 356 | def _load_history_prompt(history_prompt_input): 357 | if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"): 358 | history_prompt = np.load(history_prompt_input) 359 | elif isinstance(history_prompt_input, str): 360 | # make sure this works on non-ubuntu 361 | history_prompt_input = os.path.join(*history_prompt_input.split("/")) 362 | if history_prompt_input not in ALLOWED_PROMPTS: 363 | raise ValueError("history prompt not found") 364 | history_prompt = np.load( 365 | os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz") 366 | ) 367 | elif isinstance(history_prompt_input, dict): 368 | assert("semantic_prompt" in history_prompt_input) 369 | assert("coarse_prompt" in history_prompt_input) 370 | assert("fine_prompt" in history_prompt_input) 371 | history_prompt = history_prompt_input 372 | else: 373 | raise ValueError("history prompt format unrecognized") 374 | return 
history_prompt 375 | 376 | 377 | def generate_text_semantic( 378 | text, 379 | history_prompt=None, 380 | temp=0.7, 381 | top_k=None, 382 | top_p=None, 383 | silent=False, 384 | min_eos_p=0.2, 385 | max_gen_duration_s=None, 386 | allow_early_stop=True, 387 | use_kv_caching=False, 388 | ): 389 | """Generate semantic tokens from text.""" 390 | assert isinstance(text, str) 391 | text = _normalize_whitespace(text) 392 | assert len(text.strip()) > 0 393 | if history_prompt is not None: 394 | history_prompt = _load_history_prompt(history_prompt) 395 | semantic_history = history_prompt["semantic_prompt"] 396 | assert ( 397 | isinstance(semantic_history, np.ndarray) 398 | and len(semantic_history.shape) == 1 399 | and len(semantic_history) > 0 400 | and semantic_history.min() >= 0 401 | and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 402 | ) 403 | else: 404 | semantic_history = None 405 | # load models if not yet exist 406 | global models 407 | global models_devices 408 | if "text" not in models: 409 | preload_models() 410 | model_container = models["text"] 411 | model = model_container["model"] 412 | tokenizer = model_container["tokenizer"] 413 | encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET 414 | if OFFLOAD_CPU: 415 | model.to(models_devices["text"]) 416 | device = next(model.parameters()).device 417 | if len(encoded_text) > 256: 418 | p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1) 419 | logger.warning(f"warning, text too long, lopping of last {p}%") 420 | encoded_text = encoded_text[:256] 421 | encoded_text = np.pad( 422 | encoded_text, 423 | (0, 256 - len(encoded_text)), 424 | constant_values=TEXT_PAD_TOKEN, 425 | mode="constant", 426 | ) 427 | if semantic_history is not None: 428 | semantic_history = semantic_history.astype(np.int64) 429 | # lop off if history is too long, pad if needed 430 | semantic_history = semantic_history[-256:] 431 | semantic_history = np.pad( 432 | semantic_history, 433 | (0, 256 - 
len(semantic_history)), 434 | constant_values=SEMANTIC_PAD_TOKEN, 435 | mode="constant", 436 | ) 437 | else: 438 | semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256) 439 | x = torch.from_numpy( 440 | np.hstack([ 441 | encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN]) 442 | ]).astype(np.int64) 443 | )[None] 444 | assert x.shape[1] == 256 + 256 + 1 445 | with _inference_mode(): 446 | x = x.to(device) 447 | n_tot_steps = 768 448 | # custom tqdm updates since we don't know when eos will occur 449 | pbar = tqdm.tqdm(disable=silent, total=100) 450 | pbar_state = 0 451 | tot_generated_duration_s = 0 452 | kv_cache = None 453 | for n in range(n_tot_steps): 454 | if use_kv_caching and kv_cache is not None: 455 | x_input = x[:, [-1]] 456 | else: 457 | x_input = x 458 | logits, kv_cache = model( 459 | x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache 460 | ) 461 | relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE] 462 | if allow_early_stop: 463 | relevant_logits = torch.hstack( 464 | (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]]) # eos 465 | ) 466 | if top_p is not None: 467 | # faster to convert to numpy 468 | original_device = relevant_logits.device 469 | relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() 470 | sorted_indices = np.argsort(relevant_logits)[::-1] 471 | sorted_logits = relevant_logits[sorted_indices] 472 | cumulative_probs = np.cumsum(softmax(sorted_logits)) 473 | sorted_indices_to_remove = cumulative_probs > top_p 474 | sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() 475 | sorted_indices_to_remove[0] = False 476 | relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf 477 | relevant_logits = torch.from_numpy(relevant_logits) 478 | relevant_logits = relevant_logits.to(original_device) 479 | if top_k is not None: 480 | v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) 481 | relevant_logits[relevant_logits < v[-1]] = -float("Inf") 
482 | probs = F.softmax(relevant_logits / temp, dim=-1) 483 | item_next = torch.multinomial(probs, num_samples=1).to(torch.int32) 484 | if allow_early_stop and ( 485 | item_next == SEMANTIC_VOCAB_SIZE 486 | or (min_eos_p is not None and probs[-1] >= min_eos_p) 487 | ): 488 | # eos found, so break 489 | pbar.update(100 - pbar_state) 490 | break 491 | x = torch.cat((x, item_next[None]), dim=1) 492 | tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ 493 | if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s: 494 | pbar.update(100 - pbar_state) 495 | break 496 | if n == n_tot_steps - 1: 497 | pbar.update(100 - pbar_state) 498 | break 499 | del logits, relevant_logits, probs, item_next 500 | req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))]) 501 | if req_pbar_state > pbar_state: 502 | pbar.update(req_pbar_state - pbar_state) 503 | pbar_state = req_pbar_state 504 | pbar.close() 505 | out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :] 506 | if OFFLOAD_CPU: 507 | model.to("cpu") 508 | assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE) 509 | _clear_cuda_cache() 510 | return out 511 | 512 | 513 | def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE): 514 | assert len(arr.shape) == 2 515 | arr = arr.copy() 516 | if offset_size is not None: 517 | for n in range(1, arr.shape[0]): 518 | arr[n, :] += offset_size * n 519 | flat_arr = arr.ravel("F") 520 | return flat_arr 521 | 522 | 523 | COARSE_SEMANTIC_PAD_TOKEN = 12_048 524 | COARSE_INFER_TOKEN = 12_050 525 | 526 | 527 | def generate_coarse( 528 | x_semantic, 529 | history_prompt=None, 530 | temp=0.7, 531 | top_k=None, 532 | top_p=None, 533 | silent=False, 534 | max_coarse_history=630, # min 60 (faster), max 630 (more context) 535 | sliding_window_len=60, 536 | use_kv_caching=False, 537 | ): 538 | """Generate coarse audio codes from semantic tokens.""" 539 | assert ( 540 | isinstance(x_semantic, np.ndarray) 541 | and len(x_semantic.shape) == 1 542 | and len(x_semantic) > 
0 543 | and x_semantic.min() >= 0 544 | and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1 545 | ) 546 | assert 60 <= max_coarse_history <= 630 547 | assert max_coarse_history + sliding_window_len <= 1024 - 256 548 | semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS 549 | max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) 550 | if history_prompt is not None: 551 | history_prompt = _load_history_prompt(history_prompt) 552 | x_semantic_history = history_prompt["semantic_prompt"] 553 | x_coarse_history = history_prompt["coarse_prompt"] 554 | assert ( 555 | isinstance(x_semantic_history, np.ndarray) 556 | and len(x_semantic_history.shape) == 1 557 | and len(x_semantic_history) > 0 558 | and x_semantic_history.min() >= 0 559 | and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 560 | and isinstance(x_coarse_history, np.ndarray) 561 | and len(x_coarse_history.shape) == 2 562 | and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS 563 | and x_coarse_history.shape[-1] >= 0 564 | and x_coarse_history.min() >= 0 565 | and x_coarse_history.max() <= CODEBOOK_SIZE - 1 566 | and ( 567 | round(x_coarse_history.shape[-1] / len(x_semantic_history), 1) 568 | == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1) 569 | ) 570 | ) 571 | x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE 572 | # trim histories correctly 573 | n_semantic_hist_provided = np.min( 574 | [ 575 | max_semantic_history, 576 | len(x_semantic_history) - len(x_semantic_history) % 2, 577 | int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)), 578 | ] 579 | ) 580 | n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) 581 | x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32) 582 | x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32) 583 | # TODO: bit of a hack for time alignment (sounds better) 584 | x_coarse_history = 
x_coarse_history[:-2] 585 | else: 586 | x_semantic_history = np.array([], dtype=np.int32) 587 | x_coarse_history = np.array([], dtype=np.int32) 588 | # load models if not yet exist 589 | global models 590 | global models_devices 591 | if "coarse" not in models: 592 | preload_models() 593 | model = models["coarse"] 594 | if OFFLOAD_CPU: 595 | model.to(models_devices["coarse"]) 596 | device = next(model.parameters()).device 597 | # start loop 598 | n_steps = int( 599 | round( 600 | np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS) 601 | * N_COARSE_CODEBOOKS 602 | ) 603 | ) 604 | assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0 605 | x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32) 606 | x_coarse = x_coarse_history.astype(np.int32) 607 | base_semantic_idx = len(x_semantic_history) 608 | with _inference_mode(): 609 | x_semantic_in = torch.from_numpy(x_semantic)[None].to(device) 610 | x_coarse_in = torch.from_numpy(x_coarse)[None].to(device) 611 | n_window_steps = int(np.ceil(n_steps / sliding_window_len)) 612 | n_step = 0 613 | for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent): 614 | semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio)) 615 | # pad from right side 616 | x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :] 617 | x_in = x_in[:, :256] 618 | x_in = F.pad( 619 | x_in, 620 | (0, 256 - x_in.shape[-1]), 621 | "constant", 622 | COARSE_SEMANTIC_PAD_TOKEN, 623 | ) 624 | x_in = torch.hstack( 625 | [ 626 | x_in, 627 | torch.tensor([COARSE_INFER_TOKEN])[None].to(device), 628 | x_coarse_in[:, -max_coarse_history:], 629 | ] 630 | ) 631 | kv_cache = None 632 | for _ in range(sliding_window_len): 633 | if n_step >= n_steps: 634 | continue 635 | is_major_step = n_step % N_COARSE_CODEBOOKS == 0 636 | 637 | if use_kv_caching and kv_cache is not None: 638 | x_input = x_in[:, [-1]] 639 | else: 640 | x_input = x_in 641 | 642 | logits, 
kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache) 643 | logit_start_idx = ( 644 | SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE 645 | ) 646 | logit_end_idx = ( 647 | SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE 648 | ) 649 | relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx] 650 | if top_p is not None: 651 | # faster to convert to numpy 652 | original_device = relevant_logits.device 653 | relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() 654 | sorted_indices = np.argsort(relevant_logits)[::-1] 655 | sorted_logits = relevant_logits[sorted_indices] 656 | cumulative_probs = np.cumsum(softmax(sorted_logits)) 657 | sorted_indices_to_remove = cumulative_probs > top_p 658 | sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() 659 | sorted_indices_to_remove[0] = False 660 | relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf 661 | relevant_logits = torch.from_numpy(relevant_logits) 662 | relevant_logits = relevant_logits.to(original_device) 663 | if top_k is not None: 664 | v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) 665 | relevant_logits[relevant_logits < v[-1]] = -float("Inf") 666 | probs = F.softmax(relevant_logits / temp, dim=-1) 667 | item_next = torch.multinomial(probs, num_samples=1).to(torch.int32) 668 | item_next += logit_start_idx 669 | x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1) 670 | x_in = torch.cat((x_in, item_next[None]), dim=1) 671 | del logits, relevant_logits, probs, item_next 672 | n_step += 1 673 | del x_in 674 | del x_semantic_in 675 | if OFFLOAD_CPU: 676 | model.to("cpu") 677 | gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :] 678 | del x_coarse_in 679 | assert len(gen_coarse_arr) == n_steps 680 | gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE 681 | for n in range(1, N_COARSE_CODEBOOKS): 682 | 
gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE 683 | _clear_cuda_cache() 684 | return gen_coarse_audio_arr 685 | 686 | 687 | def generate_fine( 688 | x_coarse_gen, 689 | history_prompt=None, 690 | temp=0.5, 691 | silent=True, 692 | ): 693 | """Generate full audio codes from coarse audio codes.""" 694 | assert ( 695 | isinstance(x_coarse_gen, np.ndarray) 696 | and len(x_coarse_gen.shape) == 2 697 | and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1 698 | and x_coarse_gen.shape[1] > 0 699 | and x_coarse_gen.min() >= 0 700 | and x_coarse_gen.max() <= CODEBOOK_SIZE - 1 701 | ) 702 | if history_prompt is not None: 703 | history_prompt = _load_history_prompt(history_prompt) 704 | x_fine_history = history_prompt["fine_prompt"] 705 | assert ( 706 | isinstance(x_fine_history, np.ndarray) 707 | and len(x_fine_history.shape) == 2 708 | and x_fine_history.shape[0] == N_FINE_CODEBOOKS 709 | and x_fine_history.shape[1] >= 0 710 | and x_fine_history.min() >= 0 711 | and x_fine_history.max() <= CODEBOOK_SIZE - 1 712 | ) 713 | else: 714 | x_fine_history = None 715 | n_coarse = x_coarse_gen.shape[0] 716 | # load models if not yet exist 717 | global models 718 | global models_devices 719 | if "fine" not in models: 720 | preload_models() 721 | model = models["fine"] 722 | if OFFLOAD_CPU: 723 | model.to(models_devices["fine"]) 724 | device = next(model.parameters()).device 725 | # make input arr 726 | in_arr = np.vstack( 727 | [ 728 | x_coarse_gen, 729 | np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1])) 730 | + CODEBOOK_SIZE, # padding 731 | ] 732 | ).astype(np.int32) 733 | # prepend history if available (max 512) 734 | if x_fine_history is not None: 735 | x_fine_history = x_fine_history.astype(np.int32) 736 | in_arr = np.hstack( 737 | [ 738 | x_fine_history[:, -512:].astype(np.int32), 739 | in_arr, 740 | ] 741 | ) 742 | n_history = x_fine_history[:, -512:].shape[1] 743 | else: 744 | n_history = 0 745 | n_remove_from_end = 0 746 | # need to pad if too short (since 
non-causal model) 747 | if in_arr.shape[1] < 1024: 748 | n_remove_from_end = 1024 - in_arr.shape[1] 749 | in_arr = np.hstack( 750 | [ 751 | in_arr, 752 | np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE, 753 | ] 754 | ) 755 | # we can be lazy about fractional loop and just keep overwriting codebooks 756 | n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1 757 | with _inference_mode(): 758 | in_arr = torch.tensor(in_arr.T).to(device) 759 | for n in tqdm.tqdm(range(n_loops), disable=silent): 760 | start_idx = np.min([n * 512, in_arr.shape[0] - 1024]) 761 | start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512]) 762 | rel_start_fill_idx = start_fill_idx - start_idx 763 | in_buffer = in_arr[start_idx : start_idx + 1024, :][None] 764 | for nn in range(n_coarse, N_FINE_CODEBOOKS): 765 | logits = model(nn, in_buffer) 766 | if temp is None: 767 | relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE] 768 | codebook_preds = torch.argmax(relevant_logits, -1) 769 | else: 770 | relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp 771 | probs = F.softmax(relevant_logits, dim=-1) 772 | codebook_preds = torch.multinomial( 773 | probs[rel_start_fill_idx:1024], num_samples=1 774 | ).reshape(-1) 775 | codebook_preds = codebook_preds.to(torch.int32) 776 | in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds 777 | del logits, codebook_preds 778 | # transfer over info into model_in and convert to numpy 779 | for nn in range(n_coarse, N_FINE_CODEBOOKS): 780 | in_arr[ 781 | start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn 782 | ] = in_buffer[0, rel_start_fill_idx:, nn] 783 | del in_buffer 784 | gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T 785 | del in_arr 786 | if OFFLOAD_CPU: 787 | model.to("cpu") 788 | gen_fine_arr = gen_fine_arr[:, n_history:] 789 | if n_remove_from_end > 0: 790 | gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end] 791 | assert 
gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1] 792 | _clear_cuda_cache() 793 | return gen_fine_arr 794 | 795 | 796 | def codec_decode(fine_tokens): 797 | """Turn quantized audio codes into audio array using encodec.""" 798 | # load models if not yet exist 799 | global models 800 | global models_devices 801 | if "codec" not in models: 802 | preload_models() 803 | model = models["codec"] 804 | if OFFLOAD_CPU: 805 | model.to(models_devices["codec"]) 806 | device = next(model.parameters()).device 807 | arr = torch.from_numpy(fine_tokens)[None] 808 | arr = arr.to(device) 809 | arr = arr.transpose(0, 1) 810 | emb = model.quantizer.decode(arr) 811 | out = model.decoder(emb) 812 | audio_arr = out.detach().cpu().numpy().squeeze() 813 | del arr, emb, out 814 | if OFFLOAD_CPU: 815 | model.to("cpu") 816 | return audio_arr 817 | -------------------------------------------------------------------------------- /bark/model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Much of this code is adapted from Andrej Karpathy's NanoGPT 3 | (https://github.com/karpathy/nanoGPT) 4 | """ 5 | import math 6 | from dataclasses import dataclass 7 | 8 | import torch 9 | import torch.nn as nn 10 | from torch.nn import functional as F 11 | 12 | class LayerNorm(nn.Module): 13 | """ LayerNorm but with an optional bias. 
PyTorch doesn't support simply bias=False """ 14 | 15 | def __init__(self, ndim, bias): 16 | super().__init__() 17 | self.weight = nn.Parameter(torch.ones(ndim)) 18 | self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None 19 | 20 | def forward(self, input): 21 | return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) 22 | 23 | class CausalSelfAttention(nn.Module): 24 | 25 | def __init__(self, config): 26 | super().__init__() 27 | assert config.n_embd % config.n_head == 0 28 | # key, query, value projections for all heads, but in a batch 29 | self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) 30 | # output projection 31 | self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) 32 | # regularization 33 | self.attn_dropout = nn.Dropout(config.dropout) 34 | self.resid_dropout = nn.Dropout(config.dropout) 35 | self.n_head = config.n_head 36 | self.n_embd = config.n_embd 37 | self.dropout = config.dropout 38 | # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary 39 | self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') 40 | if not self.flash: 41 | # print("WARNING: using slow attention. 
Flash Attention atm needs PyTorch nightly and dropout=0.0") 42 | # causal mask to ensure that attention is only applied to the left in the input sequence 43 | self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)) 44 | .view(1, 1, config.block_size, config.block_size)) 45 | 46 | def forward(self, x, past_kv=None, use_cache=False): 47 | B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) 48 | 49 | # calculate query, key, values for all heads in batch and move head forward to be the batch dim 50 | q, k ,v = self.c_attn(x).split(self.n_embd, dim=2) 51 | k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 52 | q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 53 | v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 54 | 55 | if past_kv is not None: 56 | past_key = past_kv[0] 57 | past_value = past_kv[1] 58 | k = torch.cat((past_key, k), dim=-2) 59 | v = torch.cat((past_value, v), dim=-2) 60 | 61 | FULL_T = k.shape[-2] 62 | 63 | if use_cache is True: 64 | present = (k, v) 65 | else: 66 | present = None 67 | 68 | # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) 69 | if self.flash: 70 | # efficient attention using Flash Attention CUDA kernels 71 | if past_kv is not None: 72 | # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains 73 | # the query for the last token. scaled_dot_product_attention interprets this as the first token in the 74 | # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so 75 | # to work around this we set is_causal=False. 
76 | is_causal = False 77 | else: 78 | is_causal = True 79 | 80 | y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal) 81 | else: 82 | # manual implementation of attention 83 | att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) 84 | att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf')) 85 | att = F.softmax(att, dim=-1) 86 | att = self.attn_dropout(att) 87 | y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) 88 | y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side 89 | 90 | # output projection 91 | y = self.resid_dropout(self.c_proj(y)) 92 | return (y, present) 93 | 94 | class MLP(nn.Module): 95 | 96 | def __init__(self, config): 97 | super().__init__() 98 | self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) 99 | self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) 100 | self.dropout = nn.Dropout(config.dropout) 101 | self.gelu = nn.GELU() 102 | 103 | def forward(self, x): 104 | x = self.c_fc(x) 105 | x = self.gelu(x) 106 | x = self.c_proj(x) 107 | x = self.dropout(x) 108 | return x 109 | 110 | class Block(nn.Module): 111 | 112 | def __init__(self, config, layer_idx): 113 | super().__init__() 114 | self.ln_1 = LayerNorm(config.n_embd, bias=config.bias) 115 | self.attn = CausalSelfAttention(config) 116 | self.ln_2 = LayerNorm(config.n_embd, bias=config.bias) 117 | self.mlp = MLP(config) 118 | self.layer_idx = layer_idx 119 | 120 | def forward(self, x, past_kv=None, use_cache=False): 121 | attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache) 122 | x = x + attn_output 123 | x = x + self.mlp(self.ln_2(x)) 124 | return (x, prev_kvs) 125 | 126 | @dataclass 127 | class GPTConfig: 128 | block_size: int = 1024 129 | input_vocab_size: int = 10_048 130 | output_vocab_size: int = 10_048 131 | n_layer: int = 12 132 | n_head: int = 12 133 | n_embd: int = 
768 134 | dropout: float = 0.0 135 | bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster 136 | 137 | class GPT(nn.Module): 138 | 139 | def __init__(self, config): 140 | super().__init__() 141 | assert config.input_vocab_size is not None 142 | assert config.output_vocab_size is not None 143 | assert config.block_size is not None 144 | self.config = config 145 | 146 | self.transformer = nn.ModuleDict(dict( 147 | wte = nn.Embedding(config.input_vocab_size, config.n_embd), 148 | wpe = nn.Embedding(config.block_size, config.n_embd), 149 | drop = nn.Dropout(config.dropout), 150 | h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]), 151 | ln_f = LayerNorm(config.n_embd, bias=config.bias), 152 | )) 153 | self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False) 154 | 155 | def get_num_params(self, non_embedding=True): 156 | """ 157 | Return the number of parameters in the model. 158 | For non-embedding count (default), the position embeddings get subtracted. 159 | The token embeddings would too, except due to the parameter sharing these 160 | params are actually used as weights in the final layer, so we include them. 
161 | """ 162 | n_params = sum(p.numel() for p in self.parameters()) 163 | if non_embedding: 164 | n_params -= self.transformer.wte.weight.numel() 165 | n_params -= self.transformer.wpe.weight.numel() 166 | return n_params 167 | 168 | def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False): 169 | device = idx.device 170 | b, t = idx.size() 171 | if past_kv is not None: 172 | assert t == 1 173 | tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) 174 | else: 175 | if merge_context: 176 | assert(idx.shape[1] >= 256+256+1) 177 | t = idx.shape[1] - 256 178 | else: 179 | assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" 180 | 181 | # forward the GPT model itself 182 | if merge_context: 183 | tok_emb = torch.cat([ 184 | self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]), 185 | self.transformer.wte(idx[:,256+256:]) 186 | ], dim=1) 187 | else: 188 | tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) 189 | 190 | if past_kv is None: 191 | past_length = 0 192 | past_kv = tuple([None] * len(self.transformer.h)) 193 | else: 194 | past_length = past_kv[0][0].size(-2) 195 | 196 | if position_ids is None: 197 | position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device) 198 | position_ids = position_ids.unsqueeze(0) # shape (1, t) 199 | assert position_ids.shape == (1, t) 200 | 201 | pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd) 202 | 203 | x = self.transformer.drop(tok_emb + pos_emb) 204 | 205 | new_kv = () if use_cache else None 206 | 207 | for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)): 208 | x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache) 209 | 210 | if use_cache: 211 | new_kv = new_kv + (kv,) 212 | 213 | x = self.transformer.ln_f(x) 214 | 215 | # inference-time 
mini-optimization: only forward the lm_head on the very last position 216 | logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim 217 | 218 | return (logits, new_kv) 219 | -------------------------------------------------------------------------------- /bark/model_fine.py: -------------------------------------------------------------------------------- 1 | """ 2 | Much of this code is adapted from Andrej Karpathy's NanoGPT 3 | (https://github.com/karpathy/nanoGPT) 4 | """ 5 | from dataclasses import dataclass 6 | import math 7 | 8 | import torch 9 | import torch.nn as nn 10 | from torch.nn import functional as F 11 | 12 | from .model import GPT, GPTConfig, MLP 13 | 14 | 15 | class NonCausalSelfAttention(nn.Module): 16 | def __init__(self, config): 17 | super().__init__() 18 | assert config.n_embd % config.n_head == 0 19 | # key, query, value projections for all heads, but in a batch 20 | self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) 21 | # output projection 22 | self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) 23 | # regularization 24 | self.attn_dropout = nn.Dropout(config.dropout) 25 | self.resid_dropout = nn.Dropout(config.dropout) 26 | self.n_head = config.n_head 27 | self.n_embd = config.n_embd 28 | self.dropout = config.dropout 29 | # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary 30 | self.flash = ( 31 | hasattr(torch.nn.functional, "scaled_dot_product_attention") and self.dropout == 0.0 32 | ) 33 | 34 | def forward(self, x): 35 | B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) 36 | 37 | # calculate query, key, values for all heads in batch and move head forward to be the batch dim 38 | q, k, v = self.c_attn(x).split(self.n_embd, dim=2) 39 | k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 40 | q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # 
(B, nh, T, hs) 41 | v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 42 | 43 | # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) 44 | if self.flash: 45 | # efficient attention using Flash Attention CUDA kernels 46 | y = torch.nn.functional.scaled_dot_product_attention( 47 | q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=False 48 | ) 49 | else: 50 | # manual implementation of attention 51 | att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) 52 | att = F.softmax(att, dim=-1) 53 | att = self.attn_dropout(att) 54 | y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) 55 | y = ( 56 | y.transpose(1, 2).contiguous().view(B, T, C) 57 | ) # re-assemble all head outputs side by side 58 | 59 | # output projection 60 | y = self.resid_dropout(self.c_proj(y)) 61 | return y 62 | 63 | 64 | class FineBlock(nn.Module): 65 | def __init__(self, config): 66 | super().__init__() 67 | self.ln_1 = nn.LayerNorm(config.n_embd) 68 | self.attn = NonCausalSelfAttention(config) 69 | self.ln_2 = nn.LayerNorm(config.n_embd) 70 | self.mlp = MLP(config) 71 | 72 | def forward(self, x): 73 | x = x + self.attn(self.ln_1(x)) 74 | x = x + self.mlp(self.ln_2(x)) 75 | return x 76 | 77 | 78 | class FineGPT(GPT): 79 | def __init__(self, config): 80 | super().__init__(config) 81 | del self.lm_head 82 | self.config = config 83 | self.n_codes_total = config.n_codes_total 84 | self.transformer = nn.ModuleDict( 85 | dict( 86 | wtes=nn.ModuleList( 87 | [ 88 | nn.Embedding(config.input_vocab_size, config.n_embd) 89 | for _ in range(config.n_codes_total) 90 | ] 91 | ), 92 | wpe=nn.Embedding(config.block_size, config.n_embd), 93 | drop=nn.Dropout(config.dropout), 94 | h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]), 95 | ln_f=nn.LayerNorm(config.n_embd), 96 | ) 97 | ) 98 | self.lm_heads = nn.ModuleList( 99 | [ 100 | nn.Linear(config.n_embd, config.output_vocab_size, bias=False) 101 | for _ in 
range(config.n_codes_given, self.n_codes_total) 102 | ] 103 | ) 104 | for i in range(self.n_codes_total - config.n_codes_given): 105 | self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight 106 | 107 | def forward(self, pred_idx, idx): 108 | device = idx.device 109 | b, t, codes = idx.size() 110 | assert ( 111 | t <= self.config.block_size 112 | ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" 113 | assert pred_idx > 0, "cannot predict 0th codebook" 114 | assert codes == self.n_codes_total, (b, t, codes) 115 | pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t) 116 | 117 | # forward the GPT model itself 118 | tok_embs = [ 119 | wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes) 120 | ] # token embeddings of shape (b, t, n_embd) 121 | tok_emb = torch.cat(tok_embs, dim=-1) 122 | pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd) 123 | x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1) 124 | x = self.transformer.drop(x + pos_emb) 125 | for block in self.transformer.h: 126 | x = block(x) 127 | x = self.transformer.ln_f(x) 128 | logits = self.lm_heads[pred_idx - self.config.n_codes_given](x) 129 | return logits 130 | 131 | def get_num_params(self, non_embedding=True): 132 | """ 133 | Return the number of parameters in the model. 134 | For non-embedding count (default), the position embeddings get subtracted. 135 | The token embeddings would too, except due to the parameter sharing these 136 | params are actually used as weights in the final layer, so we include them. 
137 | """ 138 | n_params = sum(p.numel() for p in self.parameters()) 139 | if non_embedding: 140 | for wte in self.transformer.wtes: 141 | n_params -= wte.weight.numel() 142 | n_params -= self.transformer.wpe.weight.numel() 143 | return n_params 144 | 145 | 146 | @dataclass 147 | class FineGPTConfig(GPTConfig): 148 | n_codes_total: int = 8 149 | n_codes_given: int = 1 150 | -------------------------------------------------------------------------------- /model-card.md: -------------------------------------------------------------------------------- 1 | # Model Card: Bark 2 | 3 | This is the official codebase for running the text to audio model, from Suno.ai. 4 | 5 | The following is additional information about the models released here. 6 | 7 | ## Model Details 8 | 9 | Bark is a series of three transformer models that turn text into audio. 10 | ### Text to semantic tokens 11 | - Input: text, tokenized with [BERT tokenizer from Hugging Face](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertTokenizer) 12 | - Output: semantic tokens that encode the audio to be generated 13 | 14 | ### Semantic to coarse tokens 15 | - Input: semantic tokens 16 | - Output: tokens from the first two codebooks of the [EnCodec Codec](https://github.com/facebookresearch/encodec) from facebook 17 | 18 | ### Coarse to fine tokens 19 | - Input: the first two codebooks from EnCodec 20 | - Output: 8 codebooks from EnCodec 21 | 22 | ### Architecture 23 | | Model | Parameters | Attention | Output Vocab size | 24 | |:-------------------------:|:----------:|------------|:-----------------:| 25 | | Text to semantic tokens | 80 M | Causal | 10,000 | 26 | | Semantic to coarse tokens | 80 M | Causal | 2x 1,024 | 27 | | Coarse to fine tokens | 80 M | Non-causal | 6x 1,024 | 28 | 29 | 30 | ### Release date 31 | April 2023 32 | 33 | ## Broader Implications 34 | We anticipate that this model's text to audio capabilities can be used to improve accessbility tools in a variety of 
languages. 35 | Straightforward improvements will allow models to run faster than realtime, rendering them useful for applications such as virtual assistants. 36 | 37 | While we hope that this release will enable users to express their creativity and build applications that are a force 38 | for good, we acknowledge that any text to audio model has the potential for dual use. While it is not straightforward 39 | to voice clone known people with Bark, it can still be used for nefarious purposes. To further reduce the chances of unintended use of Bark, 40 | we also release a simple classifier to detect Bark-generated audio with high accuracy (see notebooks section of the main repository). 41 | -------------------------------------------------------------------------------- /next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | 4 | // NOTE: This file should not be edited 5 | // see https://nextjs.org/docs/basic-features/typescript for more information. 
6 | -------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | const process = require("node:process"); 2 | 3 | /** @type {import('next').NextConfig} */ 4 | const nextConfig = { 5 | reactStrictMode: true, 6 | images: { 7 | remotePatterns: [ 8 | { 9 | protocol: "http", 10 | hostname: "127.0.0.1", 11 | port: process.env.API_PORT, 12 | }, 13 | ], 14 | }, 15 | }; 16 | 17 | module.exports = nextConfig; 18 | -------------------------------------------------------------------------------- /notebooks/long_form_generation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "39ea4bed", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import os\n", 11 | "\n", 12 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", 13 | "\n", 14 | "\n", 15 | "from IPython.display import Audio\n", 16 | "import nltk # we'll use this to split into sentences\n", 17 | "import numpy as np\n", 18 | "\n", 19 | "from bark.generation import (\n", 20 | " generate_text_semantic,\n", 21 | " preload_models,\n", 22 | ")\n", 23 | "from bark.api import semantic_to_waveform\n", 24 | "from bark import generate_audio, SAMPLE_RATE" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 29, 30 | "id": "776964b6", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "preload_models()" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "id": "1d03f4d2", 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "74a025a4", 48 | "metadata": {}, 49 | "source": [ 50 | "# Simple Long-Form Generation\n", 51 | "We split longer text into sentences using `nltk` and generate the sentences one by one." 
52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 33, 57 | "id": "57b06e2a", 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "script = \"\"\"\n", 62 | "Hey, have you heard about this new text-to-audio model called \"Bark\"? \n", 63 | "Apparently, it's the most realistic and natural-sounding text-to-audio model \n", 64 | "out there right now. People are saying it sounds just like a real person speaking. \n", 65 | "I think it uses advanced machine learning algorithms to analyze and understand the \n", 66 | "nuances of human speech, and then replicates those nuances in its own speech output. \n", 67 | "It's pretty impressive, and I bet it could be used for things like audiobooks or podcasts. \n", 68 | "In fact, I heard that some publishers are already starting to use Bark to create audiobooks. \n", 69 | "It would be like having your own personal voiceover artist. I really think Bark is going to \n", 70 | "be a game-changer in the world of text-to-audio technology.\n", 71 | "\"\"\".replace(\"\\n\", \" \").strip()" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 34, 77 | "id": "f747f804", 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "sentences = nltk.sent_tokenize(script)" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 35, 87 | "id": "17400a9b", 88 | "metadata": { 89 | "scrolled": true 90 | }, 91 | "outputs": [ 92 | { 93 | "name": "stderr", 94 | "output_type": "stream", 95 | "text": [ 96 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 43.03it/s]\n", 97 | "100%|████████████████████████████████████████████████████████████████████████| 17/17 [00:06<00:00, 2.45it/s]\n", 98 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 22.73it/s]\n", 99 | "100%|████████████████████████████████████████████████████████████████████████| 33/33 [00:13<00:00, 2.52it/s]\n", 100 | 
"100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 66.30it/s]\n", 101 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.46it/s]\n", 102 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.99it/s]\n", 103 | "100%|████████████████████████████████████████████████████████████████████████| 35/35 [00:14<00:00, 2.46it/s]\n", 104 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 25.63it/s]\n", 105 | "100%|████████████████████████████████████████████████████████████████████████| 29/29 [00:11<00:00, 2.50it/s]\n", 106 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 23.90it/s]\n", 107 | "100%|████████████████████████████████████████████████████████████████████████| 30/30 [00:12<00:00, 2.46it/s]\n", 108 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 53.24it/s]\n", 109 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.51it/s]\n", 110 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 50.63it/s]\n", 111 | "100%|████████████████████████████████████████████████████████████████████████| 15/15 [00:05<00:00, 2.57it/s]\n" 112 | ] 113 | } 114 | ], 115 | "source": [ 116 | "SPEAKER = \"v2/en_speaker_6\"\n", 117 | "silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence\n", 118 | "\n", 119 | "pieces = []\n", 120 | "for sentence in sentences:\n", 121 | " audio_array = generate_audio(sentence, history_prompt=SPEAKER)\n", 122 | " pieces += [audio_array, silence.copy()]\n" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "id": "04cf77f9", 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | 
"Audio(np.concatenate(pieces), rate=SAMPLE_RATE)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "id": "ac2d4625", 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "id": "6d13249b", 146 | "metadata": {}, 147 | "source": [ 148 | "# $ \\\\ $" 149 | ] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "id": "cdfc8bf5", 154 | "metadata": {}, 155 | "source": [ 156 | "# Advanced Long-Form Generation\n", 157 | "Somtimes Bark will hallucinate a little extra audio at the end of the prompt.\n", 158 | "We can solve this issue by lowering the threshold for bark to stop generating text. \n", 159 | "We use the `min_eos_p` kwarg in `generate_text_semantic`" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 37, 165 | "id": "62807fd0", 166 | "metadata": {}, 167 | "outputs": [ 168 | { 169 | "name": "stderr", 170 | "output_type": "stream", 171 | "text": [ 172 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 38.05it/s]\n", 173 | "100%|████████████████████████████████████████████████████████████████████████| 18/18 [00:07<00:00, 2.46it/s]\n", 174 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 32.28it/s]\n", 175 | "100%|████████████████████████████████████████████████████████████████████████| 21/21 [00:08<00:00, 2.54it/s]\n", 176 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 55.78it/s]\n", 177 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.57it/s]\n", 178 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:06<00:00, 14.73it/s]\n", 179 | "100%|████████████████████████████████████████████████████████████████████████| 35/35 [00:14<00:00, 2.47it/s]\n", 180 | 
"100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 40.29it/s]\n", 181 | "100%|████████████████████████████████████████████████████████████████████████| 18/18 [00:07<00:00, 2.56it/s]\n", 182 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 32.92it/s]\n", 183 | "100%|████████████████████████████████████████████████████████████████████████| 20/20 [00:08<00:00, 2.47it/s]\n", 184 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 68.87it/s]\n", 185 | "100%|████████████████████████████████████████████████████████████████████████| 12/12 [00:04<00:00, 2.62it/s]\n", 186 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 47.64it/s]\n", 187 | "100%|████████████████████████████████████████████████████████████████████████| 15/15 [00:06<00:00, 2.46it/s]\n" 188 | ] 189 | } 190 | ], 191 | "source": [ 192 | "GEN_TEMP = 0.6\n", 193 | "SPEAKER = \"v2/en_speaker_6\"\n", 194 | "silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence\n", 195 | "\n", 196 | "pieces = []\n", 197 | "for sentence in sentences:\n", 198 | " semantic_tokens = generate_text_semantic(\n", 199 | " sentence,\n", 200 | " history_prompt=SPEAKER,\n", 201 | " temp=GEN_TEMP,\n", 202 | " min_eos_p=0.05, # this controls how likely the generation is to end\n", 203 | " )\n", 204 | "\n", 205 | " audio_array = semantic_to_waveform(semantic_tokens, history_prompt=SPEAKER,)\n", 206 | " pieces += [audio_array, silence.copy()]\n", 207 | "\n" 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "id": "133fec46", 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [ 217 | "Audio(np.concatenate(pieces), rate=SAMPLE_RATE)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": null, 223 | "id": "6eee9f5a", 224 | "metadata": {}, 225 | "outputs": [], 226 | 
"source": [] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "id": "be8e125e", 231 | "metadata": {}, 232 | "source": [ 233 | "# $ \\\\ $" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "id": "03a16c1b", 239 | "metadata": {}, 240 | "source": [ 241 | "# Make a Long-Form Dialog with Bark" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "id": "06c5eff8", 247 | "metadata": {}, 248 | "source": [ 249 | "### Step 1: Format a script and speaker lookup" 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": 14, 255 | "id": "5238b297", 256 | "metadata": {}, 257 | "outputs": [ 258 | { 259 | "data": { 260 | "text/plain": [ 261 | "['Samantha: Hey, have you heard about this new text-to-audio model called \"Bark\"?',\n", 262 | " \"John: No, I haven't. What's so special about it?\",\n", 263 | " \"Samantha: Well, apparently it's the most realistic and natural-sounding text-to-audio model out there right now. People are saying it sounds just like a real person speaking.\",\n", 264 | " 'John: Wow, that sounds amazing. How does it work?',\n", 265 | " 'Samantha: I think it uses advanced machine learning algorithms to analyze and understand the nuances of human speech, and then replicates those nuances in its own speech output.',\n", 266 | " \"John: That's pretty impressive. Do you think it could be used for things like audiobooks or podcasts?\",\n", 267 | " 'Samantha: Definitely! In fact, I heard that some publishers are already starting to use Bark to create audiobooks. And I bet it would be great for podcasts too.',\n", 268 | " 'John: I can imagine. It would be like having your own personal voiceover artist.',\n", 269 | " 'Samantha: Exactly! 
I think Bark is going to be a game-changer in the world of text-to-audio technology.']" 270 | ] 271 | }, 272 | "execution_count": 14, 273 | "metadata": {}, 274 | "output_type": "execute_result" 275 | } 276 | ], 277 | "source": [ 278 | "speaker_lookup = {\"Samantha\": \"v2/en_speaker_9\", \"John\": \"v2/en_speaker_2\"}\n", 279 | "\n", 280 | "# Script generated by chat GPT\n", 281 | "script = \"\"\"\n", 282 | "Samantha: Hey, have you heard about this new text-to-audio model called \"Bark\"?\n", 283 | "\n", 284 | "John: No, I haven't. What's so special about it?\n", 285 | "\n", 286 | "Samantha: Well, apparently it's the most realistic and natural-sounding text-to-audio model out there right now. People are saying it sounds just like a real person speaking.\n", 287 | "\n", 288 | "John: Wow, that sounds amazing. How does it work?\n", 289 | "\n", 290 | "Samantha: I think it uses advanced machine learning algorithms to analyze and understand the nuances of human speech, and then replicates those nuances in its own speech output.\n", 291 | "\n", 292 | "John: That's pretty impressive. Do you think it could be used for things like audiobooks or podcasts?\n", 293 | "\n", 294 | "Samantha: Definitely! In fact, I heard that some publishers are already starting to use Bark to create audiobooks. And I bet it would be great for podcasts too.\n", 295 | "\n", 296 | "John: I can imagine. It would be like having your own personal voiceover artist.\n", 297 | "\n", 298 | "Samantha: Exactly! 
I think Bark is going to be a game-changer in the world of text-to-audio technology.\"\"\"\n", 299 | "script = script.strip().split(\"\\n\")\n", 300 | "script = [s.strip() for s in script if s]\n", 301 | "script" 302 | ] 303 | }, 304 | { 305 | "cell_type": "markdown", 306 | "id": "ee547efd", 307 | "metadata": {}, 308 | "source": [ 309 | "### Step 2: Generate the audio for every speaker turn" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": 15, 315 | "id": "203e5081", 316 | "metadata": {}, 317 | "outputs": [ 318 | { 319 | "name": "stderr", 320 | "output_type": "stream", 321 | "text": [ 322 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 34.03it/s]\n", 323 | "100%|████████████████████████████████████████████████████████████████████████| 22/22 [00:08<00:00, 2.55it/s]\n", 324 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 71.58it/s]\n", 325 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.65it/s]\n", 326 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 22.75it/s]\n", 327 | "100%|████████████████████████████████████████████████████████████████████████| 33/33 [00:13<00:00, 2.53it/s]\n", 328 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 70.76it/s]\n", 329 | "100%|████████████████████████████████████████████████████████████████████████| 11/11 [00:04<00:00, 2.63it/s]\n", 330 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.46it/s]\n", 331 | "100%|████████████████████████████████████████████████████████████████████████| 36/36 [00:14<00:00, 2.47it/s]\n", 332 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 20.18it/s]\n", 333 | 
"100%|████████████████████████████████████████████████████████████████████████| 37/37 [00:14<00:00, 2.51it/s]\n", 334 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:04<00:00, 23.04it/s]\n", 335 | "100%|████████████████████████████████████████████████████████████████████████| 32/32 [00:12<00:00, 2.48it/s]\n", 336 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 54.64it/s]\n", 337 | "100%|████████████████████████████████████████████████████████████████████████| 14/14 [00:05<00:00, 2.58it/s]\n", 338 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 31.71it/s]\n", 339 | "100%|████████████████████████████████████████████████████████████████████████| 24/24 [00:09<00:00, 2.56it/s]\n" 340 | ] 341 | } 342 | ], 343 | "source": [ 344 | "pieces = []\n", 345 | "silence = np.zeros(int(0.5*SAMPLE_RATE))\n", 346 | "for line in script:\n", 347 | " speaker, text = line.split(\": \")\n", 348 | " audio_array = generate_audio(text, history_prompt=speaker_lookup[speaker], )\n", 349 | " pieces += [audio_array, silence.copy()]" 350 | ] 351 | }, 352 | { 353 | "cell_type": "markdown", 354 | "id": "7c54bada", 355 | "metadata": {}, 356 | "source": [ 357 | "### Step 3: Concatenate all of the audio and play it" 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "id": "27a56842", 364 | "metadata": {}, 365 | "outputs": [], 366 | "source": [ 367 | "Audio(np.concatenate(pieces), rate=SAMPLE_RATE)" 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "execution_count": null, 373 | "id": "a1bc5877", 374 | "metadata": {}, 375 | "outputs": [], 376 | "source": [] 377 | } 378 | ], 379 | "metadata": { 380 | "kernelspec": { 381 | "display_name": "Python 3 (ipykernel)", 382 | "language": "python", 383 | "name": "python3" 384 | }, 385 | "language_info": { 386 | "codemirror_mode": { 387 | "name": "ipython", 388 | 
"version": 3 389 | }, 390 | "file_extension": ".py", 391 | "mimetype": "text/x-python", 392 | "name": "python", 393 | "nbconvert_exporter": "python", 394 | "pygments_lexer": "ipython3", 395 | "version": "3.9.16" 396 | } 397 | }, 398 | "nbformat": 4, 399 | "nbformat_minor": 5 400 | } 401 | -------------------------------------------------------------------------------- /notebooks/memory_profiling_bark.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "90641144", 6 | "metadata": {}, 7 | "source": [ 8 | "# Bark Memory Profiling\n", 9 | "Bark has two ways to reduce GPU memory: \n", 10 | " - Small models: a smaller version of the model. This can be set by using the environment variable `SUNO_USE_SMALL_MODELS`\n", 11 | " - offloading models to CPU: Holding only one model at a time on the GPU, and shuttling the models to the CPU in between generations. \n", 12 | "\n", 13 | "# $ \\\\ $\n", 14 | "## First, we'll use the most memory efficient configuration" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "id": "39ea4bed", 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "import os\n", 25 | "\n", 26 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", 27 | "os.environ[\"SUNO_USE_SMALL_MODELS\"] = \"1\"\n", 28 | "os.environ[\"SUNO_OFFLOAD_CPU\"] = \"1\"\n", 29 | "\n", 30 | "from bark.generation import (\n", 31 | " generate_text_semantic,\n", 32 | " preload_models,\n", 33 | ")\n", 34 | "from bark import generate_audio, SAMPLE_RATE\n", 35 | "\n", 36 | "import torch" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 2, 42 | "id": "66b0c006", 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stderr", 47 | "output_type": "stream", 48 | "text": [ 49 | "100%|██████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 62.17it/s]\n", 50 | 
"100%|████████████████████████████████████████████████████████████████████████| 10/10 [00:03<00:00, 2.74it/s]\n" 51 | ] 52 | }, 53 | { 54 | "name": "stdout", 55 | "output_type": "stream", 56 | "text": [ 57 | "max memory usage = 2396MB\n" 58 | ] 59 | } 60 | ], 61 | "source": [ 62 | "torch.cuda.reset_peak_memory_stats()\n", 63 | "preload_models()\n", 64 | "audio_array = generate_audio(\"madam I'm adam\", history_prompt=\"v2/en_speaker_5\")\n", 65 | "max_utilization = torch.cuda.max_memory_allocated()\n", 66 | "print(f\"max memory usage = {max_utilization / 1024 / 1024:.0f}MB\")" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "9922dd2d", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "id": "bdbe578e", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "id": "213d1b5b", 88 | "metadata": {}, 89 | "source": [ 90 | "# Memory Profiling:\n", 91 | "We can profile the memory consumption of 4 scenarios\n", 92 | " - Small models, offloading to CPU\n", 93 | " - Large models, offloading to CPU\n", 94 | " - Small models, not offloading to CPU\n", 95 | " - Large models, not offloading to CPU" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 1, 101 | "id": "417d5e9c", 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "import os\n", 106 | "\n", 107 | "from bark.generation import (\n", 108 | " generate_text_semantic,\n", 109 | " preload_models,\n", 110 | " models,\n", 111 | ")\n", 112 | "import bark.generation\n", 113 | "\n", 114 | "from bark.api import semantic_to_waveform\n", 115 | "from bark import generate_audio, SAMPLE_RATE\n", 116 | "\n", 117 | "import torch\n", 118 | "import time" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 2, 124 | "id": "cd83b45d", 125 | "metadata": {}, 126 | "outputs": [ 127 | { 128 | "name": 
"stdout", 129 | "output_type": "stream", 130 | "text": [ 131 | "Small models True, offloading to CPU: True\n", 132 | "\tmax memory usage = 967MB, time 4s\n", 133 | "\n", 134 | "Small models False, offloading to CPU: True\n", 135 | "\tmax memory usage = 2407MB, time 8s\n", 136 | "\n", 137 | "Small models True, offloading to CPU: False\n", 138 | "\tmax memory usage = 2970MB, time 3s\n", 139 | "\n", 140 | "Small models False, offloading to CPU: False\n", 141 | "\tmax memory usage = 7824MB, time 6s\n", 142 | "\n" 143 | ] 144 | } 145 | ], 146 | "source": [ 147 | "global models\n", 148 | "\n", 149 | "for offload_models in (True, False):\n", 150 | " # this setattr is needed to do on the fly\n", 151 | " # the easier way to do this is with `os.environ[\"SUNO_OFFLOAD_CPU\"] = \"1\"`\n", 152 | " setattr(bark.generation, \"OFFLOAD_CPU\", offload_models)\n", 153 | " for use_small_models in (True, False):\n", 154 | " models = {}\n", 155 | " torch.cuda.empty_cache()\n", 156 | " torch.cuda.reset_peak_memory_stats()\n", 157 | " preload_models(\n", 158 | " text_use_small=use_small_models,\n", 159 | " coarse_use_small=use_small_models,\n", 160 | " fine_use_small=use_small_models,\n", 161 | " force_reload=True,\n", 162 | " )\n", 163 | " t0 = time.time()\n", 164 | " audio_array = generate_audio(\"madam I'm adam\", history_prompt=\"v2/en_speaker_5\", silent=True)\n", 165 | " dur = time.time() - t0\n", 166 | " max_utilization = torch.cuda.max_memory_allocated()\n", 167 | " print(f\"Small models {use_small_models}, offloading to CPU: {offload_models}\")\n", 168 | " print(f\"\\tmax memory usage = {max_utilization / 1024 / 1024:.0f}MB, time {dur:.0f}s\\n\")" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "id": "bfe5fa06", 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [] 178 | } 179 | ], 180 | "metadata": { 181 | "kernelspec": { 182 | "display_name": "Python 3 (ipykernel)", 183 | "language": "python", 184 | "name": "python3" 185 | }, 186 
| "language_info": { 187 | "codemirror_mode": { 188 | "name": "ipython", 189 | "version": 3 190 | }, 191 | "file_extension": ".py", 192 | "mimetype": "text/x-python", 193 | "name": "python", 194 | "nbconvert_exporter": "python", 195 | "pygments_lexer": "ipython3", 196 | "version": "3.9.16" 197 | } 198 | }, 199 | "nbformat": 4, 200 | "nbformat_minor": 5 201 | } 202 | -------------------------------------------------------------------------------- /notebooks/use_small_models_on_cpu.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "6a682b61", 6 | "metadata": {}, 7 | "source": [ 8 | "# Benchmarking small models on CPU\n", 9 | " - We can enable small models with the `SUNO_USE_SMALL_MODELS` environment variable" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "id": "9500dd93", 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import os\n", 20 | "\n", 21 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n", 22 | "os.environ[\"SUNO_USE_SMALL_MODELS\"] = \"1\"\n", 23 | "\n", 24 | "from IPython.display import Audio\n", 25 | "import numpy as np\n", 26 | "\n", 27 | "from bark import generate_audio, preload_models, SAMPLE_RATE\n", 28 | "\n", 29 | "import time" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 2, 35 | "id": "4e3454b6", 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "name": "stderr", 40 | "output_type": "stream", 41 | "text": [ 42 | "No GPU being used. 
Careful, inference might be very slow!\n" 43 | ] 44 | }, 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "CPU times: user 5.52 s, sys: 2.34 s, total: 7.86 s\n", 50 | "Wall time: 4.33 s\n" 51 | ] 52 | } 53 | ], 54 | "source": [ 55 | "%%time\n", 56 | "preload_models()" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 3, 62 | "id": "f6024e5f", 63 | "metadata": {}, 64 | "outputs": [ 65 | { 66 | "name": "stderr", 67 | "output_type": "stream", 68 | "text": [ 69 | "100%|████████████████████████████████████████████████████████| 100/100 [00:10<00:00, 9.89it/s]\n", 70 | "100%|██████████████████████████████████████████████████████████| 15/15 [00:43<00:00, 2.90s/it]\n" 71 | ] 72 | }, 73 | { 74 | "name": "stdout", 75 | "output_type": "stream", 76 | "text": [ 77 | "took 62s to generate 6s of audio\n" 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "t0 = time.time()\n", 83 | "text = \"In the light of the moon, a little egg lay on a leaf\"\n", 84 | "audio_array = generate_audio(text)\n", 85 | "generation_duration_s = time.time() - t0\n", 86 | "audio_duration_s = audio_array.shape[0] / SAMPLE_RATE\n", 87 | "\n", 88 | "print(f\"took {generation_duration_s:.0f}s to generate {audio_duration_s:.0f}s of audio\")" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 4, 94 | "id": "2dcce86c", 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "data": { 99 | "text/plain": [ 100 | "10" 101 | ] 102 | }, 103 | "execution_count": 4, 104 | "metadata": {}, 105 | "output_type": "execute_result" 106 | } 107 | ], 108 | "source": [ 109 | "os.cpu_count()" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "id": "3046eddb", 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [] 119 | } 120 | ], 121 | "metadata": { 122 | "kernelspec": { 123 | "display_name": "Python 3 (ipykernel)", 124 | "language": "python", 125 | "name": "python3" 126 | }, 127 | "language_info": { 128 | 
"codemirror_mode": { 129 | "name": "ipython", 130 | "version": 3 131 | }, 132 | "file_extension": ".py", 133 | "mimetype": "text/x-python", 134 | "name": "python", 135 | "nbconvert_exporter": "python", 136 | "pygments_lexer": "ipython3", 137 | "version": "3.9.16" 138 | } 139 | }, 140 | "nbformat": 4, 141 | "nbformat_minor": 5 142 | } 143 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "express-bark", 3 | "description": "A simple express server for Bark by Suno", 4 | "license": "AGPL", 5 | "scripts": { 6 | "dev": "next", 7 | "build": "next build", 8 | "start": "next start", 9 | "install:python": "pip install .", 10 | "download:model": "python -m bark --text \"You are all set up.\"" 11 | }, 12 | "dependencies": { 13 | "@emotion/react": "11.11.0", 14 | "@emotion/server": "11.11.0", 15 | "@emotion/styled": "11.11.0", 16 | "@mui/icons-material": "5.11.16", 17 | "@mui/material": "5.13.3", 18 | "@sindresorhus/slugify": "2.2.0", 19 | "axios": "1.4.0", 20 | "execa": "7.1.1", 21 | "nanoid": "4.0.2", 22 | "next": "13.4.4" 23 | }, 24 | "devDependencies": { 25 | "@types/fluent-ffmpeg": "^2.1.21", 26 | "@types/node": "^18.16.3", 27 | "eslint": "^8.39.0", 28 | "eslint-config-prettier": "^8.8.0", 29 | "eslint-config-typescript": "^3.0.0", 30 | "eslint-config-xo": "^0.43.1", 31 | "eslint-plugin-import": "^2.27.5", 32 | "eslint-plugin-jest": "^27.2.1", 33 | "eslint-config-next": "13.2.4", 34 | "eslint-plugin-prettier": "^4.2.1", 35 | "eslint-plugin-unicorn": "^46.0.0", 36 | "eslint-plugin-unused-imports": "^2.0.0", 37 | "fluent-ffmpeg": "^2.1.2", 38 | "nodemon": "^2.0.22", 39 | "prettier": "^2.8.8", 40 | "typescript": "^5.1.3" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/public/favicon.ico -------------------------------------------------------------------------------- /public/voices/announcer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/public/voices/announcer.png -------------------------------------------------------------------------------- /public/voices/default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blib-la/bark-web-ui/f280af2d2315607ba69b8bb6f6c09adbbc2938e7/public/voices/default.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "suno-bark" 7 | version = "0.0.1a" 8 | description = "Bark text to audio model" 9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | authors = [ 12 | {name = "Suno Inc", email = "hello@suno.ai"}, 13 | ] 14 | # Apache 2.0 15 | license = {file = "LICENSE"} 16 | 17 | dependencies = [ 18 | "boto3", 19 | "encodec", 20 | "funcy", 21 | "huggingface-hub>=0.14.1", 22 | "numpy", 23 | "scipy", 24 | "tokenizers", 25 | "torch", 26 | "tqdm", 27 | "transformers", 28 | ] 29 | 30 | [project.urls] 31 | source = "https://github.com/suno-ai/bark" 32 | 33 | [project.optional-dependencies] 34 | dev = [ 35 | "bandit", 36 | "black", 37 | "codecov", 38 | "flake8", 39 | "hypothesis>=6.14,<7", 40 | "isort>=5.0.0,<6", 41 | "jupyter", 42 | "mypy", 43 | "nbconvert", 44 | "nbformat", 45 | "pydocstyle", 46 | "pylint", 47 | "pytest", 48 | "pytest-cov", 49 | ] 50 | 51 | [tool.setuptools] 52 | packages = ["bark"] 53 | 54 | [tool.setuptools.package-data] 55 | bark = 
["assets/prompts/*.npz", "assets/prompts/v2/*.npz"] 56 | 57 | 58 | [tool.black] 59 | line-length = 100 60 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /src/ions/constants.ts: -------------------------------------------------------------------------------- 1 | import path from "node:path"; 2 | import process from "node:process"; 3 | 4 | export const UPLOADS_PATH = path.join(process.cwd(), "public/uploads"); 5 | export const DATA_JSON_PATH = path.join(process.cwd(), "public/uploads/data.json"); 6 | -------------------------------------------------------------------------------- /src/ions/createEmotionCache.ts: -------------------------------------------------------------------------------- 1 | import createCache from "@emotion/cache"; 2 | 3 | const isBrowser = typeof document !== "undefined"; 4 | 5 | // On the client side, Create a meta tag at the top of the and set it as insertionPoint. 6 | // This assures that MUI styles are loaded first. 7 | // It allows developers to easily override MUI styles with other styling solutions, like CSS modules. 8 | export default function createEmotionCache() { 9 | let insertionPoint; 10 | 11 | if (isBrowser) { 12 | const emotionInsertionPoint = document.querySelector( 13 | 'meta[name="emotion-insertion-point"]' 14 | ); 15 | insertionPoint = emotionInsertionPoint ?? 
undefined; 16 | } 17 | 18 | return createCache({ key: "mui-style", insertionPoint }); 19 | } 20 | -------------------------------------------------------------------------------- /src/ions/theme.ts: -------------------------------------------------------------------------------- 1 | import { experimental_extendTheme as extendTheme } from "@mui/material/styles"; 2 | import { Roboto as nextRoboto } from "next/font/google"; 3 | 4 | export const roboto = nextRoboto({ 5 | weight: ["300", "400", "500", "700"], 6 | subsets: ["latin"], 7 | display: "swap", 8 | fallback: ["Helvetica", "Arial", "sans-serif"], 9 | }); 10 | 11 | // Create a theme instance. 12 | const theme = extendTheme({ 13 | colorSchemes: { 14 | light: { 15 | palette: { 16 | primary: { 17 | main: "#123321", 18 | }, 19 | secondary: { 20 | main: "#321123", 21 | }, 22 | }, 23 | }, 24 | dark: { 25 | palette: { 26 | primary: { 27 | main: "#fedabc", 28 | }, 29 | secondary: { 30 | main: "#abcfde", 31 | }, 32 | }, 33 | }, 34 | }, 35 | typography: { 36 | ...roboto.style, 37 | }, 38 | components: { 39 | MuiCssBaseline: { 40 | styleOverrides: { 41 | "html, body": { 42 | height: "100%", 43 | }, 44 | "#__next": { 45 | display: "contents", 46 | }, 47 | }, 48 | }, 49 | }, 50 | }); 51 | 52 | export default theme; 53 | -------------------------------------------------------------------------------- /src/organisms/MusicPlayer.tsx: -------------------------------------------------------------------------------- 1 | import PauseRounded from "@mui/icons-material/PauseRounded"; 2 | import PlayArrowRounded from "@mui/icons-material/PlayArrowRounded"; 3 | import ReplyIcon from "@mui/icons-material/Reply"; 4 | import VolumeDownRounded from "@mui/icons-material/VolumeDownRounded"; 5 | import VolumeUpRounded from "@mui/icons-material/VolumeUpRounded"; 6 | import { Card, Tooltip } from "@mui/material"; 7 | import Box from "@mui/material/Box"; 8 | import IconButton from "@mui/material/IconButton"; 9 | import Slider from 
"@mui/material/Slider";
import Stack from "@mui/material/Stack";
import { styled } from "@mui/material/styles";
import Typography from "@mui/material/Typography";
import Image from "next/image";
import { useCallback, useEffect, useRef, useState } from "react";

import type { Generation } from "@/types/common";

/**
 * Owns a single HTMLAudioElement for `src` and mirrors its state
 * (playing / paused / volume / duration / current time) into React state.
 * Returns `[state, controls, ref]`; a fresh element is created whenever
 * `src` changes, and the previous one is torn down.
 */
function useAudio({ src }: { src: string }) {
  // NOTE(review): extraction stripped generic type arguments from this dump;
  // the original almost certainly typed this ref — restored here. Confirm
  // against the repository.
  const ref = useRef<HTMLAudioElement | null>(null);
  const [state, setState] = useState({
    paused: false,
    playing: false,
    volume: 1,
    duration: 0,
    time: 0,
  });
  const controls = {
    // Optimistically update `time` so the slider doesn't jump while the
    // element seeks, then move the playhead.
    seek: useCallback((time: number) => {
      if (ref.current) {
        setState(previousState => ({
          ...previousState,
          time,
        }));
        ref.current.currentTime = time;
      }
    }, []),
    setVolume: useCallback((volume: number) => {
      if (ref.current) {
        ref.current.volume = volume;
        setState(previousState => ({
          ...previousState,
          volume,
        }));
      }
    }, []),
    play: useCallback(() => {
      // FIX: play() returns a promise that rejects under browser autoplay
      // restrictions; leaving it unhandled produces an unhandled-rejection
      // error. Swallow the rejection deliberately — the UI simply stays in
      // the paused state.
      void ref.current?.play().catch(() => {});
    }, []),
    pause: useCallback(() => {
      ref.current?.pause();
    }, []),
  };

  useEffect(() => {
    const audio = new window.Audio(src);
    ref.current = audio;
    function handlePlay() {
      setState(previousState => ({
        ...previousState,
        playing: true,
        paused: false,
      }));
    }

    function handlePause() {
      setState(previousState => ({
        ...previousState,
        playing: false,
        paused: true,
      }));
    }

    function handleTimeUpdate() {
      setState(previousState => ({
        ...previousState,
        time: audio.currentTime,
      }));
    }

    function handleDurationChange() {
      setState(previousState => ({
        ...previousState,
        duration: audio.duration,
      }));
    }

    // Rewind on end so the next play() starts from the top; `paused: false`
    // distinguishes "finished" from "user-paused".
    function handleEnded() {
      audio.currentTime = 0;
      setState(previousState => ({
        ...previousState,
        playing: false,
        paused: false,
        time: 0,
      }));
    }

    audio.addEventListener("ended", handleEnded);
    audio.addEventListener("durationchange", handleDurationChange);
    audio.addEventListener("timeupdate", handleTimeUpdate);
    audio.addEventListener("play", handlePlay);
    audio.addEventListener("pause", handlePause);
    return () => {
      audio.removeEventListener("ended", handleEnded);
      audio.removeEventListener("durationchange", handleDurationChange);
      audio.removeEventListener("timeupdate", handleTimeUpdate);
      audio.removeEventListener("play", handlePlay);
      audio.removeEventListener("pause", handlePause);
      // FIX: the original cleanup only removed listeners, so an element that
      // was still playing kept playing after unmount or after `src` changed,
      // and the detached element leaked. Stop playback and drop the stale ref.
      audio.pause();
      if (ref.current === audio) {
        ref.current = null;
      }
    };
  }, [src]);

  return [state, controls, ref] as const;
}

// Rounded card hosting the whole player UI.
const Widget = styled(Card)({
  padding: 16,
  borderRadius: 16,
  position: "relative",
  zIndex: 0,
});

// Fixed 100×100 clipped container for the cover artwork.
const CoverImage = styled("div")({
  width: 100,
  height: 100,
  objectFit: "cover",
  overflow: "hidden",
  flexShrink: 0,
  borderRadius: 8,
});

// De-emphasized caption text used for the time readouts.
const TinyText = styled(Typography)({
  fontSize: "0.75rem",
  opacity: 0.75,
  fontWeight: 500,
  letterSpacing: 0.2,
});

/**
 * Formats a duration in seconds as `m:ss` (e.g. 83 -> "1:23").
 */
function formatDuration(time: number) {
  const value = Math.round(time);
  const minute = Math.floor(value / 60);
  const secondLeft = value - minute * 60;
  return `${minute}:${secondLeft < 10 ?
`0${secondLeft}` : secondLeft}`; 146 | } 147 | 148 | export interface MusicPlayerProps { 149 | onUse?(parameters: { 150 | text: string; 151 | voice: string; 152 | textTemperature: number; 153 | waveformTemperature: number; 154 | }): void; 155 | } 156 | export default function MusicPlayer({ 157 | download, 158 | text, 159 | voice, 160 | img, 161 | textTemperature, 162 | waveformTemperature, 163 | onUse, 164 | }: Generation & MusicPlayerProps) { 165 | const [src, setSrc] = useState(img); 166 | 167 | const [state, { play, pause, seek, setVolume }] = useAudio({ 168 | src: download, 169 | }); 170 | 171 | return ( 172 | 173 | 174 | 175 | {voice} { 181 | setSrc("/voices/default.png"); 182 | }} 183 | style={{ 184 | height: "100%", 185 | width: "100%", 186 | objectPosition: "center", 187 | objectFit: "cover", 188 | }} 189 | /> 190 | 191 | 192 | 193 | {voice} 194 | 195 | 196 | {text} 197 | 198 | 199 | 200 | 201 | { 203 | if (onUse) { 204 | onUse({ voice, waveformTemperature, textTemperature, text }); 205 | } 206 | }} 207 | > 208 | 209 | 210 | 211 | 212 | 213 | { 221 | seek(value as number); 222 | }} 223 | sx={{ 224 | height: 4, 225 | "& .MuiSlider-thumb": { 226 | width: 8, 227 | height: 8, 228 | transition: "0.3s cubic-bezier(.47,1.64,.41,.8)", 229 | "&:before": { 230 | boxShadow: "0 2px 12px 0 rgba(0,0,0,0.4)", 231 | }, 232 | "&.Mui-active": { 233 | width: 20, 234 | height: 20, 235 | }, 236 | }, 237 | "& .MuiSlider-rail": { 238 | opacity: 0.28, 239 | }, 240 | }} 241 | /> 242 | 250 | {formatDuration(state.time)} 251 | -{formatDuration(state.duration - state.time)} 252 | 253 | 261 | { 265 | if (state.playing) { 266 | pause(); 267 | } else { 268 | play(); 269 | } 270 | }} 271 | > 272 | {state.playing ? 
( 273 | 274 | ) : ( 275 | 276 | )} 277 | 278 | 279 | 280 | 281 | { 288 | setVolume(value as number); 289 | }} 290 | sx={{ 291 | "& .MuiSlider-track": { 292 | border: "none", 293 | }, 294 | "& .MuiSlider-thumb": { 295 | width: 24, 296 | height: 24, 297 | "&:before": { 298 | boxShadow: "0 4px 8px rgba(0,0,0,0.4)", 299 | }, 300 | "&:hover, &.Mui-focusVisible, &.Mui-active": { 301 | boxShadow: "none", 302 | }, 303 | }, 304 | }} 305 | /> 306 | 307 | 308 | 309 | ); 310 | } 311 | -------------------------------------------------------------------------------- /src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import type { EmotionCache } from "@emotion/react"; 2 | import { CacheProvider } from "@emotion/react"; 3 | import { CssBaseline } from "@mui/material"; 4 | import { Experimental_CssVarsProvider as CssVarsProvider } from "@mui/material/styles"; 5 | import type { AppProps } from "next/app"; 6 | import Head from "next/head"; 7 | 8 | import createEmotionCache from "@/ions/createEmotionCache"; 9 | import theme from "@/ions/theme"; 10 | 11 | const clientSideEmotionCache = createEmotionCache(); 12 | 13 | export interface MyAppProps extends AppProps { 14 | emotionCache?: EmotionCache; 15 | } 16 | 17 | export default function MyApp(props: MyAppProps) { 18 | const { Component, emotionCache = clientSideEmotionCache, pageProps } = props; 19 | return ( 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | ); 30 | } 31 | -------------------------------------------------------------------------------- /src/pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import createEmotionServer from "@emotion/server/create-instance"; 2 | import { getInitColorSchemeScript } from "@mui/material/styles"; 3 | import type { AppType } from "next/app"; 4 | import type { DocumentContext, DocumentProps } from "next/document"; 5 | import Document, { Head, Html, Main, NextScript } from 
"next/document"; 6 | import type { ComponentProps, ComponentType, JSX } from "react"; 7 | 8 | import type { MyAppProps } from "./_app"; 9 | 10 | import createEmotionCache from "@/ions/createEmotionCache"; 11 | import { roboto } from "@/ions/theme"; 12 | 13 | interface MyDocumentProps extends DocumentProps { 14 | emotionStyleTags: JSX.Element[]; 15 | } 16 | 17 | export default function MyDocument({ emotionStyleTags }: MyDocumentProps) { 18 | return ( 19 | 20 | 21 | 22 | 23 | {emotionStyleTags} 24 | 25 | 26 | {getInitColorSchemeScript()} 27 |
28 | 29 | 30 | 31 | ); 32 | } 33 | 34 | // `getInitialProps` belongs to `_document` (instead of `_app`), 35 | // it's compatible with static-site generation (SSG). 36 | MyDocument.getInitialProps = async (ctx: DocumentContext) => { 37 | // Resolution order 38 | // 39 | // On the server: 40 | // 1. app.getInitialProps 41 | // 2. page.getInitialProps 42 | // 3. document.getInitialProps 43 | // 4. app.render 44 | // 5. page.render 45 | // 6. document.render 46 | // 47 | // On the server with error: 48 | // 1. document.getInitialProps 49 | // 2. app.render 50 | // 3. page.render 51 | // 4. document.render 52 | // 53 | // On the client 54 | // 1. app.getInitialProps 55 | // 2. page.getInitialProps 56 | // 3. app.render 57 | // 4. page.render 58 | 59 | const originalRenderPage = ctx.renderPage; 60 | 61 | // You can consider sharing the same Emotion cache between all the SSR requests to speed up performance. 62 | // However, be aware that it can have global side effects. 63 | const cache = createEmotionCache(); 64 | const { extractCriticalToChunks } = createEmotionServer(cache); 65 | 66 | ctx.renderPage = () => 67 | originalRenderPage({ 68 | enhanceApp: (App: ComponentType & MyAppProps>) => 69 | function (props) { 70 | return ; 71 | }, 72 | }); 73 | 74 | const initialProps = await Document.getInitialProps(ctx); 75 | // This is important. It prevents Emotion to render invalid HTML. 76 | // See https://github.com/mui/material-ui/issues/26561#issuecomment-855286153 77 | const emotionStyles = extractCriticalToChunks(initialProps.html); 78 | const emotionStyleTags = emotionStyles.styles.map(style => ( 79 |