├── .github
└── workflows
│ └── ci.yaml
├── .gitignore
├── .python-version
├── LICENSE
├── README.md
├── app.py
├── cli.py
├── dia
├── __init__.py
├── audio.py
├── config.py
├── layers.py
├── model.py
├── state.py
└── static
│ └── images
│ └── banner.png
├── docker
├── Dockerfile.cpu
└── Dockerfile.gpu
├── example
├── benchmark.py
├── simple-cpu.py
├── simple-mac.py
├── simple.py
├── simple_batch.py
├── voice_clone.py
└── voice_clone_batch.py
├── example_prompt.mp3
├── pyproject.toml
└── uv.lock
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: Continuous Integration
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | lint_and_format:
10 | runs-on: ubuntu-latest
11 | name: Lint and Format
12 | steps:
13 | - uses: actions/checkout@v4
14 | - uses: astral-sh/ruff-action@v3
15 | with:
16 | version: latest
17 |
18 | - name: Check Lint using Ruff
19 | run: ruff check
20 |
21 | - name: Check Format using Ruff
22 | run: ruff format --check --diff
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 |
9 | # Virtual environments
10 | .venv
11 |
12 | .gradio
13 |
14 | **/*.pth
15 | **/*.mp3
16 | !example_prompt.mp3
17 | **/*.txt
18 |
19 | .ruff_cache
20 | .ipynb_checkpoints
21 | config.json
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2025 Nari Labs
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 | Dia is a 1.6B parameter text to speech model created by Nari Labs.
17 |
18 | Dia **directly generates highly realistic dialogue from a transcript**. You can condition the output on audio, enabling emotion and tone control. The model can also produce nonverbal communications like laughter, coughing, clearing throat, etc.
19 |
20 | To accelerate research, we are providing access to pretrained model checkpoints and inference code. The model weights are hosted on [Hugging Face](https://huggingface.co/nari-labs/Dia-1.6B). The model only supports English generation at the moment.
21 |
22 | We also provide a [demo page](https://yummy-fir-7a4.notion.site/dia) comparing our model to [ElevenLabs Studio](https://elevenlabs.io/studio) and [Sesame CSM-1B](https://github.com/SesameAILabs/csm).
23 |
24 | - (Update) We have a ZeroGPU Space running! Try it now [here](https://huggingface.co/spaces/nari-labs/Dia-1.6B). Thanks to the HF team for the support :)
25 | - Join our [discord server](https://discord.gg/bJq6vjRRKv) for community support and access to new features.
26 | - Play with a larger version of Dia: generate fun conversations, remix content, and share with friends. 🔮 Join the [waitlist](https://tally.so/r/meokbo) for early access.
27 |
28 | ## Generation Guidelines
29 |
30 | - Keep input text length moderate
31 | - Short input (corresponding to under 5s of audio) will sound unnatural
32 | - Very long input (corresponding to over 20s of audio) will make the speech unnaturally fast.
33 | - Use non-verbal tags sparingly, from the list in the README. Overusing or using unlisted non-verbals may cause weird artifacts.
34 | - Always begin input text with `[S1]`, and always alternate between `[S1]` and `[S2]` (i.e. `[S1]`... `[S1]`... is not good)
35 | - When using audio prompts (voice cloning), follow these instructions carefully:
36 | - Provide the transcript of the to-be cloned audio before the generation text.
37 | - Transcript must use `[S1]`, `[S2]` speaker tags correctly (i.e. single speaker: `[S1]`..., two speakers: `[S1]`... `[S2]`...)
38 | - Duration of the to-be cloned audio should be 5~10 seconds for the best results.
39 | (Keep in mind: 1 second ≈ 86 tokens)
40 | - Put `[S1]` or `[S2]` (the second-to-last speaker's tag) at the end of the audio to improve audio quality at the end
41 |
42 | ### Install via pip
43 |
44 | ```bash
45 | # Install directly from GitHub
46 | pip install git+https://github.com/nari-labs/dia.git
47 | ```
48 |
49 | ### Set HF_TOKEN ENV var
50 |
51 | ```bash
52 | # Set the HF_TOKEN ENV var to auto download config from HF Hub
53 | export HF_TOKEN="your token"
54 | ```
55 |
56 | ### Run the Gradio UI
57 |
58 | This will open a Gradio UI that you can work on.
59 |
60 | ```bash
61 | git clone https://github.com/nari-labs/dia.git
62 | cd dia && uv run app.py
63 | ```
64 |
65 | or if you do not have `uv` pre-installed:
66 |
67 | ```bash
68 | git clone https://github.com/nari-labs/dia.git
69 | cd dia
70 | python -m venv .venv
71 | source .venv/bin/activate
72 | pip install -e .
73 | python app.py
74 | ```
75 |
76 | Note that the model was not fine-tuned on a specific voice. Hence, you will get different voices every time you run the model.
77 | You can keep speaker consistency by either adding an audio prompt (a guide coming VERY soon - try it with the second example on Gradio for now), or fixing the seed.
78 |
79 | ## Features
80 |
81 | - Generate dialogue via `[S1]` and `[S2]` tag
82 | - Generate non-verbal like `(laughs)`, `(coughs)`, etc.
83 | - Below verbal tags will be recognized, but might result in unexpected output.
84 | - `(laughs), (clears throat), (sighs), (gasps), (coughs), (singing), (sings), (mumbles), (beep), (groans), (sniffs), (claps), (screams), (inhales), (exhales), (applause), (burps), (humming), (sneezes), (chuckle), (whistles)`
85 | - Voice cloning. See [`example/voice_clone.py`](example/voice_clone.py) for more information.
86 | - In the Hugging Face space, you can upload the audio you want to clone and place its transcript before your script. Make sure the transcript follows the required format. The model will then output only the content of your script.
87 |
88 | ## ⚙️ Usage
89 |
90 | ### As a Python Library
91 |
92 | ```python
93 | from dia.model import Dia
94 |
95 |
96 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
97 |
98 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
99 |
100 | output = model.generate(text, use_torch_compile=True, verbose=True)
101 |
102 | model.save_audio("simple.mp3", output)
103 | ```
104 |
105 | If you're on Mac with Apple Silicon, you can use the following code to make it work. For MPS to work `use_torch_compile` must be set to `False`. As that feature isn't supported yet.
106 |
107 | ```python
108 | from dia.model import Dia
109 |
110 |
111 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
112 |
113 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
114 |
115 | # It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.
116 | # This is because the `torch.compile` function is not supported on MacOS.
117 | output = model.generate(text, use_torch_compile=False, verbose=True)
118 |
119 | model.save_audio("simple.mp3", output)
120 | ```
121 |
122 | A pypi package and a working CLI tool will be available soon.
123 |
124 | ## 💻 Hardware and Inference Speed
125 |
126 | Dia has been tested on only GPUs (pytorch 2.0+, CUDA 12.6). CPU support is to be added soon.
127 | The initial run will take longer as the Descript Audio Codec also needs to be downloaded.
128 |
129 | These are the speed we benchmarked in RTX 4090.
130 |
131 | | precision | realtime factor w/ compile | realtime factor w/o compile | VRAM |
132 | |:-:|:-:|:-:|:-:|
133 | | `bfloat16` | x2.1 | x1.5 | ~10GB |
134 | | `float16` | x2.2 | x1.3 | ~10GB |
135 | | `float32` | x1 | x0.9 | ~13GB |
136 |
137 | We will be adding a quantized version in the future.
138 |
139 | If you don't have hardware available or if you want to play with bigger versions of our models, join the waitlist [here](https://tally.so/r/meokbo).
140 |
141 | ## 🪪 License
142 |
143 | This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
144 |
145 | ## ⚠️ Disclaimer
146 |
147 | This project offers a high-fidelity speech generation model intended for research and educational use. The following uses are **strictly forbidden**:
148 |
149 | - **Identity Misuse**: Do not produce audio resembling real individuals without permission.
150 | - **Deceptive Content**: Do not use this model to generate misleading content (e.g. fake news)
151 | - **Illegal or Malicious Use**: Do not use this model for activities that are illegal or intended to cause harm.
152 |
153 | By using this model, you agree to uphold relevant legal standards and ethical responsibilities. We **are not responsible** for any misuse and firmly oppose any unethical usage of this technology.
154 |
155 | ## 🔭 TODO / Future Work
156 |
157 | - Docker support for ARM architecture and MacOS.
158 | - Optimize inference speed.
159 | - Add quantization for memory efficiency.
160 |
161 | ## 🤝 Contributing
162 |
163 | We are a tiny team of 1 full-time and 1 part-time research-engineers. We are extra-welcome to any contributions!
164 | Join our [Discord Server](https://discord.gg/bJq6vjRRKv) for discussions.
165 |
166 | ## 🤗 Acknowledgements
167 |
168 | - We thank the [Google TPU Research Cloud program](https://sites.research.google/trc/about/) for providing computation resources.
169 | - Our work was heavily inspired by [SoundStorm](https://arxiv.org/abs/2305.09636), [Parakeet](https://jordandarefsky.com/blog/2024/parakeet/), and [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec).
170 | - Hugging Face for providing the ZeroGPU Grant.
171 | - "Nari" is a pure Korean word for lily.
172 | - We thank Jason Y. for providing help with data filtering.
173 |
174 |
175 | ## ⭐ Star History
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import tempfile
3 | import time
4 | from pathlib import Path
5 | from typing import Optional, Tuple
6 |
7 | import gradio as gr
8 | import numpy as np
9 | import soundfile as sf
10 | import torch
11 |
12 | from dia.model import Dia
13 |
14 |
15 | # --- Global Setup ---
16 | parser = argparse.ArgumentParser(description="Gradio interface for Nari TTS")
17 | parser.add_argument("--device", type=str, default=None, help="Force device (e.g., 'cuda', 'mps', 'cpu')")
18 | parser.add_argument("--share", action="store_true", help="Enable Gradio sharing")
19 |
20 | args = parser.parse_args()
21 |
22 |
23 | # Determine device
24 | if args.device:
25 | device = torch.device(args.device)
26 | elif torch.cuda.is_available():
27 | device = torch.device("cuda")
28 | # Simplified MPS check for broader compatibility
29 | elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
30 | # Basic check is usually sufficient, detailed check can be problematic
31 | device = torch.device("mps")
32 | else:
33 | device = torch.device("cpu")
34 |
35 | print(f"Using device: {device}")
36 |
37 | # Load Nari model and config
38 | print("Loading Nari model...")
39 | try:
40 | dtype_map = {
41 | "cpu": "float32",
42 | "mps": "float32", # Apple M series – better with float32
43 | "cuda": "float16", # NVIDIA – better with float16
44 | }
45 |
46 | dtype = dtype_map.get(device.type, "float16")
47 | print(f"Using device: {device}, attempting to load model with {dtype}")
48 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype=dtype, device=device)
49 | except Exception as e:
50 | print(f"Error loading Nari model: {e}")
51 | raise
52 |
53 |
54 | def run_inference(
55 | text_input: str,
56 | audio_prompt_input: Optional[Tuple[int, np.ndarray]],
57 | max_new_tokens: int,
58 | cfg_scale: float,
59 | temperature: float,
60 | top_p: float,
61 | cfg_filter_top_k: int,
62 | speed_factor: float,
63 | ):
64 | """
65 | Runs Nari inference using the globally loaded model and provided inputs.
66 | Uses temporary files for text and audio prompt compatibility with inference.generate.
67 | """
68 | global model, device # Access global model, config, device
69 |
70 | if not text_input or text_input.isspace():
71 | raise gr.Error("Text input cannot be empty.")
72 |
73 | temp_txt_file_path = None
74 | temp_audio_prompt_path = None
75 | output_audio = (44100, np.zeros(1, dtype=np.float32))
76 |
77 | try:
78 | prompt_path_for_generate = None
79 | if audio_prompt_input is not None:
80 | sr, audio_data = audio_prompt_input
81 | # Check if audio_data is valid
82 | if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty
83 | gr.Warning("Audio prompt seems empty or silent, ignoring prompt.")
84 | else:
85 | # Save prompt audio to a temporary WAV file
86 | with tempfile.NamedTemporaryFile(mode="wb", suffix=".wav", delete=False) as f_audio:
87 | temp_audio_prompt_path = f_audio.name # Store path for cleanup
88 |
89 | # Basic audio preprocessing for consistency
90 | # Convert to float32 in [-1, 1] range if integer type
91 | if np.issubdtype(audio_data.dtype, np.integer):
92 | max_val = np.iinfo(audio_data.dtype).max
93 | audio_data = audio_data.astype(np.float32) / max_val
94 | elif not np.issubdtype(audio_data.dtype, np.floating):
95 | gr.Warning(f"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.")
96 | # Attempt conversion, might fail for complex types
97 | try:
98 | audio_data = audio_data.astype(np.float32)
99 | except Exception as conv_e:
100 | raise gr.Error(f"Failed to convert audio prompt to float32: {conv_e}")
101 |
102 | # Ensure mono (average channels if stereo)
103 | if audio_data.ndim > 1:
104 | if audio_data.shape[0] == 2: # Assume (2, N)
105 | audio_data = np.mean(audio_data, axis=0)
106 | elif audio_data.shape[1] == 2: # Assume (N, 2)
107 | audio_data = np.mean(audio_data, axis=1)
108 | else:
109 | gr.Warning(
110 | f"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis."
111 | )
112 | audio_data = (
113 | audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]
114 | )
115 | audio_data = np.ascontiguousarray(audio_data) # Ensure contiguous after slicing/mean
116 |
117 | # Write using soundfile
118 | try:
119 | sf.write(
120 | temp_audio_prompt_path, audio_data, sr, subtype="FLOAT"
121 | ) # Explicitly use FLOAT subtype
122 | prompt_path_for_generate = temp_audio_prompt_path
123 | print(f"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})")
124 | except Exception as write_e:
125 | print(f"Error writing temporary audio file: {write_e}")
126 | raise gr.Error(f"Failed to save audio prompt: {write_e}")
127 |
128 | # 3. Run Generation
129 |
130 | start_time = time.time()
131 |
132 | # Use torch.inference_mode() context manager for the generation call
133 | with torch.inference_mode():
134 | output_audio_np = model.generate(
135 | text_input,
136 | max_tokens=max_new_tokens,
137 | cfg_scale=cfg_scale,
138 | temperature=temperature,
139 | top_p=top_p,
140 | cfg_filter_top_k=cfg_filter_top_k, # Pass the value here
141 | use_torch_compile=False, # Keep False for Gradio stability
142 | audio_prompt=prompt_path_for_generate,
143 | )
144 |
145 | end_time = time.time()
146 | print(f"Generation finished in {end_time - start_time:.2f} seconds.")
147 |
148 | # 4. Convert Codes to Audio
149 | if output_audio_np is not None:
150 | # Get sample rate from the loaded DAC model
151 | output_sr = 44100
152 |
153 | # --- Slow down audio ---
154 | original_len = len(output_audio_np)
155 | # Ensure speed_factor is positive and not excessively small/large to avoid issues
156 | speed_factor = max(0.1, min(speed_factor, 5.0))
157 | target_len = int(original_len / speed_factor) # Target length based on speed_factor
158 | if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid
159 | x_original = np.arange(original_len)
160 | x_resampled = np.linspace(0, original_len - 1, target_len)
161 | resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)
162 | output_audio = (
163 | output_sr,
164 | resampled_audio_np.astype(np.float32),
165 | ) # Use resampled audio
166 | print(f"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.")
167 | else:
168 | output_audio = (
169 | output_sr,
170 | output_audio_np,
171 | ) # Keep original if calculation fails or no change
172 | print(f"Skipping audio speed adjustment (factor: {speed_factor:.2f}).")
173 | # --- End slowdown ---
174 |
175 | print(f"Audio conversion successful. Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}")
176 |
177 | # Explicitly convert to int16 to prevent Gradio warning
178 | if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:
179 | audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)
180 | audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)
181 | output_audio = (output_sr, audio_for_gradio)
182 | print("Converted audio to int16 for Gradio output.")
183 |
184 | else:
185 | print("\nGeneration finished, but no valid tokens were produced.")
186 | # Return default silence
187 | gr.Warning("Generation produced no output.")
188 |
189 | except Exception as e:
190 | print(f"Error during inference: {e}")
191 | import traceback
192 |
193 | traceback.print_exc()
194 | # Re-raise as Gradio error to display nicely in the UI
195 | raise gr.Error(f"Inference failed: {e}")
196 |
197 | finally:
198 | # 5. Cleanup Temporary Files defensively
199 | if temp_txt_file_path and Path(temp_txt_file_path).exists():
200 | try:
201 | Path(temp_txt_file_path).unlink()
202 | print(f"Deleted temporary text file: {temp_txt_file_path}")
203 | except OSError as e:
204 | print(f"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}")
205 | if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():
206 | try:
207 | Path(temp_audio_prompt_path).unlink()
208 | print(f"Deleted temporary audio prompt file: {temp_audio_prompt_path}")
209 | except OSError as e:
210 | print(f"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}")
211 |
212 | return output_audio
213 |
214 |
215 | # --- Create Gradio Interface ---
216 | css = """
217 | #col-container {max-width: 90%; margin-left: auto; margin-right: auto;}
218 | """
219 | # Attempt to load default text from example.txt
220 | default_text = "[S1] Dia is an open weights text to dialogue model. \n[S2] You get full control over scripts and voices. \n[S1] Wow. Amazing. (laughs) \n[S2] Try it now on Git hub or Hugging Face."
221 | example_txt_path = Path("./example.txt")
222 | if example_txt_path.exists():
223 | try:
224 | default_text = example_txt_path.read_text(encoding="utf-8").strip()
225 | if not default_text: # Handle empty example file
226 | default_text = "Example text file was empty."
227 | except Exception as e:
228 | print(f"Warning: Could not read example.txt: {e}")
229 |
230 |
231 | # Build Gradio UI
232 | with gr.Blocks(css=css) as demo:
233 | gr.Markdown("# Nari Text-to-Speech Synthesis")
234 |
235 | with gr.Row(equal_height=False):
236 | with gr.Column(scale=1):
237 | text_input = gr.Textbox(
238 | label="Input Text",
239 | placeholder="Enter text here...",
240 | value=default_text,
241 | lines=5, # Increased lines
242 | )
243 | audio_prompt_input = gr.Audio(
244 | label="Audio Prompt (Optional)",
245 | show_label=True,
246 | sources=["upload", "microphone"],
247 | type="numpy",
248 | )
249 | with gr.Accordion("Generation Parameters", open=False):
250 | max_new_tokens = gr.Slider(
251 | label="Max New Tokens (Audio Length)",
252 | minimum=860,
253 | maximum=3072,
254 | value=model.config.data.audio_length, # Use config default if available, else fallback
255 | step=50,
256 | info="Controls the maximum length of the generated audio (more tokens = longer audio).",
257 | )
258 | cfg_scale = gr.Slider(
259 | label="CFG Scale (Guidance Strength)",
260 | minimum=1.0,
261 | maximum=5.0,
262 | value=3.0, # Default from inference.py
263 | step=0.1,
264 | info="Higher values increase adherence to the text prompt.",
265 | )
266 | temperature = gr.Slider(
267 | label="Temperature (Randomness)",
268 | minimum=1.0,
269 | maximum=1.5,
270 | value=1.3, # Default from inference.py
271 | step=0.05,
272 | info="Lower values make the output more deterministic, higher values increase randomness.",
273 | )
274 | top_p = gr.Slider(
275 | label="Top P (Nucleus Sampling)",
276 | minimum=0.80,
277 | maximum=1.0,
278 | value=0.95, # Default from inference.py
279 | step=0.01,
280 | info="Filters vocabulary to the most likely tokens cumulatively reaching probability P.",
281 | )
282 | cfg_filter_top_k = gr.Slider(
283 | label="CFG Filter Top K",
284 | minimum=15,
285 | maximum=50,
286 | value=30,
287 | step=1,
288 | info="Top k filter for CFG guidance.",
289 | )
290 | speed_factor_slider = gr.Slider(
291 | label="Speed Factor",
292 | minimum=0.8,
293 | maximum=1.0,
294 | value=0.94,
295 | step=0.02,
296 | info="Adjusts the speed of the generated audio (1.0 = original speed).",
297 | )
298 |
299 | run_button = gr.Button("Generate Audio", variant="primary")
300 |
301 | with gr.Column(scale=1):
302 | audio_output = gr.Audio(
303 | label="Generated Audio",
304 | type="numpy",
305 | autoplay=False,
306 | )
307 |
308 | # Link button click to function
309 | run_button.click(
310 | fn=run_inference,
311 | inputs=[
312 | text_input,
313 | audio_prompt_input,
314 | max_new_tokens,
315 | cfg_scale,
316 | temperature,
317 | top_p,
318 | cfg_filter_top_k,
319 | speed_factor_slider,
320 | ],
321 | outputs=[audio_output], # Add status_output here if using it
322 | api_name="generate_audio",
323 | )
324 |
325 | # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)
326 | example_prompt_path = "./example_prompt.mp3" # Adjust if needed
327 | examples_list = [
328 | [
329 | "[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! \n[S1] What's the procedure... \n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! ",
330 | None,
331 | 3072,
332 | 3.0,
333 | 1.3,
334 | 0.95,
335 | 35,
336 | 0.94,
337 | ],
338 | [
339 | "[S1] Open weights text to dialogue model. \n[S2] You get full control over scripts and voices. \n[S1] I'm biased, but I think we clearly won. \n[S2] Hard to disagree. (laughs) \n[S1] Thanks for listening to this demo. \n[S2] Try it now on Git hub and Hugging Face. \n[S1] If you liked our model, please give us a star and share to your friends. \n[S2] This was Nari Labs.",
340 | example_prompt_path if Path(example_prompt_path).exists() else None,
341 | 3072,
342 | 3.0,
343 | 1.3,
344 | 0.95,
345 | 35,
346 | 0.94,
347 | ],
348 | ]
349 |
350 | if examples_list:
351 | gr.Examples(
352 | examples=examples_list,
353 | inputs=[
354 | text_input,
355 | audio_prompt_input,
356 | max_new_tokens,
357 | cfg_scale,
358 | temperature,
359 | top_p,
360 | cfg_filter_top_k,
361 | speed_factor_slider,
362 | ],
363 | outputs=[audio_output],
364 | fn=run_inference,
365 | cache_examples=False,
366 | label="Examples (Click to Run)",
367 | )
368 | else:
369 | gr.Markdown("_(No examples configured or example prompt file missing)_")
370 |
371 | # --- Launch the App ---
372 | if __name__ == "__main__":
373 | print("Launching Gradio interface...")
374 |
375 | # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values
376 | # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker
377 | demo.launch(share=args.share)
378 |
--------------------------------------------------------------------------------
/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 |
5 | import numpy as np
6 | import soundfile as sf
7 | import torch
8 |
9 | from dia.model import Dia
10 |
11 |
12 | def set_seed(seed: int):
13 | """Sets the random seed for reproducibility."""
14 | random.seed(seed)
15 | np.random.seed(seed)
16 | torch.manual_seed(seed)
17 | if torch.cuda.is_available():
18 | torch.cuda.manual_seed(seed)
19 | torch.cuda.manual_seed_all(seed)
20 | # Ensure deterministic behavior for cuDNN (if used)
21 | torch.backends.cudnn.deterministic = True
22 | torch.backends.cudnn.benchmark = False
23 |
24 |
25 | def main():
26 | parser = argparse.ArgumentParser(description="Generate audio using the Dia model.")
27 |
28 | parser.add_argument("text", type=str, help="Input text for speech generation.")
29 | parser.add_argument(
30 | "--output", type=str, required=True, help="Path to save the generated audio file (e.g., output.wav)."
31 | )
32 |
33 | parser.add_argument(
34 | "--repo-id",
35 | type=str,
36 | default="nari-labs/Dia-1.6B",
37 | help="Hugging Face repository ID (e.g., nari-labs/Dia-1.6B).",
38 | )
39 | parser.add_argument(
40 | "--local-paths", action="store_true", help="Load model from local config and checkpoint files."
41 | )
42 |
43 | parser.add_argument(
44 | "--config", type=str, help="Path to local config.json file (required if --local-paths is set)."
45 | )
46 | parser.add_argument(
47 | "--checkpoint", type=str, help="Path to local model checkpoint .pth file (required if --local-paths is set)."
48 | )
49 | parser.add_argument(
50 | "--audio-prompt", type=str, default=None, help="Path to an optional audio prompt WAV file for voice cloning."
51 | )
52 |
53 | gen_group = parser.add_argument_group("Generation Parameters")
54 | gen_group.add_argument(
55 | "--max-tokens",
56 | type=int,
57 | default=None,
58 | help="Maximum number of audio tokens to generate (defaults to config value).",
59 | )
60 | gen_group.add_argument(
61 | "--cfg-scale", type=float, default=3.0, help="Classifier-Free Guidance scale (default: 3.0)."
62 | )
63 | gen_group.add_argument(
64 | "--temperature", type=float, default=1.3, help="Sampling temperature (higher is more random, default: 0.7)."
65 | )
66 | gen_group.add_argument("--top-p", type=float, default=0.95, help="Nucleus sampling probability (default: 0.95).")
67 |
68 | infra_group = parser.add_argument_group("Infrastructure")
69 | infra_group.add_argument("--seed", type=int, default=None, help="Random seed for reproducibility.")
70 | infra_group.add_argument(
71 | "--device",
72 | type=str,
73 | default="cuda" if torch.cuda.is_available() else "cpu",
74 | help="Device to run inference on (e.g., 'cuda', 'cpu', default: auto).",
75 | )
76 |
77 | args = parser.parse_args()
78 |
79 | # Validation for local paths
80 | if args.local_paths:
81 | if not args.config:
82 | parser.error("--config is required when --local-paths is set.")
83 | if not args.checkpoint:
84 | parser.error("--checkpoint is required when --local-paths is set.")
85 | if not os.path.exists(args.config):
86 | parser.error(f"Config file not found: {args.config}")
87 | if not os.path.exists(args.checkpoint):
88 | parser.error(f"Checkpoint file not found: {args.checkpoint}")
89 |
90 | # Set seed if provided
91 | if args.seed is not None:
92 | set_seed(args.seed)
93 | print(f"Using random seed: {args.seed}")
94 |
95 | # Determine device
96 | device = torch.device(args.device)
97 | print(f"Using device: {device}")
98 |
99 | # Load model
100 | print("Loading model...")
101 | if args.local_paths:
102 | print(f"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'")
103 | try:
104 | model = Dia.from_local(args.config, args.checkpoint, device=device)
105 | except Exception as e:
106 | print(f"Error loading local model: {e}")
107 | exit(1)
108 | else:
109 | print(f"Loading from Hugging Face Hub: repo_id='{args.repo_id}'")
110 | try:
111 | model = Dia.from_pretrained(args.repo_id, device=device)
112 | except Exception as e:
113 | print(f"Error loading model from Hub: {e}")
114 | exit(1)
115 | print("Model loaded.")
116 |
117 | # Generate audio
118 | print("Generating audio...")
119 | try:
120 | sample_rate = 44100 # Default assumption
121 |
122 | output_audio = model.generate(
123 | text=args.text,
124 | audio_prompt=args.audio_prompt,
125 | max_tokens=args.max_tokens,
126 | cfg_scale=args.cfg_scale,
127 | temperature=args.temperature,
128 | top_p=args.top_p,
129 | )
130 | print("Audio generation complete.")
131 |
132 | print(f"Saving audio to {args.output}...")
133 | os.makedirs(os.path.dirname(args.output) or ".", exist_ok=True)
134 |
135 | sf.write(args.output, output_audio, sample_rate)
136 | print(f"Audio successfully saved to {args.output}")
137 |
138 | except Exception as e:
139 | print(f"Error during audio generation or saving: {e}")
140 | exit(1)
141 |
142 |
143 | if __name__ == "__main__":
144 | main()
145 |
--------------------------------------------------------------------------------
/dia/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import Dia
2 |
3 |
4 | __all__ = [
5 | "Dia",
6 | ]
7 |
--------------------------------------------------------------------------------
/dia/audio.py:
--------------------------------------------------------------------------------
1 | import typing as tp
2 |
3 | import torch
4 |
5 |
6 | def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
7 | """
8 | Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].
9 | Negative t_idx => BOS; t_idx >= T => PAD.
10 | """
11 | delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)
12 |
13 | t_idx_BxT = torch.broadcast_to(
14 | torch.arange(T, dtype=torch.int32)[None, :],
15 | [B, T],
16 | )
17 | t_idx_BxTx1 = t_idx_BxT[..., None]
18 | t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)
19 |
20 | b_idx_BxTxC = torch.broadcast_to(
21 | torch.arange(B, dtype=torch.int32).view(B, 1, 1),
22 | [B, T, C],
23 | )
24 | c_idx_BxTxC = torch.broadcast_to(
25 | torch.arange(C, dtype=torch.int32).view(1, 1, C),
26 | [B, T, C],
27 | )
28 |
29 | # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail
30 | t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)
31 |
32 | indices_BTCx3 = torch.stack(
33 | [
34 | b_idx_BxTxC.reshape(-1),
35 | t_clamped_BxTxC.reshape(-1),
36 | c_idx_BxTxC.reshape(-1),
37 | ],
38 | dim=1,
39 | ).long() # Ensure indices are long type for indexing
40 |
41 | return t_idx_BxTxC, indices_BTCx3
42 |
43 |
44 | def apply_audio_delay(
45 | audio_BxTxC: torch.Tensor,
46 | pad_value: int,
47 | bos_value: int,
48 | precomp: tp.Tuple[torch.Tensor, torch.Tensor],
49 | ) -> torch.Tensor:
50 | """
51 | Applies the delay pattern to batched audio tokens using precomputed indices,
52 | inserting BOS where t_idx < 0 and PAD where t_idx >= T.
53 |
54 | Args:
55 | audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)
56 | pad_value: the padding token
57 | bos_value: the BOS token
58 | precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices
59 |
60 | Returns:
61 | result_BxTxC: [B, T, C] delayed audio tokens
62 | """
63 | device = audio_BxTxC.device # Get device from input tensor
64 | t_idx_BxTxC, indices_BTCx3 = precomp
65 | t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device
66 | indices_BTCx3 = indices_BTCx3.to(device)
67 |
68 | # Equivalent of tf.gather_nd using advanced indexing
69 | # Ensure indices are long type if not already (build_delay_indices should handle this)
70 | gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]
71 | gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)
72 |
73 | # Create masks on the correct device
74 | mask_bos = t_idx_BxTxC < 0 # => place bos_value
75 | mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value
76 |
77 | # Create scalar tensors on the correct device
78 | bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)
79 | pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)
80 |
81 | # If mask_bos, BOS; else if mask_pad, PAD; else original gather
82 | # All tensors should now be on the same device
83 | result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))
84 |
85 | return result_BxTxC
86 |
87 |
88 | def build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
89 | """
90 | Precompute indices for the revert operation using PyTorch.
91 |
92 | Returns:
93 | A tuple (t_idx_BxTxC, indices_BTCx3) where:
94 | - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.
95 | - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:
96 | batch indices, clamped time indices, and channel indices.
97 | """
98 | # Use default device unless specified otherwise; assumes inputs might define device later
99 | device = None # Or determine dynamically if needed, e.g., from a model parameter
100 |
101 | delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)
102 |
103 | t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])
104 | t_idx_BT1 = t_idx_BT1.unsqueeze(-1)
105 |
106 | t_idx_BxTxC = torch.minimum(
107 | t_idx_BT1 + delay_arr.view(1, 1, C),
108 | torch.tensor(T - 1, device=device),
109 | )
110 | b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])
111 | c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])
112 |
113 | indices_BTCx3 = torch.stack(
114 | [
115 | b_idx_BxTxC.reshape(-1),
116 | t_idx_BxTxC.reshape(-1),
117 | c_idx_BxTxC.reshape(-1),
118 | ],
119 | axis=1,
120 | ).long() # Ensure indices are long type
121 |
122 | return t_idx_BxTxC, indices_BTCx3
123 |
124 |
125 | def revert_audio_delay(
126 | audio_BxTxC: torch.Tensor,
127 | pad_value: int,
128 | precomp: tp.Tuple[torch.Tensor, torch.Tensor],
129 | T: int,
130 | ) -> torch.Tensor:
131 | """
132 | Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).
133 |
134 | Args:
135 | audio_BxTxC: Input delayed audio tensor
136 | pad_value: Padding value for out-of-bounds indices
137 | precomp: Precomputed revert indices tuple containing:
138 | - t_idx_BxTxC: Time offset indices tensor
139 | - indices_BTCx3: Gather indices tensor for original audio
140 | T: Original sequence length before padding
141 |
142 | Returns:
143 | Reverted audio tensor with same shape as input
144 | """
145 | t_idx_BxTxC, indices_BTCx3 = precomp
146 | device = audio_BxTxC.device # Get device from input tensor
147 |
148 | # Move precomputed indices to the same device as audio_BxTxC if they aren't already
149 | t_idx_BxTxC = t_idx_BxTxC.to(device)
150 | indices_BTCx3 = indices_BTCx3.to(device)
151 |
152 | # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)
153 | gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]
154 | gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping
155 |
156 | # Create pad_tensor on the correct device
157 | pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)
158 | # Create T tensor on the correct device for comparison
159 | T_tensor = torch.tensor(T, device=device)
160 |
161 | result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where
162 |
163 | return result_BxTxC
164 |
--------------------------------------------------------------------------------
/dia/config.py:
--------------------------------------------------------------------------------
1 | """Configuration management module for the Dia model.
2 |
3 | This module provides comprehensive configuration management for the Dia model,
4 | utilizing Pydantic for validation. It defines configurations for data processing,
5 | model architecture (encoder and decoder), and training settings.
6 |
7 | Key components:
8 | - DataConfig: Parameters for data loading and preprocessing.
9 | - EncoderConfig: Architecture details for the encoder module.
10 | - DecoderConfig: Architecture details for the decoder module.
11 | - ModelConfig: Combined model architecture settings.
12 | - TrainingConfig: Training hyperparameters and settings.
13 | - DiaConfig: Master configuration combining all components.
14 | """
15 |
16 | import os
17 | from typing import Annotated
18 |
19 | from pydantic import BaseModel, BeforeValidator, Field
20 |
21 |
22 | class DataConfig(BaseModel, frozen=True):
23 | """Configuration for data loading and preprocessing.
24 |
25 | Attributes:
26 | text_length: Maximum length of text sequences (must be multiple of 128).
27 | audio_length: Maximum length of audio sequences (must be multiple of 128).
28 | channels: Number of audio channels.
29 | text_pad_value: Value used for padding text sequences.
30 | audio_eos_value: Value representing the end of audio sequences.
31 | audio_bos_value: Value representing the beginning of audio sequences.
32 | audio_pad_value: Value used for padding audio sequences.
33 | delay_pattern: List of delay values for each audio channel.
34 | """
35 |
36 | text_length: Annotated[int, BeforeValidator(lambda x: (x + 127) // 128 * 128)] = Field(gt=0, multiple_of=128)
37 | audio_length: Annotated[int, BeforeValidator(lambda x: (x + 127) // 128 * 128)] = Field(gt=0, multiple_of=128)
38 | channels: int = Field(default=9, gt=0, multiple_of=1)
39 | text_pad_value: int = Field(default=0)
40 | audio_eos_value: int = Field(default=1024)
41 | audio_pad_value: int = Field(default=1025)
42 | audio_bos_value: int = Field(default=1026)
43 | delay_pattern: list[Annotated[int, Field(ge=0)]] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])
44 |
45 | def __hash__(self) -> int:
46 | """Generate a hash based on all fields of the config."""
47 | return hash(
48 | (
49 | self.text_length,
50 | self.audio_length,
51 | self.channels,
52 | self.text_pad_value,
53 | self.audio_pad_value,
54 | self.audio_bos_value,
55 | self.audio_eos_value,
56 | tuple(self.delay_pattern),
57 | )
58 | )
59 |
60 |
61 | class EncoderConfig(BaseModel, frozen=True):
62 | """Configuration for the encoder component of the Dia model.
63 |
64 | Attributes:
65 | n_layer: Number of transformer layers.
66 | n_embd: Embedding dimension.
67 | n_hidden: Hidden dimension size in the MLP layers.
68 | n_head: Number of attention heads.
69 | head_dim: Dimension per attention head.
70 | """
71 |
72 | n_layer: int = Field(gt=0)
73 | n_embd: int = Field(gt=0)
74 | n_hidden: int = Field(gt=0)
75 | n_head: int = Field(gt=0)
76 | head_dim: int = Field(gt=0)
77 |
78 |
79 | class DecoderConfig(BaseModel, frozen=True):
80 | """Configuration for the decoder component of the Dia model.
81 |
82 | Attributes:
83 | n_layer: Number of transformer layers.
84 | n_embd: Embedding dimension.
85 | n_hidden: Hidden dimension size in the MLP layers.
86 | gqa_query_heads: Number of query heads for grouped-query self-attention.
87 | kv_heads: Number of key/value heads for grouped-query self-attention.
88 | gqa_head_dim: Dimension per query head for grouped-query self-attention.
89 | cross_query_heads: Number of query heads for cross-attention.
90 | cross_head_dim: Dimension per cross-attention head.
91 | """
92 |
93 | n_layer: int = Field(gt=0)
94 | n_embd: int = Field(gt=0)
95 | n_hidden: int = Field(gt=0)
96 | gqa_query_heads: int = Field(gt=0)
97 | kv_heads: int = Field(gt=0)
98 | gqa_head_dim: int = Field(gt=0)
99 | cross_query_heads: int = Field(gt=0)
100 | cross_head_dim: int = Field(gt=0)
101 |
102 |
103 | class ModelConfig(BaseModel, frozen=True):
104 | """Main configuration container for the Dia model architecture.
105 |
106 | Attributes:
107 | encoder: Configuration for the encoder component.
108 | decoder: Configuration for the decoder component.
109 | src_vocab_size: Size of the source (text) vocabulary.
110 | tgt_vocab_size: Size of the target (audio code) vocabulary.
111 | dropout: Dropout probability applied within the model.
112 | normalization_layer_epsilon: Epsilon value for normalization layers (e.g., LayerNorm).
113 | weight_dtype: Data type for model weights (e.g., "float32", "bfloat16").
114 | rope_min_timescale: Minimum timescale for Rotary Positional Embeddings (RoPE).
115 | rope_max_timescale: Maximum timescale for Rotary Positional Embeddings (RoPE).
116 | """
117 |
118 | encoder: EncoderConfig
119 | decoder: DecoderConfig
120 | src_vocab_size: int = Field(default=128, gt=0)
121 | tgt_vocab_size: int = Field(default=1028, gt=0)
122 | dropout: float = Field(default=0.0, ge=0.0, lt=1.0)
123 | normalization_layer_epsilon: float = Field(default=1.0e-5, ge=0.0)
124 | weight_dtype: str = Field(default="float32", description="Weight precision")
125 | rope_min_timescale: int = Field(default=1, description="Timescale For global Attention")
126 | rope_max_timescale: int = Field(default=10_000, description="Timescale For global Attention")
127 |
128 |
129 | class TrainingConfig(BaseModel, frozen=True):
130 | pass
131 |
132 |
133 | class DiaConfig(BaseModel, frozen=True):
134 | """Master configuration for the Dia model.
135 |
136 | Combines all sub-configurations into a single validated object.
137 |
138 | Attributes:
139 | version: Configuration version string.
140 | model: Model architecture configuration.
141 | training: Training process configuration (precision settings).
142 | data: Data loading and processing configuration.
143 | """
144 |
145 | version: str = Field(default="1.0")
146 | model: ModelConfig
147 | # TODO: remove training. this is just for backward compatibility
148 | training: TrainingConfig | None = Field(default=None)
149 | data: DataConfig
150 |
151 | def save(self, path: str) -> None:
152 | """Save the current configuration instance to a JSON file.
153 |
154 | Ensures the parent directory exists and the file has a .json extension.
155 |
156 | Args:
157 | path: The target file path to save the configuration.
158 |
159 | Raises:
160 | ValueError: If the path is not a file with a .json extension.
161 | """
162 | os.makedirs(os.path.dirname(path), exist_ok=True)
163 | config_json = self.model_dump_json(indent=2)
164 | with open(path, "w") as f:
165 | f.write(config_json)
166 |
167 | @classmethod
168 | def load(cls, path: str) -> "DiaConfig | None":
169 | """Load and validate a Dia configuration from a JSON file.
170 |
171 | Args:
172 | path: The path to the configuration file.
173 |
174 | Returns:
175 | A validated DiaConfig instance if the file exists and is valid,
176 | otherwise None if the file is not found.
177 |
178 | Raises:
179 | ValueError: If the path does not point to an existing .json file.
180 | pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.
181 | """
182 | try:
183 | with open(path, "r") as f:
184 | content = f.read()
185 | return cls.model_validate_json(content)
186 | except FileNotFoundError:
187 | return None
188 |
--------------------------------------------------------------------------------
/dia/layers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from huggingface_hub import PyTorchModelHubMixin
5 | from torch import Tensor
6 | from torch.nn import RMSNorm
7 |
8 | from .config import DiaConfig
9 | from .state import DecoderInferenceState, EncoderInferenceState, KVCache
10 |
11 |
12 | def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:
13 | return tuple(ax if ax >= 0 else ndim + ax for ax in axes)
14 |
15 |
16 | class DenseGeneral(nn.Module):
17 | """
18 | PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.
19 |
20 | Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot
21 | for the generalized matrix multiplication. Weight/bias shapes are calculated
22 | and parameters created during initialization based on config.
23 | `load_weights` validates shapes and copies data.
24 |
25 | Attributes:
26 | axis (Tuple[int, ...]): Input axis or axes to contract.
27 | in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.
28 | out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).
29 | use_bias (bool): Whether to add a bias term.
30 | weight (nn.Parameter): The kernel parameter.
31 | bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).
32 | """
33 |
34 | def __init__(
35 | self,
36 | in_shapes: tuple[int, ...],
37 | out_features: tuple[int, ...],
38 | axis: tuple[int, ...] = (-1,),
39 | weight_dtype: torch.dtype | None = None,
40 | device: torch.device | None = None,
41 | ):
42 | super().__init__()
43 | self.in_shapes = in_shapes
44 | self.out_features = out_features
45 | self.axis = axis
46 | self.kernel_shape = self.in_shapes + self.out_features
47 |
48 | factory_kwargs = {"device": device, "dtype": weight_dtype}
49 | self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))
50 |
51 | def forward(self, inputs: Tensor) -> Tensor:
52 | norm_axis = _normalize_axes(self.axis, inputs.ndim)
53 | kernel_contract_axes = tuple(range(len(norm_axis)))
54 |
55 | output = torch.tensordot(
56 | inputs.to(self.weight.dtype),
57 | self.weight,
58 | dims=(norm_axis, kernel_contract_axes),
59 | ).to(inputs.dtype)
60 | return output
61 |
62 |
63 | class MlpBlock(nn.Module):
64 | """MLP block using DenseGeneral."""
65 |
66 | def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):
67 | super().__init__()
68 | self.dtype = compute_dtype
69 |
70 | self.wi_fused = DenseGeneral(
71 | in_shapes=(embed_dim,),
72 | out_features=(2, intermediate_dim),
73 | axis=(-1,),
74 | weight_dtype=compute_dtype,
75 | )
76 |
77 | self.wo = DenseGeneral(
78 | in_shapes=(intermediate_dim,),
79 | out_features=(embed_dim,),
80 | axis=(-1,),
81 | weight_dtype=compute_dtype,
82 | )
83 |
84 | def forward(self, x: torch.Tensor) -> torch.Tensor:
85 | """Forward pass."""
86 | fused_x = self.wi_fused(x)
87 |
88 | gate = fused_x[..., 0, :]
89 | up = fused_x[..., 1, :]
90 |
91 | hidden = torch.mul(F.silu(gate), up).to(self.dtype)
92 |
93 | output = self.wo(hidden)
94 | return output
95 |
96 |
97 | class RotaryEmbedding(nn.Module):
98 | """Rotary Position Embedding (RoPE) implementation in PyTorch."""
99 |
100 | def __init__(
101 | self,
102 | embedding_dims: int,
103 | min_timescale: int = 1,
104 | max_timescale: int = 10000,
105 | dtype: torch.dtype = torch.float32,
106 | ):
107 | super().__init__()
108 | if embedding_dims % 2 != 0:
109 | raise ValueError("Embedding dim must be even for RoPE.")
110 | self.embedding_dims = embedding_dims
111 | self.min_timescale = min_timescale
112 | self.max_timescale = max_timescale
113 | self.compute_dtype = dtype
114 |
115 | half_embedding_dim = embedding_dims // 2
116 | fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims
117 | timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)
118 | self.register_buffer("timescale", timescale, persistent=False)
119 |
120 | def forward(self, inputs: torch.Tensor, position: torch.Tensor):
121 | """Applies RoPE."""
122 | position = position.unsqueeze(-1).unsqueeze(-1)
123 | sinusoid_inp = position / self.timescale
124 | sin = torch.sin(sinusoid_inp)
125 | cos = torch.cos(sinusoid_inp)
126 | first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)
127 | first_part = first_half * cos - second_half * sin
128 | second_part = second_half * cos + first_half * sin
129 | return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)
130 |
131 | def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):
132 | first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)
133 | first_part = first_half * cos - second_half * sin
134 | second_part = second_half * cos + first_half * sin
135 | return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)
136 |
137 |
138 | def custom_scaled_dot_product_attention(
139 | query: torch.Tensor,
140 | key: torch.Tensor,
141 | value: torch.Tensor,
142 | attn_mask: torch.Tensor | None = None,
143 | scale: float = 1.0,
144 | is_causal: bool = False,
145 | num_gqa_groups: int = 1,
146 | ) -> torch.Tensor:
147 | """
148 | Custom scaled dot-product attention with GQA support for MPS compatibility.
149 |
150 | Args:
151 | query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads
152 | key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads
153 | value: (B, N_kv, S, H) - Value tensor
154 | attn_mask: (B, 1, T, S) - Attention mask, optional
155 | scale: Scaling factor for attention scores
156 | is_causal: If True, apply causal masking
157 | num_gqa_groups: Number of query groups per KV head (N_q / N_kv)
158 |
159 | Returns:
160 | output: (B, N_q, T, H) - Attention output
161 | """
162 | B, N_q, T, H = query.shape
163 | _, N_kv, S, _ = key.shape
164 |
165 | # For GQA, repeat key and value tensors to match query heads
166 | if num_gqa_groups > 1:
167 | key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)
168 | value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)
169 |
170 | # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)
171 | scores = torch.matmul(query, key.transpose(-1, -2)) * scale
172 |
173 | # Apply causal mask if needed
174 | if is_causal:
175 | causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))
176 | scores = scores.masked_fill(~causal_mask, float("-inf"))
177 |
178 | # Apply attention mask if provided
179 | if attn_mask is not None:
180 | scores = scores.masked_fill(~attn_mask, float("-inf"))
181 |
182 | # Softmax over the last dimension (S)
183 | attn_weights = F.softmax(scores, dim=-1)
184 |
185 | # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)
186 | output = torch.matmul(attn_weights, value)
187 |
188 | return output
189 |
190 |
191 | class CrossAttention(nn.Module):
192 | """Cross-Attention using DenseGeneral."""
193 |
194 | def __init__(
195 | self,
196 | config: DiaConfig,
197 | q_embed_dim: int,
198 | kv_embed_dim: int,
199 | num_query_heads: int,
200 | num_kv_heads: int,
201 | head_dim: int,
202 | compute_dtype: torch.dtype,
203 | out_embed_dim: int | None = None,
204 | ):
205 | super().__init__()
206 | self.num_query_heads = num_query_heads
207 | self.num_kv_heads = num_kv_heads
208 | self.head_dim = head_dim
209 | self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim
210 | self.projected_query_dim = num_query_heads * head_dim
211 | if num_query_heads % num_kv_heads != 0:
212 | raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})")
213 | self.num_gqa_groups = num_query_heads // num_kv_heads
214 |
215 | # --- Projection Layers using DenseGeneral ---
216 | self.q_proj = DenseGeneral(
217 | in_shapes=(q_embed_dim,),
218 | out_features=(num_query_heads, head_dim),
219 | axis=(-1,),
220 | weight_dtype=compute_dtype,
221 | )
222 | self.k_proj = DenseGeneral(
223 | in_shapes=(kv_embed_dim,),
224 | out_features=(num_kv_heads, head_dim),
225 | axis=(-1,),
226 | weight_dtype=compute_dtype,
227 | )
228 | self.v_proj = DenseGeneral(
229 | in_shapes=(kv_embed_dim,),
230 | out_features=(num_kv_heads, head_dim),
231 | axis=(-1,),
232 | weight_dtype=compute_dtype,
233 | )
234 | self.o_proj = DenseGeneral(
235 | in_shapes=(num_query_heads, head_dim),
236 | out_features=(self.output_dim,),
237 | axis=(-2, -1),
238 | weight_dtype=compute_dtype,
239 | )
240 |
241 | # --- Rotary Embedding ---
242 | self.rotary_emb = RotaryEmbedding(
243 | embedding_dims=self.head_dim,
244 | min_timescale=config.model.rope_min_timescale,
245 | max_timescale=config.model.rope_max_timescale,
246 | dtype=compute_dtype,
247 | )
248 |
249 | def forward(
250 | self,
251 | Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation
252 | q_positions: torch.Tensor, # (B, T)
253 | kv_positions: torch.Tensor | None = None, # (B, S)
254 | attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others
255 | cache: KVCache | None = None, # None in Encoder, KVCache in Decoder
256 | is_causal: bool = False,
257 | ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:
258 | """
259 | Performs attention calculation with optional KV caching.
260 |
261 | Args:
262 | Xq: Query tensor (B, T, D). T=1 during single-step decoding.
263 | Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.
264 | q_positions: Positions for queries (B, T).
265 | kv_positions: Positions for keys/values (B, S). If None, uses q_positions.
266 | attn_mask: Attention mask.
267 | cache: KVCache.
268 |
269 | Returns:
270 | A tuple containing:
271 | - output: The attention output tensor (B, T, output_dim).
272 | - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.
273 | """
274 | if kv_positions is None:
275 | kv_positions = q_positions
276 | original_dtype = Xq.dtype
277 |
278 | Xq_BxTxNxH = self.q_proj(Xq)
279 | Xq_BxTxNxH = self.rotary_emb(Xq_BxTxNxH, position=q_positions)
280 | Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)
281 |
282 | attn_k: torch.Tensor | None = None
283 | attn_v: torch.Tensor | None = None
284 |
285 | attn_k, attn_v = cache.k, cache.v
286 |
287 | # Use custom attention for MPS backend, otherwise use optimized PyTorch function
288 | is_mps = Xq.device.type == "mps" and torch.backends.mps.is_available()
289 | if is_mps:
290 | attn_output = custom_scaled_dot_product_attention(
291 | query=Xq_BxNxTxH,
292 | key=attn_k,
293 | value=attn_v,
294 | attn_mask=attn_mask if not is_causal else None,
295 | scale=1.0,
296 | is_causal=is_causal,
297 | num_gqa_groups=self.num_gqa_groups,
298 | )
299 | else:
300 | attn_output = F.scaled_dot_product_attention(
301 | Xq_BxNxTxH,
302 | attn_k,
303 | attn_v,
304 | attn_mask=attn_mask if not is_causal else None,
305 | scale=1.0,
306 | enable_gqa=self.num_gqa_groups > 1,
307 | is_causal=is_causal,
308 | )
309 |
310 | attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)
311 | output = self.o_proj(attn_output)
312 |
313 | return output.to(original_dtype)
314 |
315 |
316 | class FusedQKV(nn.Module):
317 | def __init__(
318 | self,
319 | in_features: int,
320 | out_features: int,
321 | bias: bool = False,
322 | num_q_heads: int = 1,
323 | q_head_dim: int = 1,
324 | num_kv_heads: int = 1,
325 | kv_head_dim: int = 1,
326 | ):
327 | super().__init__()
328 | self.num_q_heads = num_q_heads
329 | self.q_head_dim = q_head_dim
330 | self.num_kv_heads = num_kv_heads
331 | self.kv_head_dim = kv_head_dim
332 | self.q_output_dim = num_q_heads * q_head_dim
333 | self.kv_output_dim = num_kv_heads * kv_head_dim
334 | self.linear = nn.Linear(in_features, out_features, bias=bias)
335 |
336 | def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
337 | x = self.linear(inputs)
338 |
339 | q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)
340 |
341 | q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))
342 | k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))
343 | v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))
344 |
345 | return q, k, v
346 |
347 |
348 | class SelfAttention(nn.Module):
349 | """Attention using DenseGeneral."""
350 |
351 | def __init__(
352 | self,
353 | config: DiaConfig,
354 | q_embed_dim: int,
355 | kv_embed_dim: int,
356 | num_query_heads: int,
357 | num_kv_heads: int,
358 | head_dim: int,
359 | compute_dtype: torch.dtype,
360 | is_cross_attn: bool = False,
361 | out_embed_dim: int | None = None,
362 | ):
363 | super().__init__()
364 | self.num_query_heads = num_query_heads
365 | self.num_kv_heads = num_kv_heads
366 | self.head_dim = head_dim
367 | self.is_cross_attn = is_cross_attn
368 | self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim
369 | self.projected_query_dim = num_query_heads * head_dim
370 | if num_query_heads % num_kv_heads != 0:
371 | raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})")
372 | self.num_gqa_groups = num_query_heads // num_kv_heads
373 | self.kv_embed_dim = kv_embed_dim
374 | self.q_embed_dim = q_embed_dim
375 |
376 | # --- Projection Layers using DenseGeneral ---
377 | self.q_proj = DenseGeneral(
378 | in_shapes=(q_embed_dim,),
379 | out_features=(num_query_heads, head_dim),
380 | axis=(-1,),
381 | weight_dtype=compute_dtype,
382 | )
383 | self.k_proj = DenseGeneral(
384 | in_shapes=(kv_embed_dim,),
385 | out_features=(num_kv_heads, head_dim),
386 | axis=(-1,),
387 | weight_dtype=compute_dtype,
388 | )
389 | self.v_proj = DenseGeneral(
390 | in_shapes=(kv_embed_dim,),
391 | out_features=(num_kv_heads, head_dim),
392 | axis=(-1,),
393 | weight_dtype=compute_dtype,
394 | )
395 | self.o_proj = DenseGeneral(
396 | in_shapes=(num_query_heads, head_dim),
397 | out_features=(self.output_dim,),
398 | axis=(-2, -1),
399 | weight_dtype=compute_dtype,
400 | )
401 |
402 | # --- Rotary Embedding ---
403 | self.rotary_emb = RotaryEmbedding(
404 | embedding_dims=self.head_dim,
405 | min_timescale=config.model.rope_min_timescale,
406 | max_timescale=config.model.rope_max_timescale,
407 | dtype=compute_dtype,
408 | )
409 |
410 | self.is_fused_qkv = False
411 |
412 | def get_linear_weight(self, dense: DenseGeneral):
413 | W_dg = dense.weight.data
414 |
415 | out_features = 1
416 | input_features = 1
417 | for dim in dense.out_features:
418 | out_features *= dim
419 | for dim in dense.in_shapes:
420 | input_features *= dim
421 |
422 | W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)
423 | linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()
424 | return linear_weight
425 |
426 | def patch_fused_qkv(self):
427 | q_proj_weight = self.get_linear_weight(self.q_proj)
428 | k_proj_weight = self.get_linear_weight(self.k_proj)
429 | v_proj_weight = self.get_linear_weight(self.v_proj)
430 |
431 | self.qkv = FusedQKV(
432 | self.kv_embed_dim,
433 | (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),
434 | bias=False,
435 | num_q_heads=self.num_query_heads,
436 | q_head_dim=self.head_dim,
437 | num_kv_heads=self.num_kv_heads,
438 | kv_head_dim=self.head_dim,
439 | )
440 | self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)
441 |
442 | # print(f"qkv.weight.shape: {self.qkv.linear.weight.shape}")
443 | self.is_fused_qkv = True
444 |
445 | def forward(
446 | self,
447 | X: torch.Tensor, # (B, T, D) T = 1 in AR generation
448 | q_positions: torch.Tensor, # (B, T)
449 | kv_positions: torch.Tensor | None = None, # (B, S)
450 | attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others
451 | cache: KVCache | None = None, # None in Encoder, KVCache in Decoder
452 | prefill: bool = False,
453 | is_causal: bool = False,
454 | current_idx: torch.Tensor | None = None,
455 | ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:
456 | """
457 | Performs attention calculation with optional KV caching.
458 |
459 | Args:
460 | Xq: Query tensor (B, T, D). T=1 during single-step decoding.
461 | Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.
462 | q_positions: Positions for queries (B, T).
463 | kv_positions: Positions for keys/values (B, S). If None, uses q_positions.
464 | attn_mask: Attention mask.
465 | cache: KVCache.
466 | prefill: If True, use prefill mode.
467 |
468 | Returns:
469 | A tuple containing:
470 | - output: The attention output tensor (B, T, output_dim).
471 | - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.
472 | """
473 | if kv_positions is None:
474 | kv_positions = q_positions
475 |
476 | original_dtype = X.dtype
477 |
478 | if self.is_fused_qkv:
479 | Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)
480 | else:
481 | Xq_BxTxNxH = self.q_proj(X)
482 | Xk_BxSxKxH = self.k_proj(X)
483 | Xv_BxSxKxH = self.v_proj(X)
484 |
485 | position = q_positions.unsqueeze(-1).unsqueeze(-1)
486 | sinusoid_inp = position / self.rotary_emb.timescale
487 | sin = torch.sin(sinusoid_inp)
488 | cos = torch.cos(sinusoid_inp)
489 |
490 | Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)
491 | Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)
492 |
493 | Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)
494 |
495 | attn_k: torch.Tensor | None = None
496 | attn_v: torch.Tensor | None = None
497 |
498 | Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)
499 | Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)
500 |
501 | if cache is None:
502 | attn_k = Xk_BxKxSxH
503 | attn_v = Xv_BxKxSxH
504 | elif prefill:
505 | attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH
506 | cache.prefill(attn_k, attn_v)
507 | else:
508 | attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)
509 |
510 | # Use custom attention for MPS backend, otherwise use optimized PyTorch function
511 | is_mps = Xv_BxSxKxH.device.type == "mps" and torch.backends.mps.is_available()
512 | if is_mps:
513 | attn_output = custom_scaled_dot_product_attention(
514 | query=Xq_BxNxTxH,
515 | key=attn_k,
516 | value=attn_v,
517 | attn_mask=attn_mask if not is_causal else None,
518 | scale=1.0,
519 | is_causal=is_causal,
520 | num_gqa_groups=self.num_gqa_groups,
521 | )
522 | else:
523 | attn_output = F.scaled_dot_product_attention(
524 | Xq_BxNxTxH,
525 | attn_k,
526 | attn_v,
527 | attn_mask=attn_mask if not is_causal else None,
528 | scale=1.0,
529 | enable_gqa=self.num_gqa_groups > 1,
530 | is_causal=is_causal,
531 | )
532 |
533 | attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)
534 | output = self.o_proj(attn_output)
535 |
536 | return output.to(original_dtype)
537 |
538 |
539 | class EncoderLayer(nn.Module):
540 | """Transformer Encoder Layer using DenseGeneral."""
541 |
542 | def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
543 | super().__init__()
544 | self.config = config
545 | model_config = config.model
546 | enc_config = config.model.encoder
547 | embed_dim = enc_config.n_embd
548 | self.compute_dtype = compute_dtype
549 |
550 | self.pre_sa_norm = RMSNorm(
551 | embed_dim,
552 | eps=model_config.normalization_layer_epsilon,
553 | dtype=torch.float32,
554 | )
555 | self.self_attention = SelfAttention(
556 | config,
557 | q_embed_dim=embed_dim,
558 | kv_embed_dim=embed_dim,
559 | num_query_heads=enc_config.n_head,
560 | num_kv_heads=enc_config.n_head,
561 | head_dim=enc_config.head_dim,
562 | compute_dtype=compute_dtype,
563 | is_cross_attn=False,
564 | out_embed_dim=embed_dim,
565 | )
566 | self.post_sa_norm = RMSNorm(
567 | embed_dim,
568 | eps=model_config.normalization_layer_epsilon,
569 | dtype=torch.float32,
570 | )
571 | self.mlp = MlpBlock(embed_dim=embed_dim, intermediate_dim=enc_config.n_hidden, compute_dtype=compute_dtype)
572 |
573 | def forward(
574 | self,
575 | x: torch.Tensor,
576 | state: EncoderInferenceState,
577 | ) -> torch.Tensor:
578 | residual = x
579 | x_norm = self.pre_sa_norm(x).to(self.compute_dtype)
580 |
581 | sa_out = self.self_attention(
582 | X=x_norm,
583 | q_positions=state.positions,
584 | kv_positions=state.positions,
585 | attn_mask=state.attn_mask,
586 | )
587 | x = residual + sa_out
588 |
589 | residual = x
590 | x_norm = self.post_sa_norm(x).to(self.compute_dtype)
591 | mlp_out = self.mlp(x_norm)
592 | x = residual + mlp_out
593 |
594 | return x
595 |
596 |
597 | class Encoder(nn.Module):
598 | """Transformer Encoder Stack using DenseGeneral."""
599 |
600 | def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
601 | super().__init__()
602 | self.config = config
603 | model_config = config.model
604 | enc_config = config.model.encoder
605 | self.compute_dtype = compute_dtype
606 |
607 | self.embedding = nn.Embedding(
608 | model_config.src_vocab_size,
609 | enc_config.n_embd,
610 | dtype=compute_dtype,
611 | )
612 | self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.n_layer)])
613 | self.norm = RMSNorm(
614 | enc_config.n_embd,
615 | eps=model_config.normalization_layer_epsilon,
616 | dtype=torch.float32,
617 | )
618 |
619 | def forward(
620 | self,
621 | x_ids: torch.Tensor,
622 | state: EncoderInferenceState,
623 | ) -> torch.Tensor:
624 | x = self.embedding(x_ids)
625 |
626 | for layer in self.layers:
627 | x = layer(x, state)
628 |
629 | x = self.norm(x).to(self.compute_dtype)
630 | return x
631 |
632 |
633 | class DecoderLayer(nn.Module):
634 | """Transformer Decoder Layer using DenseGeneral."""
635 |
636 | def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
637 | super().__init__()
638 | self.config = config
639 | model_config = config.model
640 | dec_config = config.model.decoder
641 | enc_config = config.model.encoder
642 | dec_embed_dim = dec_config.n_embd
643 | enc_embed_dim = enc_config.n_embd
644 | self.compute_dtype = compute_dtype
645 |
646 | # Norms
647 | self.pre_sa_norm = RMSNorm(
648 | dec_embed_dim,
649 | eps=model_config.normalization_layer_epsilon,
650 | dtype=torch.float32,
651 | )
652 | self.pre_ca_norm = RMSNorm(
653 | dec_embed_dim,
654 | eps=model_config.normalization_layer_epsilon,
655 | dtype=torch.float32,
656 | )
657 | self.pre_mlp_norm = RMSNorm(
658 | dec_embed_dim,
659 | eps=model_config.normalization_layer_epsilon,
660 | dtype=torch.float32,
661 | )
662 |
663 | # Self-Attention (GQA) with Causal Masking
664 | self.self_attention = SelfAttention(
665 | config,
666 | q_embed_dim=dec_embed_dim,
667 | kv_embed_dim=dec_embed_dim,
668 | num_query_heads=dec_config.gqa_query_heads,
669 | num_kv_heads=dec_config.kv_heads,
670 | head_dim=dec_config.gqa_head_dim,
671 | compute_dtype=compute_dtype,
672 | is_cross_attn=False,
673 | out_embed_dim=dec_embed_dim,
674 | )
675 | # Cross-Attention (MHA)
676 | self.cross_attention = CrossAttention(
677 | config=config,
678 | q_embed_dim=dec_embed_dim,
679 | kv_embed_dim=enc_embed_dim, # Note kv_embed_dim
680 | num_query_heads=dec_config.cross_query_heads,
681 | num_kv_heads=dec_config.cross_query_heads,
682 | head_dim=dec_config.cross_head_dim,
683 | compute_dtype=compute_dtype,
684 | out_embed_dim=dec_embed_dim,
685 | )
686 | # MLP
687 | self.mlp = MlpBlock(
688 | embed_dim=dec_embed_dim,
689 | intermediate_dim=dec_config.n_hidden,
690 | compute_dtype=compute_dtype,
691 | )
692 |
693 | def forward(
694 | self,
695 | x: torch.Tensor,
696 | state: DecoderInferenceState,
697 | self_attn_cache: KVCache | None = None,
698 | cross_attn_cache: KVCache | None = None,
699 | prefill: bool = False,
700 | current_idx: int = 0,
701 | ) -> torch.Tensor:
702 | residual = x
703 | x_norm = self.pre_sa_norm(x).to(self.compute_dtype)
704 |
705 | self_attn_mask = state.casual_attn_mask[None, None, current_idx]
706 |
707 | sa_out = self.self_attention(
708 | X=x_norm, # (2, 1, D)
709 | q_positions=state.dec_positions, # (2, 1)
710 | kv_positions=state.dec_positions, # (2, 1)
711 | attn_mask=self_attn_mask,
712 | cache=self_attn_cache,
713 | prefill=prefill,
714 | is_causal=prefill,
715 | current_idx=current_idx,
716 | )
717 |
718 | x = residual + sa_out
719 |
720 | residual = x
721 | x_norm = self.pre_ca_norm(x).to(self.compute_dtype)
722 | ca_out = self.cross_attention(
723 | Xq=x_norm,
724 | q_positions=state.dec_positions,
725 | kv_positions=state.enc_positions,
726 | attn_mask=state.cross_attn_mask,
727 | cache=cross_attn_cache,
728 | )
729 | x = residual + ca_out
730 |
731 | residual = x
732 | x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)
733 | mlp_out = self.mlp(x_norm)
734 | x = residual + mlp_out
735 |
736 | return x
737 |
738 |
739 | class Decoder(nn.Module):
740 | """Transformer Decoder Stack using DenseGeneral."""
741 |
742 | def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
743 | super().__init__()
744 | self.config = config
745 | model_config = config.model
746 | dec_config = config.model.decoder
747 | data_config = config.data
748 | self.num_channels = data_config.channels
749 | self.num_layers = dec_config.n_layer
750 |
751 | self.embeddings = nn.ModuleList(
752 | [
753 | nn.Embedding(model_config.tgt_vocab_size, dec_config.n_embd, dtype=compute_dtype)
754 | for _ in range(self.num_channels)
755 | ]
756 | )
757 | self.layers = nn.ModuleList(
758 | [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]
759 | )
760 |
761 | self.norm = RMSNorm(
762 | dec_config.n_embd,
763 | eps=model_config.normalization_layer_epsilon,
764 | dtype=torch.float32,
765 | )
766 |
767 | self.logits_dense = DenseGeneral(
768 | in_shapes=(dec_config.n_embd,),
769 | out_features=(self.num_channels, model_config.tgt_vocab_size),
770 | axis=(-1,),
771 | weight_dtype=compute_dtype,
772 | )
773 |
774 | def precompute_cross_attn_cache(
775 | self,
776 | enc_out: torch.Tensor, # (B, S, E)
777 | enc_positions: torch.Tensor, # (B, S)
778 | k_padding_mask: torch.Tensor | None = None,
779 | ) -> list[KVCache]:
780 | """
781 | Computes the Key and Value tensors for cross-attention for each layer from the encoder output.
782 | """
783 | per_layer_kv_cache: list[KVCache] = []
784 |
785 | for layer in self.layers:
786 | cross_attn_module = layer.cross_attention
787 | k_proj = cross_attn_module.k_proj(enc_out)
788 | v_proj = cross_attn_module.v_proj(enc_out)
789 |
790 | k_proj = cross_attn_module.rotary_emb(k_proj, position=enc_positions)
791 | k = k_proj.transpose(1, 2)
792 | v = v_proj.transpose(1, 2)
793 | if k_padding_mask is not None:
794 | k = k.masked_fill(~k_padding_mask.unsqueeze(1).unsqueeze(3), 0.0)
795 |
796 | per_layer_kv_cache.append(KVCache.from_kv(k, v))
797 |
798 | return per_layer_kv_cache
799 |
800 | def decode_step(
801 | self,
802 | tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]
803 | state: DecoderInferenceState,
804 | current_idx: int,
805 | ) -> torch.Tensor:
806 | """
807 | Performs a single decoding step, managing KV caches layer by layer.
808 |
809 | Returns:
810 | A tuple containing:
811 | - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.
812 | """
813 |
814 | x = None
815 | for i in range(self.num_channels):
816 | channel_tokens = tgt_ids_Bx1xC[..., i]
817 | channel_embed = self.embeddings[i](channel_tokens)
818 | x = channel_embed if x is None else x + channel_embed
819 |
820 | for i, layer in enumerate(self.layers):
821 | self_cache = state.self_attn_cache[i]
822 | cross_cache = state.cross_attn_cache[i]
823 | x = layer(
824 | x, # (2, 1, D)
825 | state,
826 | self_attn_cache=self_cache,
827 | cross_attn_cache=cross_cache,
828 | current_idx=current_idx,
829 | )
830 |
831 | x = self.norm(x)
832 | logits_Bx1xCxV = self.logits_dense(x)
833 |
834 | return logits_Bx1xCxV.to(torch.float32)
835 |
836 | def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:
837 | """
838 | Forward pass for the Decoder stack, managing KV caches.
839 |
840 | Args:
841 | tgt_ids_BxTxC: Target token IDs (B, T, C).
842 | encoder_out: Output from the encoder (B, S, E).
843 | tgt_positions: Positions for target sequence (B, T).
844 | src_positions: Positions for source sequence (B, S).
845 | self_attn_mask: Mask for self-attention.
846 | cross_attn_mask: Mask for cross-attention.
847 | past_key_values: List containing the self-attention KV cache for each layer
848 | from the previous decoding step. `len(past_key_values)` should
849 | equal `num_layers`.
850 | precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache
851 | derived from `encoder_out`. This is passed identically
852 | to all layers.
853 |
854 | Returns:
855 | A tuple containing:
856 | - logits: The final output logits (B, T, C * V), cast to float32.
857 | - present_key_values: A list containing the updated self-attention KV cache
858 | for each layer for the *current* decoding step.
859 | """
860 | _, _, num_channels_in = tgt_ids_BxTxC.shape
861 | assert num_channels_in == self.num_channels, "Input channels mismatch"
862 |
863 | # Embeddings
864 | x = None
865 | for i in range(self.num_channels):
866 | channel_tokens = tgt_ids_BxTxC[..., i]
867 | channel_embed = self.embeddings[i](channel_tokens)
868 | x = channel_embed if x is None else x + channel_embed
869 |
870 | for i, layer in enumerate(self.layers):
871 | self_cache = state.self_attn_cache[i]
872 | cross_cache = state.cross_attn_cache[i]
873 | x = layer(x, state, self_attn_cache=self_cache, cross_attn_cache=cross_cache, prefill=True)
874 |
875 | # Final Norm
876 | x = self.norm(x)
877 | logits_BxTxCxV = self.logits_dense(x)
878 |
879 | return logits_BxTxCxV.to(torch.float32)
880 |
881 |
882 | class DiaModel(
883 | nn.Module,
884 | PyTorchModelHubMixin,
885 | repo_url="https://github.com/nari-labs/dia",
886 | pipeline_tag="text-to-speech",
887 | license="apache-2.0",
888 | coders={
889 | DiaConfig: (
890 | lambda x: x.model_dump(),
891 | lambda data: DiaConfig.model_validate(data),
892 | ),
893 | },
894 | ):
895 | """PyTorch Dia Model using DenseGeneral."""
896 |
897 | def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
898 | super().__init__()
899 | self.config = config
900 | self.encoder = Encoder(config, compute_dtype)
901 | self.decoder = Decoder(config, compute_dtype)
902 |
--------------------------------------------------------------------------------
/dia/model.py:
--------------------------------------------------------------------------------
1 | import time
2 | from enum import Enum
3 |
4 | import numpy as np
5 | import torch
6 | import torchaudio
7 |
8 | # Assuming these imports are relative to the package structure
9 | from .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay
10 | from .config import DiaConfig
11 | from .layers import DiaModel
12 | from .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState
13 |
14 |
15 | DEFAULT_SAMPLE_RATE = 44100
16 | SAMPLE_RATE_RATIO = 512
17 |
18 |
19 | def _get_default_device():
20 | if torch.cuda.is_available():
21 | return torch.device("cuda")
22 | elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
23 | return torch.device("mps")
24 | return torch.device("cpu")
25 |
26 |
27 | def _sample_next_token(
28 | logits_BCxV: torch.Tensor,
29 | temperature: float,
30 | top_p: float,
31 | top_k: int | None,
32 | audio_eos_value: int,
33 | ) -> torch.Tensor:
34 | if temperature == 0.0:
35 | return torch.argmax(logits_BCxV, dim=-1)
36 |
37 | logits_BCxV = logits_BCxV / temperature
38 |
39 | if audio_eos_value is not None and audio_eos_value >= 0:
40 | top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)
41 | eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value
42 | mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)
43 | mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True
44 | logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)
45 |
46 | if top_k is not None:
47 | _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)
48 | mask = torch.ones_like(logits_BCxV, dtype=torch.bool)
49 | mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)
50 | logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)
51 |
52 | if top_p < 1.0:
53 | probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
54 | sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)
55 | cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)
56 |
57 | sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p
58 | sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)
59 | sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])
60 |
61 | indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)
62 | indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(
63 | dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV
64 | )
65 | logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)
66 |
67 | final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
68 |
69 | sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)
70 | sampled_indices_C = sampled_indices_BC.squeeze(-1)
71 | return sampled_indices_C
72 |
73 |
74 | class ComputeDtype(str, Enum):
75 | FLOAT32 = "float32"
76 | FLOAT16 = "float16"
77 | BFLOAT16 = "bfloat16"
78 |
79 | def to_dtype(self) -> torch.dtype:
80 | if self == ComputeDtype.FLOAT32:
81 | return torch.float32
82 | elif self == ComputeDtype.FLOAT16:
83 | return torch.float16
84 | elif self == ComputeDtype.BFLOAT16:
85 | return torch.bfloat16
86 | else:
87 | raise ValueError(f"Unsupported compute dtype: {self}")
88 |
89 |
90 | class Dia:
91 | def __init__(
92 | self,
93 | config: DiaConfig,
94 | compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
95 | device: torch.device | None = None,
96 | load_dac: bool = True,
97 | ):
98 | """Initializes the Dia model.
99 |
100 | Args:
101 | config: The configuration object for the model.
102 | compute_dtype: The computation dtype to use.
103 | device: The device to load the model onto. If None, will automatically select the best available device.
104 | load_dac: Whether to load the DAC model.
105 |
106 | Raises:
107 | RuntimeError: If there is an error loading the DAC model.
108 | """
109 | super().__init__()
110 | self.config = config
111 | self.device = device if device is not None else _get_default_device()
112 | if isinstance(compute_dtype, str):
113 | compute_dtype = ComputeDtype(compute_dtype)
114 | self.compute_dtype = compute_dtype.to_dtype()
115 | self.model: DiaModel = DiaModel(config, self.compute_dtype)
116 | self.dac_model = None
117 | self._compiled_step = None
118 | self.load_dac = load_dac
119 |
120 | if not self.load_dac:
121 | print("Warning: DAC model will not be loaded. This is not recommended.")
122 |
123 | if torch.cuda.is_available():
124 | torch.backends.cuda.matmul.allow_tf32 = True
125 |
126 | @classmethod
127 | def from_local(
128 | cls,
129 | config_path: str,
130 | checkpoint_path: str,
131 | compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
132 | device: torch.device | None = None,
133 | load_dac: bool = True,
134 | ) -> "Dia":
135 | """Loads the Dia model from local configuration and checkpoint files.
136 |
137 | Args:
138 | config_path: Path to the configuration JSON file.
139 | checkpoint_path: Path to the model checkpoint (.pth) file.
140 | compute_dtype: The computation dtype to use.
141 | device: The device to load the model onto. If None, will automatically select the best available device.
142 | load_dac: Whether to load the DAC model.
143 |
144 | Returns:
145 | An instance of the Dia model loaded with weights and set to eval mode.
146 |
147 | Raises:
148 | FileNotFoundError: If the config or checkpoint file is not found.
149 | RuntimeError: If there is an error loading the checkpoint.
150 | """
151 | config = DiaConfig.load(config_path)
152 | if config is None:
153 | raise FileNotFoundError(f"Config file not found at {config_path}")
154 |
155 | dia = cls(config, compute_dtype, device, load_dac)
156 |
157 | try:
158 | state_dict = torch.load(checkpoint_path, map_location=dia.device)
159 | dia.model.load_state_dict(state_dict)
160 | except FileNotFoundError:
161 | raise FileNotFoundError(f"Checkpoint file not found at {checkpoint_path}")
162 | except Exception as e:
163 | raise RuntimeError(f"Error loading checkpoint from {checkpoint_path}") from e
164 |
165 | dia.model.to(dia.device)
166 | dia.model.eval()
167 | if load_dac:
168 | dia._load_dac_model()
169 | return dia
170 |
171 | @classmethod
172 | def from_pretrained(
173 | cls,
174 | model_name: str = "nari-labs/Dia-1.6B",
175 | compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
176 | device: torch.device | None = None,
177 | load_dac: bool = True,
178 | ) -> "Dia":
179 | """Loads the Dia model from a Hugging Face Hub repository.
180 |
181 | Downloads the configuration and checkpoint files from the specified
182 | repository ID and then loads the model.
183 |
184 | Args:
185 | model_name: The Hugging Face Hub repository ID (e.g., "nari-labs/Dia-1.6B").
186 | compute_dtype: The computation dtype to use.
187 | device: The device to load the model onto. If None, will automatically select the best available device.
188 | load_dac: Whether to load the DAC model.
189 |
190 | Returns:
191 | An instance of the Dia model loaded with weights and set to eval mode.
192 |
193 | Raises:
194 | FileNotFoundError: If config or checkpoint download/loading fails.
195 | RuntimeError: If there is an error loading the checkpoint.
196 | """
197 | if isinstance(compute_dtype, str):
198 | compute_dtype = ComputeDtype(compute_dtype)
199 |
200 | # Load model directly using DiaModel's from_pretrained which handles HF download
201 | try:
202 | loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())
203 | except Exception as e:
204 | raise RuntimeError(f"Error loading model from Hugging Face Hub ({model_name})") from e
205 |
206 | config = loaded_model.config # Get config from the loaded model
207 | dia = cls(config, compute_dtype, device, load_dac)
208 |
209 | dia.model = loaded_model # Assign the already loaded model
210 | dia.model.to(dia.device)
211 | dia.model.eval()
212 | if load_dac:
213 | dia._load_dac_model()
214 | return dia
215 |
216 | def _load_dac_model(self):
217 | """Loads the Descript Audio Codec (DAC) model.
218 |
219 | Downloads the DAC model if necessary and loads it onto the specified device.
220 | Sets the DAC model to evaluation mode.
221 |
222 | Raises:
223 | RuntimeError: If downloading or loading the DAC model fails.
224 | """
225 | import dac
226 |
227 | try:
228 | dac_model_path = dac.utils.download()
229 | dac_model = dac.DAC.load(dac_model_path).to(self.device)
230 | dac_model.eval() # Ensure DAC is in eval mode
231 | except Exception as e:
232 | raise RuntimeError("Failed to load DAC model") from e
233 | self.dac_model = dac_model
234 |
235 | def _encode_text(self, text: str) -> torch.Tensor:
236 | """Encodes the input text string into a tensor of token IDs using byte-level encoding.
237 |
238 | Special tokens [S1] and [S2] are replaced by their byte values. The resulting
239 | sequence is truncated to the maximum configured text length.
240 |
241 | Args:
242 | text: The input text string.
243 |
244 | Returns:
245 | A tensor containing the encoded byte token IDs.
246 | """
247 | max_len = self.config.data.text_length
248 |
249 | byte_text = text.encode("utf-8")
250 | # Replace special tokens with their byte values if needed by the specific tokenizer/config
251 | # Assuming byte values 1 and 2 are correct placeholders based on original code
252 | replaced_bytes = byte_text.replace(b"[S1]", b"\x01").replace(b"[S2]", b"\x02")
253 | text_tokens = list(replaced_bytes)
254 | return torch.tensor(
255 | text_tokens[:max_len],
256 | dtype=torch.long,
257 | device=self.device,
258 | )
259 |
260 | def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:
261 | """Pads the text input to the maximum length."""
262 | text_pad_value = self.config.data.text_pad_value
263 | max_len = self.config.data.text_length
264 | batch_size = len(text_tokens)
265 |
266 | src_tokens = torch.full(
267 | (batch_size, 1, max_len),
268 | fill_value=text_pad_value,
269 | dtype=torch.long,
270 | device=self.device,
271 | )
272 | for i in range(batch_size):
273 | current_len = len(text_tokens[i])
274 | src_tokens[i, 0, :current_len] = text_tokens[i]
275 | return src_tokens
276 |
277 | def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:
278 | """Prepares the audio prompt tensor for the decoder.
279 |
280 | Handles padding, adds the beginning-of-sequence (BOS) token, applies the
281 | delay pattern, and determines the number of prefill steps for each item
282 | in the batch.
283 |
284 | Args:
285 | audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.
286 | Each tensor should have shape [T, C].
287 |
288 | Returns:
289 | A tuple containing:
290 | - delayed_batch (torch.Tensor): The prepared audio prompt tensor with
291 | delays applied, shape [B, T_max_padded, C].
292 | - prefill_steps (list[int]): A list containing the number of valid
293 | tokens (including BOS) for each prompt in the batch.
294 | """
295 | num_channels = self.config.data.channels
296 | audio_bos_value = self.config.data.audio_bos_value
297 | delay_pattern = self.config.data.delay_pattern
298 | max_delay_pattern = max(delay_pattern)
299 | batch_size = len(audio_prompts)
300 |
301 | max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern
302 | prefill_steps = []
303 |
304 | prefill = torch.full(
305 | (batch_size, max_len, num_channels),
306 | fill_value=-1,
307 | dtype=torch.int,
308 | device=self.device,
309 | )
310 |
311 | prefill[:, 0, :] = audio_bos_value
312 |
313 | for i in range(batch_size):
314 | prompt = audio_prompts[i]
315 | if prompt is not None:
316 | prompt = prompt.to(device=self.device, dtype=torch.int)
317 | prefill[i, 1 : prompt.shape[0] + 1, :] = prompt
318 | prefill_steps.append(prompt.shape[0] + 1)
319 | else:
320 | prefill_steps.append(1)
321 |
322 | delay_precomp = build_delay_indices(
323 | B=batch_size,
324 | T=max_len,
325 | C=num_channels,
326 | delay_pattern=delay_pattern,
327 | )
328 |
329 | delayed_batch = apply_audio_delay(
330 | audio_BxTxC=prefill,
331 | pad_value=-1,
332 | bos_value=audio_bos_value,
333 | precomp=delay_precomp,
334 | )
335 |
336 | return delayed_batch, prefill_steps
337 |
338 | def _prepare_generation(
339 | self,
340 | text: torch.Tensor,
341 | audio_prompts: list[torch.Tensor | None],
342 | max_tokens: int | None = None,
343 | ):
344 | """Initializes the model state for generation.
345 |
346 | Encodes the text input (conditional and unconditional), prepares the
347 | encoder and decoder states (including KV caches and cross-attention),
348 | prepares the audio prompt, and performs the initial decoder prefill steps
349 | based on the audio prompts.
350 |
351 | Args:
352 | text: The padded text input tensor, shape [B, 1, T_text].
353 | audio_prompts: A list of prepared audio prompt tensors or None.
354 |
355 | Returns:
356 | A tuple containing:
357 | - dec_state (DecoderInferenceState): The initialized decoder state.
358 | - dec_output (DecoderOutput): The initialized decoder output manager,
359 | containing the prefilled audio tokens.
360 | """
361 | batch_size = text.shape[0]
362 |
363 | enc_input_uncond = torch.zeros_like(text)
364 | enc_input_cond = text
365 | stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)
366 | enc_input = stacked_inputs.view(2 * batch_size, -1)
367 |
368 | enc_state = EncoderInferenceState.new(self.config, enc_input_cond)
369 | encoder_out = self.model.encoder(enc_input, enc_state)
370 |
371 | dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(
372 | encoder_out, enc_state.positions, enc_state.padding_mask
373 | )
374 | dec_state = DecoderInferenceState.new(
375 | self.config,
376 | enc_state,
377 | encoder_out,
378 | dec_cross_attn_cache,
379 | self.compute_dtype,
380 | max_generation_length=max_tokens,
381 | )
382 | prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)
383 |
384 | dec_output = DecoderOutput.new(batch_size, self.config, self.device)
385 | dec_output.prefill(prefill, prefill_steps)
386 |
387 | dec_step = min(prefill_steps) - 1
388 | if dec_step > 0:
389 | dec_state.prepare_step(0, dec_step)
390 | tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)
391 | self.model.decoder.forward(tokens_BxTxC, dec_state)
392 |
393 | return dec_state, dec_output
394 |
395 | def _decoder_step(
396 | self,
397 | tokens_Bx1xC: torch.Tensor,
398 | dec_state: DecoderInferenceState,
399 | cfg_scale: float,
400 | temperature: float,
401 | top_p: float,
402 | top_k: int,
403 | current_idx: int,
404 | ) -> torch.Tensor:
405 | """Performs a single step of the decoder inference.
406 |
407 | Takes the tokens from the previous step, runs them through the decoder
408 | (for both conditional and unconditional paths), applies classifier-free
409 | guidance (CFG), samples the next token using temperature, top-p, and top-k
410 | sampling, and applies constraints (e.g., preventing EOS in certain channels).
411 |
412 | Args:
413 | tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].
414 | Repeated for CFG (unconditional and conditional).
415 | dec_state: The current state of the decoder (KV caches, etc.).
416 | cfg_scale: The scale factor for classifier-free guidance.
417 | temperature: The temperature for sampling.
418 | top_p: The cumulative probability threshold for top-p sampling.
419 | top_k: The number of top logits to consider for top-k sampling.
420 | current_idx: The current generation step index.
421 |
422 | Returns:
423 | torch.Tensor: The sampled next tokens for each item in the batch,
424 | shape [B, C].
425 | """
426 | B = tokens_Bx1xC.shape[0] // 2
427 |
428 | audio_eos_value = self.config.data.audio_eos_value
429 | logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)
430 |
431 | logits_last_2BxCxV = logits_Bx1xCxV[:, -1]
432 | logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])
433 |
434 | uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]
435 | cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]
436 | logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)
437 |
438 | logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(
439 | logits_BxCxV[:, :, audio_eos_value + 1 :],
440 | fill_value=-torch.inf,
441 | )
442 | logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(
443 | logits_BxCxV[:, 1:, audio_eos_value:],
444 | fill_value=-torch.inf,
445 | )
446 | logits_BxCxV[:, 0, audio_eos_value] *= torch.tensor(0.8, device=self.device)
447 |
448 | flat_logits_BCxV = logits_BxCxV.view(B * self.config.data.channels, -1)
449 |
450 | pred_BC = _sample_next_token(
451 | flat_logits_BCxV.float(),
452 | temperature=temperature,
453 | top_p=top_p,
454 | top_k=top_k,
455 | audio_eos_value=audio_eos_value,
456 | )
457 |
458 | pred_BxC = pred_BC.view(B, self.config.data.channels)
459 | return pred_BxC
460 |
461 | def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:
462 | """Converts generated delayed codes into audio waveforms.
463 |
464 | Reverts the delay pattern applied during generation, decodes the resulting
465 | codebook using the DAC model (if loaded), and returns a list of audio
466 | waveforms as NumPy arrays. If DAC is not loaded, returns the raw codebook indices.
467 |
468 | Args:
469 | generated_codes: The tensor of generated audio codes with delays,
470 | shape [B, T_gen, C].
471 | lengths_Bx: A tensor containing the valid length of generated codes
472 | (excluding padding and BOS/EOS markers) for each item
473 | in the batch, shape [B].
474 |
475 | Returns:
476 | A list of NumPy arrays, where each array represents the generated audio
477 | waveform for one item in the batch. If DAC is not loaded, returns the
478 | raw, reverted codebook indices as NumPy arrays.
479 | """
480 | num_channels = self.config.data.channels
481 | batch_size = generated_codes.shape[0]
482 | seq_length = generated_codes.shape[1]
483 | delay_pattern = self.config.data.delay_pattern
484 | audio_pad_value = self.config.data.audio_pad_value
485 | max_delay_pattern = max(delay_pattern)
486 |
487 | revert_precomp = build_revert_indices(
488 | B=batch_size,
489 | T=seq_length,
490 | C=num_channels,
491 | delay_pattern=delay_pattern,
492 | )
493 |
494 | codebook = revert_audio_delay(
495 | audio_BxTxC=generated_codes,
496 | pad_value=audio_pad_value,
497 | precomp=revert_precomp,
498 | T=seq_length,
499 | )[:, :-max_delay_pattern, :]
500 |
501 | min_valid_index = 0
502 | max_valid_index = 1023
503 | invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)
504 | codebook[invalid_mask] = 0
505 |
506 | audios = []
507 |
508 | if self.load_dac:
509 | for i in range(batch_size):
510 | audio = self._decode(codebook[i, : lengths_Bx[i], :])
511 | audio_np = audio.cpu().numpy()
512 | audios.append(audio_np)
513 | else:
514 | for i in range(batch_size):
515 | audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())
516 | return audios
517 |
518 | @torch.no_grad()
519 | @torch.inference_mode()
520 | def _encode(self, audio: torch.Tensor) -> torch.Tensor:
521 | """
522 | Encodes the given audio waveform into a tensor of DAC codebook indices
523 | """
524 | audio = audio.unsqueeze(0)
525 | audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)
526 | _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)
527 | encoded_frame: torch.Tensor
528 | return encoded_frame.squeeze(0).transpose(0, 1)
529 |
530 | @torch.no_grad()
531 | @torch.inference_mode()
532 | def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:
533 | """
534 | Decodes the given frames into an output audio waveform
535 | """
536 | audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)
537 | audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)
538 | audio_values = self.dac_model.decode(audio_values)
539 | audio_values: torch.Tensor
540 | return audio_values.squeeze()
541 |
542 | def load_audio(self, audio_path: str) -> torch.Tensor:
543 | """Loads and preprocesses an audio file for use as a prompt.
544 |
545 | Loads the audio file, resamples it to the target sample rate if necessary,
546 | preprocesses it using the DAC model's preprocessing, and encodes it into
547 | DAC codebook indices.
548 |
549 | Args:
550 | audio_path: Path to the audio file.
551 |
552 | Returns:
553 | torch.Tensor: The encoded audio prompt as DAC codebook indices,
554 | shape [T, C].
555 |
556 | Raises:
557 | RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).
558 | FileNotFoundError: If the audio file cannot be found.
559 | Exception: If there's an error during loading or processing.
560 | """
561 | if self.dac_model is None:
562 | raise RuntimeError("DAC model is required for loading audio prompts but was not loaded.")
563 | audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T
564 | if sr != DEFAULT_SAMPLE_RATE:
565 | audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)
566 | # Convert to mono if stereo
567 | if audio.shape[0] > 1:
568 | audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono
569 | return self._encode(audio.to(self.device))
570 |
571 | def save_audio(self, path: str, audio: np.ndarray):
572 | """Saves the generated audio waveform to a file.
573 |
574 | Uses the soundfile library to write the NumPy audio array to the specified
575 | path with the default sample rate.
576 |
577 | Args:
578 | path: The path where the audio file will be saved.
579 | audio: The audio waveform as a NumPy array.
580 | """
581 | import soundfile as sf
582 |
583 | sf.write(path, audio, DEFAULT_SAMPLE_RATE)
584 |
585 | @torch.inference_mode()
586 | def generate(
587 | self,
588 | text: str | list[str],
589 | max_tokens: int | None = None,
590 | cfg_scale: float = 3.0,
591 | temperature: float = 1.2,
592 | top_p: float = 0.95,
593 | use_torch_compile: bool = False,
594 | cfg_filter_top_k: int = 45,
595 | audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,
596 | audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,
597 | use_cfg_filter: bool | None = None,
598 | verbose: bool = False,
599 | ) -> np.ndarray | list[np.ndarray]:
600 | """Generates audio corresponding to the input text.
601 |
602 | Args:
603 | text: The input text prompt, or a list of text prompts for batch generation.
604 | max_tokens: The maximum number of audio tokens to generate per prompt.
605 | Defaults to the model's configured audio length if None.
606 | cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values
607 | lead to stronger guidance towards the text prompt.
608 | temperature: The temperature for sampling. Higher values increase randomness.
609 | top_p: The cumulative probability threshold for nucleus (top-p) sampling.
610 | use_torch_compile: Whether to compile the generation steps using torch.compile.
611 | Can significantly speed up generation after the initial
612 | compilation overhead. Defaults to False.
613 | cfg_filter_top_k: The number of top logits to consider during CFG filtering.
614 | (Note: This parameter name might be slightly misleading based
615 | on the code; it's used in the `_sample_next_token` function.)
616 | audio_prompt: An audio prompt or list of prompts to condition the generation.
617 | Can be a file path (str), a pre-loaded tensor (DAC codes), or None.
618 | If a list, its length must match the batch size of the text input.
619 | audio_prompt_path: (Deprecated) Use `audio_prompt` instead.
620 | use_cfg_filter: (Deprecated) This parameter is no longer used.
621 | verbose: If True, prints progress information during generation, including
622 | speed metrics.
623 |
624 | Returns:
625 | If a single text prompt was provided, returns a NumPy array containing the
626 | generated audio waveform.
627 | If a list of text prompts was provided, returns a list of NumPy arrays,
628 | each corresponding to a prompt in the input list. Returns None for a
629 | sequence if no audio was generated for it.
630 | """
631 | batch_size = len(text) if isinstance(text, list) else 1
632 | audio_eos_value = self.config.data.audio_eos_value
633 | audio_pad_value = self.config.data.audio_pad_value
634 | delay_pattern = self.config.data.delay_pattern
635 | max_tokens = self.config.data.audio_length if max_tokens is None else max_tokens
636 | max_delay_pattern = max(delay_pattern)
637 | delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)
638 | self.model.eval()
639 |
640 | if audio_prompt_path:
641 | print("Warning: audio_prompt_path is deprecated. Use audio_prompt instead.")
642 | audio_prompt = audio_prompt_path
643 | if use_cfg_filter is not None:
644 | print("Warning: use_cfg_filter is deprecated.")
645 |
646 | if verbose:
647 | total_start_time = time.time()
648 |
649 | if use_torch_compile and not hasattr(self, "_compiled"):
650 | # Compilation can take about a minute.
651 | self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)
652 | self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode="max-autotune")
653 | self._compiled = True
654 |
655 | if isinstance(audio_prompt, list):
656 | audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]
657 | elif isinstance(audio_prompt, str):
658 | audio_prompt = [self.load_audio(audio_prompt)]
659 | elif isinstance(audio_prompt, torch.Tensor):
660 | audio_prompt = [audio_prompt]
661 | elif audio_prompt is None:
662 | audio_prompt = [None] * batch_size
663 |
664 | assert len(audio_prompt) == batch_size, "Number of audio prompts must match batch size"
665 |
666 | if isinstance(text, list):
667 | text = [self._encode_text(t) for t in text]
668 | else:
669 | text = [self._encode_text(text)]
670 | text = self._pad_text_input(text)
671 |
672 | dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)
673 | dec_step = min(dec_output.prefill_steps) - 1
674 | current_idx = torch.tensor([dec_step], device=self.device)
675 |
676 | eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)
677 | eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)
678 | finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)
679 |
680 | bos_over = False
681 |
682 | if verbose:
683 | print("generate: starting generation loop")
684 | if use_torch_compile:
685 | print("generate: using use_torch_compile=True, the first step may be slow")
686 | start_time = time.time()
687 |
688 | # --- Generation Loop ---
689 | while dec_step < max_tokens:
690 | if (eos_countdown_Bx == 0).all():
691 | break
692 |
693 | current_step_idx = dec_step + 1
694 | torch.compiler.cudagraph_mark_step_begin()
695 | dec_state.prepare_step(dec_step)
696 | tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG
697 |
698 | pred_BxC = self._decoder_step(
699 | tokens_Bx1xC,
700 | dec_state,
701 | cfg_scale,
702 | temperature,
703 | top_p,
704 | cfg_filter_top_k,
705 | current_idx,
706 | )
707 |
708 | current_idx += 1
709 |
710 | active_mask_Bx = eos_countdown_Bx != 0
711 | eos_trigger_Bx = torch.zeros_like(active_mask_Bx)
712 | if active_mask_Bx.any():
713 | is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)
714 | is_max_len = current_step_idx >= max_tokens - max_delay_pattern
715 | eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len
716 | eos_detected_Bx |= eos_trigger_Bx
717 | start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)
718 | if start_countdown_mask_Bx.any():
719 | eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern
720 | finished_step_Bx[start_countdown_mask_Bx] = current_step_idx
721 |
722 | padding_mask_Bx = eos_countdown_Bx > 0
723 | if padding_mask_Bx.any():
724 | pred_active_BxC = pred_BxC[padding_mask_Bx].clone()
725 | countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]
726 | step_after_eos_Bx = max_delay_pattern - countdown_active_Bx
727 | step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)
728 | delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)
729 | eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_
730 | pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_
731 | pred_active_BxC[eos_mask_NxC] = audio_eos_value
732 | pred_active_BxC[pad_mask_NxC] = audio_pad_value
733 | pred_BxC[padding_mask_Bx] = pred_active_BxC
734 | eos_countdown_Bx[padding_mask_Bx] -= 1
735 |
736 | # --- Update BOS flag (Original) ---
737 | if not bos_over:
738 | bos_over = all(
739 | dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps
740 | )
741 |
742 | dec_output.update_one(pred_BxC, current_step_idx, not bos_over)
743 |
744 | dec_step += 1
745 |
746 | if verbose and dec_step % 86 == 0:
747 | duration = time.time() - start_time
748 | if duration > 0:
749 | print(
750 | f"generate step {dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x"
751 | )
752 | start_time = time.time()
753 |
754 | # --- Finalize and Extract Output ---
755 | final_step = dec_step + 1
756 |
757 | finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern
758 |
759 | prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)
760 | lengths_Bx = finished_step_Bx - prefill_steps_tensor
761 | lengths_Bx = torch.clamp(lengths_Bx, min=0)
762 |
763 | max_len = lengths_Bx.max().item() + max_delay_pattern
764 | outputs = []
765 |
766 | if max_len > 0:
767 | num_channels = self.config.data.channels
768 | audio_pad_value = self.config.data.audio_pad_value
769 | generated_codes = torch.full(
770 | (batch_size, max_len, num_channels),
771 | fill_value=audio_pad_value,
772 | dtype=torch.long,
773 | device=self.device,
774 | )
775 |
776 | for i in range(batch_size):
777 | start_step = dec_output.prefill_steps[i]
778 | actual_len = lengths_Bx[i].item() + max_delay_pattern
779 | if actual_len > 0:
780 | tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]
781 | generated_codes[i, :actual_len, :] = tokens_to_copy
782 |
783 | if verbose:
784 | avg_steps = lengths_Bx.float().mean().item()
785 | total_duration = time.time() - total_start_time
786 | print(f"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s")
787 |
788 | del dec_state
789 |
790 | outputs = self._generate_output(generated_codes, lengths_Bx)
791 | else:
792 | print("Warning: Nothing generated for any sequence in the batch.")
793 | outputs = [None] * batch_size
794 |
795 | return outputs if batch_size > 1 else outputs[0]
796 |
--------------------------------------------------------------------------------
/dia/state.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional
3 |
4 | import torch
5 |
6 | from .config import DiaConfig
7 |
8 |
9 | def create_attn_mask(
10 | q_padding_mask_1d: torch.Tensor,
11 | k_padding_mask_1d: torch.Tensor,
12 | device: torch.device,
13 | is_causal: bool = False,
14 | ) -> torch.Tensor:
15 | """
16 | Creates the attention mask (self or cross) mimicking JAX segment ID logic.
17 | """
18 | # B1, Tq = q_padding_mask_1d.shape
19 | # B2, Tk = k_padding_mask_1d.shape
20 |
21 | p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]
22 | p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]
23 |
24 | # Condition A: Non-padding query attends to non-padding key
25 | non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]
26 |
27 | # Condition B: Padding query attends to padding key
28 | pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]
29 |
30 | # Combine: True if padding status is compatible (both non-pad OR both pad)
31 | mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]
32 |
33 | if is_causal:
34 | # assert Tq == Tk, "Causal mask requires query and key sequence lengths to be equal"
35 | causal_mask_2d = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device)) # Shape [B, Tq, Tk]
36 | causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]
37 | return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]
38 | else:
39 | return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]
40 |
41 |
42 | @dataclass
43 | class EncoderInferenceState:
44 | """Parameters specifically for encoder inference."""
45 |
46 | max_seq_len: int
47 | device: torch.device
48 | positions: torch.Tensor
49 | padding_mask: torch.Tensor
50 | attn_mask: torch.Tensor
51 |
52 | @classmethod
53 | def new(cls, config: DiaConfig, cond_src: torch.Tensor) -> "EncoderInferenceState":
54 | """Creates EtorchrInferenceParams from DiaConfig and a device."""
55 | device = cond_src.device
56 |
57 | positions = torch.arange(config.data.text_length, dtype=torch.float32, device=device).unsqueeze(0)
58 | padding_mask = (cond_src.squeeze(1) != config.data.text_pad_value).to(device).repeat_interleave(2, dim=0)
59 | attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)
60 |
61 | return cls(
62 | max_seq_len=config.data.text_length,
63 | device=device,
64 | positions=positions,
65 | padding_mask=padding_mask,
66 | attn_mask=attn_mask,
67 | )
68 |
69 |
70 | class KVCache(torch.nn.Module):
71 | k: torch.Tensor
72 | v: torch.Tensor
73 |
74 | def __init__(
75 | self,
76 | batch_size: int,
77 | num_heads: int,
78 | max_len: int,
79 | head_dim: int,
80 | dtype: torch.dtype,
81 | device: torch.device,
82 | k: torch.Tensor | None = None,
83 | v: torch.Tensor | None = None,
84 | ):
85 | k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k
86 | v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v
87 | super().__init__()
88 |
89 | self.register_buffer("k", k)
90 | self.register_buffer("v", v)
91 |
92 | @classmethod
93 | def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> "KVCache":
94 | return cls(
95 | batch_size=k.shape[0] // 2,
96 | num_heads=k.shape[1],
97 | max_len=k.shape[2],
98 | head_dim=k.shape[3],
99 | dtype=k.dtype,
100 | device=k.device,
101 | k=k,
102 | v=v,
103 | )
104 |
105 | def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
106 | k_out, v_out = self.k, self.v
107 | k_out[:, :, current_idx, :] = k
108 | v_out[:, :, current_idx, :] = v
109 | return self.k, self.v
110 |
111 | def prefill(self, k: torch.Tensor, v: torch.Tensor):
112 | prefill_len = k.shape[2]
113 | self.k[:, :, :prefill_len, :] = k
114 | self.v[:, :, :prefill_len, :] = v
115 |
116 |
117 | @dataclass
118 | class DecoderInferenceState:
119 | """Parameters specifically for decoder inference."""
120 |
121 | device: torch.device
122 | dtype: torch.dtype
123 | enc_out: torch.Tensor
124 | enc_positions: torch.Tensor
125 | dec_positions: torch.Tensor
126 | self_attn_cache: list[KVCache]
127 | cross_attn_cache: list[KVCache]
128 | casual_attn_mask: torch.Tensor
129 | cross_attn_mask: torch.Tensor
130 |
131 | @classmethod
132 | def new(
133 | cls,
134 | config: DiaConfig,
135 | enc_state: EncoderInferenceState,
136 | enc_out: torch.Tensor,
137 | dec_cross_attn_cache: list[KVCache],
138 | compute_dtype: torch.dtype,
139 | max_generation_length: Optional[int] = None,
140 | ) -> "DecoderInferenceState":
141 | """Creates DecoderInferenceParams from DiaConfig and a device."""
142 | device = enc_out.device
143 | max_audio_len = max_generation_length or config.data.audio_length
144 | batch_size = enc_out.shape[0] // 2
145 |
146 | dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)
147 | causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))
148 | dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)
149 | cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)
150 |
151 | self_attn_cache = [
152 | KVCache(
153 | batch_size,
154 | config.model.decoder.kv_heads,
155 | max_audio_len,
156 | config.model.decoder.gqa_head_dim,
157 | compute_dtype,
158 | device,
159 | )
160 | for _ in range(config.model.decoder.n_layer)
161 | ]
162 |
163 | return cls(
164 | device=device,
165 | dtype=compute_dtype,
166 | enc_out=enc_out,
167 | enc_positions=enc_state.positions,
168 | dec_positions=dec_positions,
169 | self_attn_cache=self_attn_cache,
170 | cross_attn_cache=dec_cross_attn_cache,
171 | casual_attn_mask=causal_mask,
172 | cross_attn_mask=cross_attn_mask,
173 | )
174 |
175 | def prepare_step(self, step_from: int, step_to: int | None = None) -> None:
176 | if step_to is None:
177 | step_to = step_from + 1
178 | self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, device=self.device).unsqueeze(0)
179 |
180 |
181 | @dataclass
182 | class DecoderOutput:
183 | generated_tokens: torch.Tensor
184 | prefill_steps: list[int]
185 |
186 | @classmethod
187 | def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> "DecoderOutput":
188 | max_audio_len = config.data.audio_length
189 | return cls(
190 | generated_tokens=torch.full(
191 | (batch_size, max_audio_len, config.data.channels),
192 | fill_value=-1,
193 | dtype=torch.int,
194 | device=device,
195 | ),
196 | prefill_steps=[],
197 | )
198 |
199 | def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:
200 | if step_to is None:
201 | step_to = step_from + 1
202 | return self.generated_tokens[:, step_from:step_to, :]
203 |
204 | def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):
205 | dec_out = dec_out.to(self.generated_tokens.dtype)
206 | if apply_mask:
207 | mask = self.generated_tokens[:, step, :] == -1
208 | self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])
209 | else:
210 | self.generated_tokens[:, step, :] = dec_out
211 |
212 | def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):
213 | length = dec_out.shape[1]
214 | self.generated_tokens[:, :length, :] = dec_out
215 | self.prefill_steps = prefill_steps
216 |
--------------------------------------------------------------------------------
/dia/static/images/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nari-labs/dia/2811af1c5f476b1f49f4744fabf56cf352be21e5/dia/static/images/banner.png
--------------------------------------------------------------------------------
/docker/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | # Dockerfile.cpu - CPU-only deployment for DIA
2 | # --------------------------------------------------
3 | # Build: docker build . -f docker/Dockerfile.cpu -t dia-cpu
4 | # Run: docker run --rm -p 7860:7860 dia-cpu
5 |
6 | FROM python:3.10-slim
7 |
8 | # Set non-interactive frontend
9 | ENV DEBIAN_FRONTEND=noninteractive
10 |
11 | # Install venv, and system dependencies
12 | RUN apt-get update && apt-get install -y \
13 | python3-venv \
14 | libsndfile1 \
15 | ffmpeg \
16 | curl \
17 | && apt-get clean && rm -rf /var/lib/apt/lists/*
18 |
19 | # Create non-root user and set up directories
20 | RUN useradd -m -u 1001 appuser && \
21 | mkdir -p /app/outputs /app && \
22 | chown -R appuser:appuser /app
23 |
24 | USER appuser
25 | WORKDIR /app
26 |
27 | # Copy all code (including pyproject.toml)
28 | COPY --chown=appuser:appuser . .
29 |
30 | # Create and activate virtual environment
31 | RUN python3 -m venv /app/venv
32 | ENV PATH="/app/venv/bin:$PATH"
33 |
34 | # Install all project dependencies (CPU-only PyTorch)
35 | RUN pip install --upgrade pip && \
36 | pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu && \
37 | pip install --no-cache-dir -e .[dev]
38 |
39 | # Set environment variables
40 | ENV PYTHONUNBUFFERED=1 \
41 | PYTHONPATH=/app
42 |
43 | # Expose Gradio default port
44 | ENV GRADIO_SERVER_NAME="0.0.0.0"
45 | EXPOSE 7860
46 |
47 | # Entrypoint
48 | CMD ["python3", "app.py"]
49 |
--------------------------------------------------------------------------------
/docker/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | # Dockerfile.gpu - GPU deployment for DIA
2 | # --------------------------------------------------
3 | # Build: docker build . -f docker/Dockerfile.gpu -t dia-gpu
4 | # Run: docker run --rm --gpus all -p 7860:7860 dia-gpu
5 | # Requires NVIDIA Container Toolkit on host.
6 |
7 | FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
8 |
9 | # Set non-interactive frontend
10 | ENV DEBIAN_FRONTEND=noninteractive
11 |
12 | # Install venv, and system dependencies
13 | RUN apt-get update && apt-get install -y \
14 | python3-venv \
15 | libsndfile1 \
16 | ffmpeg \
17 | curl \
18 | && apt-get clean && rm -rf /var/lib/apt/lists/*
19 |
20 | # Create non-root user and set up directories
21 | RUN useradd -m -u 1001 appuser && \
22 | mkdir -p /app/outputs /app && \
23 | chown -R appuser:appuser /app
24 |
25 | USER appuser
26 | WORKDIR /app
27 |
28 | # Copy all code (including pyproject.toml)
29 | COPY --chown=appuser:appuser . .
30 |
31 | # Create and activate virtual environment
32 | RUN python3 -m venv /app/venv
33 | ENV PATH="/app/venv/bin:$PATH"
34 |
35 | # Install all project dependencies
36 | RUN pip install --upgrade pip && pip install --no-cache-dir .
37 |
38 | # Set environment variables
39 | ENV PYTHONUNBUFFERED=1 \
40 | PYTHONPATH=/app \
41 | USE_GPU=true \
42 | LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda-12.1/lib64:${LD_LIBRARY_PATH}
43 |
44 | # Expose Gradio default port
45 | ENV GRADIO_SERVER_NAME="0.0.0.0"
46 | EXPOSE 7860
47 |
48 | # Entrypoint
49 | CMD ["python3", "app.py"]
50 |
--------------------------------------------------------------------------------
/example/benchmark.py:
--------------------------------------------------------------------------------
1 | from random import choice
2 |
3 | import torch
4 |
5 | from dia.model import Dia
6 |
7 |
8 | torch._inductor.config.coordinate_descent_tuning = True
9 | torch._inductor.config.triton.unique_kernel_names = True
10 | torch._inductor.config.fx_graph_cache = True
11 |
12 | # debugging
13 | torch._logging.set_logs(graph_breaks=True, recompiles=True)
14 |
15 | model_name = "nari-labs/Dia-1.6B"
16 | compute_dtype = "float16"
17 |
18 | model = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)
19 |
20 |
21 | test_cases = [
22 | "[S1] Dia is an open weights text to dialogue model.",
23 | "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.",
24 | "[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.",
25 | "[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. [S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.",
26 | ]
27 |
28 |
29 | # Wram up
30 | for _ in range(2):
31 | text = choice(test_cases)
32 | output = model.generate(text, audio_prompt="./example_prompt.mp3", use_torch_compile=True, verbose=True)
33 | output = model.generate(text, use_torch_compile=True, verbose=True)
34 |
35 | # Benchmark
36 | for _ in range(10):
37 | text = choice(test_cases)
38 | output = model.generate(text, use_torch_compile=True, verbose=True)
39 | output = model.generate(text, audio_prompt="./example_prompt.mp3", use_torch_compile=True, verbose=True)
40 |
--------------------------------------------------------------------------------
/example/simple-cpu.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from dia.model import Dia
4 |
5 |
6 | # Select device: CPU
7 | device = torch.device("cpu")
8 | print(f"Using device: {device}")
9 |
10 | # Load model
11 | model = Dia.from_pretrained(
12 | "nari-labs/Dia-1.6B", compute_dtype="float32", device=device
13 | ) # Float32 works better than float16 on CPU - you can also test with float16
14 |
15 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
16 |
17 | output = model.generate(text, use_torch_compile=False, verbose=True)
18 |
19 | model.save_audio("simple.mp3", output)
20 |
--------------------------------------------------------------------------------
/example/simple-mac.py:
--------------------------------------------------------------------------------
1 | from dia.model import Dia
2 |
3 |
4 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
5 |
6 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
7 |
8 | # It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.
9 | # This is because the `torch.compile` function is not supported on MacOS.
10 | output = model.generate(text, use_torch_compile=False, verbose=True)
11 |
12 | model.save_audio("simple.mp3", output)
13 |
--------------------------------------------------------------------------------
/example/simple.py:
--------------------------------------------------------------------------------
1 | from dia.model import Dia
2 |
3 |
4 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
5 |
6 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
7 |
8 | output = model.generate(text, use_torch_compile=True, verbose=True)
9 |
10 | model.save_audio("simple.mp3", output)
11 |
--------------------------------------------------------------------------------
/example/simple_batch.py:
--------------------------------------------------------------------------------
1 | from dia.model import Dia
2 |
3 |
4 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
5 |
6 | text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
7 | texts = [text for _ in range(10)]
8 |
9 | output = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)
10 |
11 | for i, o in enumerate(output):
12 | model.save_audio(f"simple_{i}.mp3", o)
13 |
--------------------------------------------------------------------------------
/example/voice_clone.py:
--------------------------------------------------------------------------------
1 | from dia.model import Dia
2 |
3 |
4 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
5 |
6 | # You should put the transcript of the voice you want to clone
7 | # We will use the audio created by running simple.py as an example.
8 | # Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.
9 | clone_from_text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
10 | clone_from_audio = "simple.mp3"
11 |
12 | # For your custom needs, replace above with below and add your audio file to this directory:
13 | # clone_from_text = "[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3"
14 | # clone_from_audio = "your_audio_name.mp3"
15 |
16 | # Text to generate
17 | text_to_generate = "[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too."
18 |
19 | # It will only return the audio from the text_to_generate
20 | output = model.generate(
21 | clone_from_text + text_to_generate, audio_prompt=clone_from_audio, use_torch_compile=True, verbose=True
22 | )
23 |
24 | model.save_audio("voice_clone.mp3", output)
25 |
--------------------------------------------------------------------------------
/example/voice_clone_batch.py:
--------------------------------------------------------------------------------
1 | from dia.model import Dia
2 |
3 |
4 | model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
5 |
6 | # You should put the transcript of the voice you want to clone
7 | # We will use the audio created by running simple.py as an example.
8 | # Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.
9 | clone_from_text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
10 |
11 | # For your custom needs, replace above with below and add your audio file to this directory:
12 | # clone_from_text = "[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3"
13 | # clone_from_audio = "your_audio_name.mp3"
14 |
15 | # Text to generate
16 | text_to_generate = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
17 |
18 | clone_from_audios = [f"simple_{i}.mp3" for i in range(10)]
19 |
20 | texts = [clone_from_text + text_to_generate for _ in range(10)]
21 |
22 | # It will only return the audio from the text_to_generate
23 | output = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)
24 |
25 | for i, o in enumerate(output):
26 | model.save_audio(f"voice_clone_{i}.mp3", o)
27 |
--------------------------------------------------------------------------------
/example_prompt.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nari-labs/dia/2811af1c5f476b1f49f4744fabf56cf352be21e5/example_prompt.mp3
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "nari-tts"
3 | version = "0.1.0"
4 | description = "Dia - A text-to-speech model for dialogue generation"
5 | readme = "README.md"
6 | requires-python = ">=3.10"
7 | license = {file = "LICENSE"}
8 | authors = [
9 | {name = "Nari Labs", email = "contact@narilabs.ai"}
10 | ]
11 | dependencies = [
12 | "descript-audio-codec>=1.0.0",
13 | "gradio>=5.25.2",
14 | "huggingface-hub>=0.30.2",
15 | "numpy>=2.2.4",
16 | "pydantic>=2.11.3",
17 | "safetensors>=0.5.3",
18 | "soundfile>=0.13.1",
19 | "torch==2.6.0",
20 | "torchaudio==2.6.0",
21 | "triton==3.2.0 ; sys_platform == 'linux'",
22 | "triton-windows==3.2.0.post18 ; sys_platform == 'win32'",
23 | ]
24 |
25 | [build-system]
26 | requires = ["hatchling"]
27 | build-backend = "hatchling.build"
28 |
29 | [project.urls]
30 | "Homepage" = "https://github.com/nari-labs/dia"
31 | "Bug Tracker" = "https://github.com/nari-labs/dia/issues"
32 |
33 | [tool.hatch.build.targets.wheel]
34 | packages = ["dia"]
35 |
36 | [tool.ruff]
37 | # Never enforce `E501` (line length violations).
38 | lint.ignore = ["C901", "E501", "E741", "W605"]
39 | lint.select = ["C", "E", "F", "I", "W"]
40 | line-length = 119
41 |
42 | # Ignore import violations in all `__init__.py` files.
43 | [tool.ruff.lint.per-file-ignores]
44 | "__init__.py" = ["E402", "F401", "F403", "F811"]
45 |
46 | [tool.ruff.lint.isort]
47 | lines-after-imports = 2
48 |
49 | [tool.uv.sources]
50 | torch = [
51 | { index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
52 | ]
53 | torchaudio = [
54 | { index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
55 | ]
56 |
57 | [[tool.uv.index]]
58 | name = "pytorch-cu126"
59 | url = "https://download.pytorch.org/whl/cu126"
60 | explicit = true
61 |
62 | [dependency-groups]
63 | dev = [
64 | "ninja>=1.11.1.4",
65 | "packaging>=25.0",
66 | ]
67 |
--------------------------------------------------------------------------------