├── .eslintrc.json
├── .gitignore
├── .prettierrc
├── LICENSE
├── README.md
├── package-lock.json
├── package.json
├── public
│   ├── index.html
│   ├── openai-logomark.svg
│   └── robots.txt
├── readme
│   └── realtime-console-demo.png
├── relay-server
│   ├── index.js
│   └── lib
│       └── relay.js
├── src
│   ├── App.scss
│   ├── App.tsx
│   ├── components
│   │   ├── Map.scss
│   │   ├── Map.tsx
│   │   ├── button
│   │   │   ├── Button.scss
│   │   │   └── Button.tsx
│   │   └── toggle
│   │       ├── Toggle.scss
│   │       └── Toggle.tsx
│   ├── index.css
│   ├── index.tsx
│   ├── lib
│   │   └── wavtools
│   │       ├── dist
│   │       │   ├── index.d.ts
│   │       │   ├── index.d.ts.map
│   │       │   └── lib
│   │       │       ├── analysis
│   │       │       │   ├── audio_analysis.d.ts
│   │       │       │   ├── audio_analysis.d.ts.map
│   │       │       │   ├── constants.d.ts
│   │       │       │   └── constants.d.ts.map
│   │       │       ├── wav_packer.d.ts
│   │       │       ├── wav_packer.d.ts.map
│   │       │       ├── wav_recorder.d.ts
│   │       │       ├── wav_recorder.d.ts.map
│   │       │       ├── wav_stream_player.d.ts
│   │       │       ├── wav_stream_player.d.ts.map
│   │       │       └── worklets
│   │       │           ├── audio_processor.d.ts
│   │       │           ├── audio_processor.d.ts.map
│   │       │           ├── stream_processor.d.ts
│   │       │           └── stream_processor.d.ts.map
│   │       ├── index.js
│   │       └── lib
│   │           ├── analysis
│   │           │   ├── audio_analysis.js
│   │           │   └── constants.js
│   │           ├── wav_packer.js
│   │           ├── wav_recorder.js
│   │           ├── wav_stream_player.js
│   │           └── worklets
│   │               ├── audio_processor.js
│   │               └── stream_processor.js
│   ├── logo.svg
│   ├── pages
│   │   ├── ConsolePage.scss
│   │   └── ConsolePage.tsx
│   ├── react-app-env.d.ts
│   ├── reportWebVitals.ts
│   ├── setupTests.ts
│   └── utils
│       ├── conversation_config.js
│       └── wav_renderer.ts
└── tsconfig.json
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "parserOptions": {
3 | "sourceType": "module"
4 | },
5 | "env": {
6 | "es2022": true
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # packaging
15 | *.zip
16 | *.tar.gz
17 | *.tar
18 | *.tgz
19 | *.bla
20 |
21 | # misc
22 | .DS_Store
23 | .env
24 | .env.local
25 | .env.development.local
26 | .env.test.local
27 | .env.production.local
28 |
29 | npm-debug.log*
30 | yarn-debug.log*
31 | yarn-error.log*
32 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "tabWidth": 2,
3 | "useTabs": false,
4 | "singleQuote": true
5 | }
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 OpenAI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Realtime Console
2 |
3 | The OpenAI Realtime Console is intended as an inspector and interactive API reference
4 | for the OpenAI Realtime API. It comes packaged with two utility libraries:
5 | [openai/openai-realtime-api-beta](https://github.com/openai/openai-realtime-api-beta),
6 | which acts as a **Reference Client** (for browser and Node.js), and
7 | [`/src/lib/wavtools`](./src/lib/wavtools), which enables simple audio
8 | management in the browser.
9 |
10 | <img src="/readme/realtime-console-demo.png" width="800" />
11 |
12 | # Starting the console
13 |
14 | This is a React project created using `create-react-app` that is bundled via Webpack.
15 | Install it by extracting the contents of this package and running:
16 |
17 | ```shell
18 | $ npm i
19 | ```
20 |
21 | Start your server with:
22 |
23 | ```shell
24 | $ npm start
25 | ```
26 |
27 | It should be available via `localhost:3000`.
28 |
29 | # Table of contents
30 |
31 | 1. [Using the console](#using-the-console)
32 | 1. [Using a server relay](#using-a-server-relay)
33 | 1. [Realtime API reference client](#realtime-api-reference-client)
34 | 1. [Sending streaming audio](#sending-streaming-audio)
35 | 1. [Adding and using tools](#adding-and-using-tools)
36 | 1. [Interrupting the model](#interrupting-the-model)
37 | 1. [Reference client events](#reference-client-events)
38 | 1. [Wavtools](#wavtools)
39 | 1. [WavRecorder quickstart](#wavrecorder-quickstart)
40 | 1. [WavStreamPlayer quickstart](#wavstreamplayer-quickstart)
41 | 1. [Acknowledgements and contact](#acknowledgements-and-contact)
42 |
43 | # Using the console
44 |
45 | The console requires an OpenAI API key (**user key** or **project key**) that has access to the
46 | Realtime API. You'll be prompted on startup to enter it. It will be saved via `localStorage` and can be
47 | changed at any time from the UI.
48 |
49 | To start a session you'll need to **connect**. This will require microphone access.
50 | You can then choose between **manual** (Push-to-talk) and **vad** (Voice Activity Detection)
51 | conversation modes, and switch between them at any time.
52 |
53 | There are two functions enabled:
54 |
55 | - `get_weather`: Ask for the weather anywhere and the model will do its best to pinpoint the
56 | location, show it on a map, and get the weather for that location. Note that it doesn't
57 | have location access, and coordinates are "guessed" from the model's training data so
58 | accuracy might not be perfect.
59 | - `set_memory`: You can ask the model to remember information for you, and it will store it in
60 | a JSON blob on the left. A sketch of how such a tool is registered is shown below.
61 |
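Both tools are registered through the reference client's `.addTool()` API, described in
[Adding and using tools](#adding-and-using-tools). As an illustration only (the console's
actual definitions in [`ConsolePage.tsx`](/src/pages/ConsolePage.tsx) may differ), a
`set_memory`-style tool could be registered like this:

```javascript
// Illustrative sketch: the schema and storage here are assumptions,
// not the console's exact tool definition.
const memory = {};
client.addTool(
  {
    name: 'set_memory',
    description: 'Saves a value under a key for later recall.',
    parameters: {
      type: 'object',
      properties: {
        key: { type: 'string', description: 'Key to store the value under' },
        value: { type: 'string', description: 'Value to remember' },
      },
      required: ['key', 'value'],
    },
  },
  async ({ key, value }) => {
    memory[key] = value; // render `memory` as the JSON blob in the UI
    return { ok: true };
  }
);
```
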
62 | You can freely interrupt the model at any time in push-to-talk or VAD mode.
63 |
64 | ## Using a server relay
65 |
66 | If you would like to build a more robust implementation and play around with the reference
67 | client using your own server, we have included a Node.js [Relay Server](/relay-server/index.js).
68 |
69 | ```shell
70 | $ npm run relay
71 | ```
72 |
73 | It will start automatically on `localhost:8081`. **You will need to create a `.env` file**
74 | with `OPENAI_API_KEY=` set to your API key. Note that you should change the following code
75 | in [`ConsolePage.tsx`](/src/pages/ConsolePage.tsx):
76 |
77 | ```javascript
78 | /**
79 | * Change this if you want to connect to a local relay server!
80 | * This will require you to set OPENAI_API_KEY= in a `.env` file
81 | * You can run it with `npm run relay`, in parallel with `npm start`
82 | *
83 | * Simply switch the lines by commenting one and removing the other
84 | */
85 | // const USE_LOCAL_RELAY_SERVER_URL: string | undefined = 'http://localhost:8081';
86 | const USE_LOCAL_RELAY_SERVER_URL: string | undefined = void 0;
87 | ```
88 |
89 | This server is **only a simple message relay**, but it can be extended to:
90 |
91 | - Hide API credentials if you would like to ship an app to play with online
92 | - Handle certain calls you would like to keep secret (e.g. `instructions`) on
93 | the server directly
94 | - Restrict what types of events the client can receive and send
95 |
96 | You will have to implement these features yourself; a sketch of event filtering is shown below.
97 |
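For example, restricting which events the relay will forward could be a small change to the
`messageHandler` in [`relay-server/lib/relay.js`](/relay-server/lib/relay.js). This is a minimal
sketch; the allow-list below is illustrative, not a vetted set:

```javascript
// Sketch: only forward an allow-list of client event types to OpenAI.
// The event types listed here are examples, not a recommendation.
const ALLOWED_CLIENT_EVENTS = new Set([
  'session.update',
  'input_audio_buffer.append',
  'input_audio_buffer.commit',
  'response.create',
  'response.cancel',
]);

const messageHandler = (data) => {
  try {
    const event = JSON.parse(data);
    if (!ALLOWED_CLIENT_EVENTS.has(event.type)) {
      this.log(`Blocked client event "${event.type}"`);
      return;
    }
    this.log(`Relaying "${event.type}" to OpenAI`);
    client.realtime.send(event.type, event);
  } catch (e) {
    this.log(`Error parsing event from client: ${data}`);
  }
};
```
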
98 | # Realtime API reference client
99 |
100 | The latest reference client and documentation are available on GitHub at
101 | [openai/openai-realtime-api-beta](https://github.com/openai/openai-realtime-api-beta).
102 |
103 | You can use this client yourself in any React (front-end) or Node.js project.
104 | For full documentation, refer to the GitHub repository, but you can use the
105 | guide here as a primer to get started.
106 |
107 | ```javascript
108 | import { RealtimeClient } from '/src/lib/realtime-api-beta/index.js';
109 |
110 | const client = new RealtimeClient({ apiKey: process.env.OPENAI_API_KEY });
111 |
112 | // Can set parameters ahead of connecting
113 | client.updateSession({ instructions: 'You are a great, upbeat friend.' });
114 | client.updateSession({ voice: 'alloy' });
115 | client.updateSession({ turn_detection: 'server_vad' });
116 | client.updateSession({ input_audio_transcription: { model: 'whisper-1' } });
117 |
118 | // Set up event handling
119 | client.on('conversation.updated', ({ item, delta }) => {
120 | const items = client.conversation.getItems(); // can use this to render all items
121 | /* includes all changes to conversations, delta may be populated */
122 | });
123 |
124 | // Connect to Realtime API
125 | await client.connect();
126 |
127 | // Send an item and trigger a generation
128 | client.sendUserMessageContent([{ type: 'text', text: `How are you?` }]);
129 | ```
130 |
131 | ## Sending streaming audio
132 |
133 | To send streaming audio, use the `.appendInputAudio()` method. If you're in `turn_detection: 'disabled'` mode,
134 | then you need to use `.createResponse()` to tell the model to respond.
135 |
136 | ```javascript
137 | // Send user audio, must be Int16Array or ArrayBuffer
138 | // Default audio format is pcm16 with sample rate of 24,000 Hz
139 | // This populates 1s of noise in 0.1s chunks
140 | for (let i = 0; i < 10; i++) {
141 | const data = new Int16Array(2400);
142 | for (let n = 0; n < 2400; n++) {
143 | const value = Math.floor((Math.random() * 2 - 1) * 0x8000);
144 | data[n] = value;
145 | }
146 | client.appendInputAudio(data);
147 | }
148 | // Pending audio is committed and model is asked to generate
149 | client.createResponse();
150 | ```
151 |
152 | ## Adding and using tools
153 |
154 | Working with tools is easy. Just call `.addTool()` and set a callback as the second parameter.
155 | The callback will be executed with the parameters for the tool, and the result will be automatically
156 | sent back to the model.
157 |
158 | ```javascript
159 | // We can add tools as well, with callbacks specified
160 | client.addTool(
161 | {
162 | name: 'get_weather',
163 | description:
164 | 'Retrieves the weather for a given lat, lng coordinate pair. Specify a label for the location.',
165 | parameters: {
166 | type: 'object',
167 | properties: {
168 | lat: {
169 | type: 'number',
170 | description: 'Latitude',
171 | },
172 | lng: {
173 | type: 'number',
174 | description: 'Longitude',
175 | },
176 | location: {
177 | type: 'string',
178 | description: 'Name of the location',
179 | },
180 | },
181 | required: ['lat', 'lng', 'location'],
182 | },
183 | },
184 | async ({ lat, lng, location }) => {
185 | const result = await fetch(
186 |       `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lng}&current=temperature_2m,wind_speed_10m`
187 | );
188 | const json = await result.json();
189 | return json;
190 | }
191 | );
192 | ```
193 |
194 | ## Interrupting the model
195 |
196 | You may want to manually interrupt the model, especially in `turn_detection: 'disabled'` mode.
197 | To do this, we can use:
198 |
199 | ```javascript
200 | // id is the id of the item currently being generated
201 | // sampleCount is the number of audio samples that have been heard by the listener
202 | client.cancelResponse(id, sampleCount);
203 | ```
204 |
205 | This method causes the model to immediately cease generation and also truncates the
206 | item being played, removing all audio after `sampleCount` and clearing the text
207 | response. This lets you interrupt the model and prevent it from "remembering"
208 | anything it generated beyond the user's current playback position.
209 |
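In VAD mode, the two pieces can be wired together by listening for `conversation.interrupted`
and asking the audio player where playback actually stopped. A minimal sketch, assuming audio
chunks were queued on a `WavStreamPlayer` (see [Wavtools](#wavtools)) using the generated
item's id as the track id:

```javascript
client.on('conversation.interrupted', async () => {
  // Halt playback and read back how many samples the listener actually heard
  const trackSampleOffset = await wavStreamPlayer.interrupt();
  if (trackSampleOffset?.trackId) {
    const { trackId, offset } = trackSampleOffset;
    // Only valid if audio was queued with the item id as the track id
    client.cancelResponse(trackId, offset);
  }
});
```
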
210 | ## Reference client events
211 |
212 | There are five main client events for application control flow in `RealtimeClient`.
213 | Note that this is only an overview of using the client; the full Realtime API
214 | event specification is considerably larger. If you need more control, check out the GitHub repository:
215 | [openai/openai-realtime-api-beta](https://github.com/openai/openai-realtime-api-beta).
216 |
217 | ```javascript
218 | // errors like connection failures
219 | client.on('error', (event) => {
220 | // do thing
221 | });
222 |
223 | // in VAD mode, the user starts speaking
224 | // we can use this to stop audio playback of a previous response if necessary
225 | client.on('conversation.interrupted', () => {
226 | /* do something */
227 | });
228 |
229 | // includes all changes to conversations
230 | // delta may be populated
231 | client.on('conversation.updated', ({ item, delta }) => {
232 | // get all items, e.g. if you need to update a chat window
233 | const items = client.conversation.getItems();
234 | switch (item.type) {
235 | case 'message':
236 | // system, user, or assistant message (item.role)
237 | break;
238 | case 'function_call':
239 | // always a function call from the model
240 | break;
241 | case 'function_call_output':
242 | // always a response from the user / application
243 | break;
244 | }
245 | if (delta) {
246 | // Only one of the following will be populated for any given event
247 | // delta.audio = Int16Array, audio added
248 | // delta.transcript = string, transcript added
249 | // delta.arguments = string, function arguments added
250 | }
251 | });
252 |
253 | // only triggered after item added to conversation
254 | client.on('conversation.item.appended', ({ item }) => {
255 | /* item status can be 'in_progress' or 'completed' */
256 | });
257 |
258 | // only triggered after item completed in conversation
259 | // will always be triggered after conversation.item.appended
260 | client.on('conversation.item.completed', ({ item }) => {
261 | /* item status will always be 'completed' */
262 | });
263 | ```
264 |
265 | # Wavtools
266 |
267 | Wavtools provides easy management of PCM16 audio streams in the browser, for both
268 | recording and playback.
269 |
270 | ## WavRecorder Quickstart
271 |
272 | ```javascript
273 | import { WavRecorder } from '/src/lib/wavtools/index.js';
274 |
275 | const wavRecorder = new WavRecorder({ sampleRate: 24000 });
276 | wavRecorder.getStatus(); // "ended"
277 |
278 | // request permissions, connect microphone
279 | await wavRecorder.begin();
280 | wavRecorder.getStatus(); // "paused"
281 |
282 | // Start recording
283 | // This callback will be triggered in chunks of 8192 samples by default
284 | // { mono, raw } are Int16Array (PCM16) mono & full channel data
285 | await wavRecorder.record((data) => {
286 | const { mono, raw } = data;
287 | });
288 | wavRecorder.getStatus(); // "recording"
289 |
290 | // Stop recording
291 | await wavRecorder.pause();
292 | wavRecorder.getStatus(); // "paused"
293 |
294 | // outputs "audio/wav" audio file
295 | const audio = await wavRecorder.save();
296 |
297 | // clears current audio buffer and starts recording
298 | await wavRecorder.clear();
299 | await wavRecorder.record();
300 |
301 | // get data for visualization
302 | const frequencyData = wavRecorder.getFrequencies();
303 |
304 | // Stop recording, disconnects microphone, output file
305 | await wavRecorder.pause();
306 | const finalAudio = await wavRecorder.end();
307 |
308 | // Listen for device change; e.g. if somebody disconnects a microphone
309 | // deviceList is an array of MediaDeviceInfo objects with an added `default` property
310 | wavRecorder.listenForDeviceChange((deviceList) => {});
311 | ```
312 |
313 | ## WavStreamPlayer Quickstart
314 |
315 | ```javascript
316 | import { WavStreamPlayer } from '/src/lib/wavtools/index.js';
317 |
318 | const wavStreamPlayer = new WavStreamPlayer({ sampleRate: 24000 });
319 |
320 | // Connect to audio output
321 | await wavStreamPlayer.connect();
322 |
323 | // Create 1s of empty PCM16 audio
324 | const audio = new Int16Array(24000);
325 | // Queue 3s of audio, will start playing immediately
326 | wavStreamPlayer.add16BitPCM(audio, 'my-track');
327 | wavStreamPlayer.add16BitPCM(audio, 'my-track');
328 | wavStreamPlayer.add16BitPCM(audio, 'my-track');
329 |
330 | // get data for visualization
331 | const frequencyData = wavStreamPlayer.getFrequencies();
332 |
333 | // Interrupt the audio (halt playback) at any time
334 | // To restart, need to call .add16BitPCM() again
335 | const trackOffset = await wavStreamPlayer.interrupt();
336 | trackOffset.trackId; // "my-track"
337 | trackOffset.offset; // sample number
338 | trackOffset.currentTime; // time in track
339 | ```
340 |
341 | # Acknowledgements and contact
342 |
343 | Thanks for checking out the Realtime Console. We hope you have fun with the Realtime API.
344 | Special thanks to the whole Realtime API team for making this possible. Please feel free
345 | to reach out, ask questions, or give feedback by creating an issue on the repository.
346 | You can also reach out and let us know what you think directly!
347 |
348 | - OpenAI Developers / [@OpenAIDevs](https://x.com/OpenAIDevs)
349 | - Jordan Sitkin / API / [@dustmason](https://x.com/dustmason)
350 | - Mark Hudnall / API / [@landakram](https://x.com/landakram)
351 | - Peter Bakkum / API / [@pbbakkum](https://x.com/pbbakkum)
352 | - Atty Eleti / API / [@athyuttamre](https://x.com/athyuttamre)
353 | - Jason Clark / API / [@onebitToo](https://x.com/onebitToo)
354 | - Karolis Kosas / Design / [@karoliskosas](https://x.com/karoliskosas)
355 | - Keith Horwood / API + DX / [@keithwhor](https://x.com/keithwhor)
356 | - Romain Huet / DX / [@romainhuet](https://x.com/romainhuet)
357 | - Katia Gil Guzman / DX / [@kagigz](https://x.com/kagigz)
358 | - Ilan Bigio / DX / [@ilanbigio](https://x.com/ilanbigio)
359 | - Kevin Whinnery / DX / [@kevinwhinnery](https://x.com/kevinwhinnery)
360 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openai-realtime-console",
3 | "version": "0.0.0",
4 | "type": "module",
5 | "private": true,
6 | "dependencies": {
7 | "@openai/realtime-api-beta": "github:openai/openai-realtime-api-beta",
8 | "@testing-library/jest-dom": "^5.17.0",
9 | "@testing-library/react": "^13.4.0",
10 | "@testing-library/user-event": "^13.5.0",
11 | "@types/jest": "^27.5.2",
12 | "@types/leaflet": "^1.9.12",
13 | "@types/node": "^16.18.108",
14 | "@types/react": "^18.3.5",
15 | "@types/react-dom": "^18.3.0",
16 | "dotenv": "^16.4.5",
17 | "leaflet": "^1.9.4",
18 | "react": "^18.3.1",
19 | "react-dom": "^18.3.1",
20 | "react-feather": "^2.0.10",
21 | "react-leaflet": "^4.2.1",
22 | "react-scripts": "^5.0.1",
23 | "sass": "^1.78.0",
24 | "save": "^2.9.0",
25 | "typescript": "^4.9.5",
26 | "web-vitals": "^2.1.4",
27 | "ws": "^8.18.0"
28 | },
29 | "scripts": {
30 | "start": "react-scripts start",
31 | "build": "react-scripts build",
32 | "test": "react-scripts test",
33 | "eject": "react-scripts eject",
34 | "zip": "zip -r realtime-api-console.zip . -x 'node_modules' 'node_modules/*' 'node_modules/**' '.git' '.git/*' '.git/**' '.DS_Store' '*/.DS_Store' 'package-lock.json' '*.zip' '*.tar.gz' '*.tar' '.env'",
35 | "relay": "nodemon ./relay-server/index.js"
36 | },
37 | "eslintConfig": {
38 | "extends": [
39 | "react-app",
40 | "react-app/jest"
41 | ]
42 | },
43 | "browserslist": {
44 | "production": [
45 | ">0.2%",
46 | "not dead",
47 | "not op_mini all"
48 | ],
49 | "development": [
50 | "last 1 chrome version",
51 | "last 1 firefox version",
52 | "last 1 safari version"
53 | ]
54 | },
55 | "devDependencies": {
56 | "@babel/plugin-proposal-private-property-in-object": "^7.21.11",
57 | "nodemon": "^3.1.7"
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="utf-8" />
5 |     <link rel="icon" href="%PUBLIC_URL%/openai-logomark.svg" />
6 |     <meta name="viewport" content="width=device-width, initial-scale=1" />
7 |     <title>realtime console</title>
8 |     <!-- Fonts used by the app styles (see /src): Roboto Mono and Assistant -->
9 |     <link rel="preconnect" href="https://fonts.googleapis.com" />
10 |     <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
11 |     <link
12 |       href="https://fonts.googleapis.com/css2?family=Assistant:wght@400;600&family=Roboto+Mono:wght@400;500&display=swap"
13 |       rel="stylesheet"
14 |     />
15 |   </head>
16 |   <body>
17 |     <noscript>You need to enable JavaScript to run this app.</noscript>
18 |     <div id="root"></div>
19 |   </body>
20 | </html>
--------------------------------------------------------------------------------
/public/openai-logomark.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/readme/realtime-console-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Calcium-Ion/openai-realtime-console/5d7b871000c90e2d2310fefa9ad3e0f99cb30d48/readme/realtime-console-demo.png
--------------------------------------------------------------------------------
/relay-server/index.js:
--------------------------------------------------------------------------------
1 | import { RealtimeRelay } from './lib/relay.js';
2 | import dotenv from 'dotenv';
3 | dotenv.config({ override: true });
4 |
5 | const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
6 |
7 | if (!OPENAI_API_KEY) {
8 | console.error(
9 | `Environment variable "OPENAI_API_KEY" is required.\n` +
10 | `Please set it in your .env file.`
11 | );
12 | process.exit(1);
13 | }
14 |
15 | const PORT = parseInt(process.env.PORT) || 8081;
16 |
17 | const relay = new RealtimeRelay(OPENAI_API_KEY);
18 | relay.listen(PORT);
19 |
--------------------------------------------------------------------------------
/relay-server/lib/relay.js:
--------------------------------------------------------------------------------
1 | import { WebSocketServer } from 'ws';
2 | import { RealtimeClient } from '@openai/realtime-api-beta';
3 |
4 | export class RealtimeRelay {
5 | constructor(apiKey) {
6 | this.apiKey = apiKey;
7 | this.sockets = new WeakMap();
8 | this.wss = null;
9 | }
10 |
11 | listen(port) {
12 | this.wss = new WebSocketServer({ port });
13 | this.wss.on('connection', this.connectionHandler.bind(this));
14 | this.log(`Listening on ws://localhost:${port}`);
15 | }
16 |
17 | async connectionHandler(ws, req) {
18 | if (!req.url) {
19 | this.log('No URL provided, closing connection.');
20 | ws.close();
21 | return;
22 | }
23 |
24 | const url = new URL(req.url, `http://${req.headers.host}`);
25 | const pathname = url.pathname;
26 |
27 | if (pathname !== '/') {
28 | this.log(`Invalid pathname: "${pathname}"`);
29 | ws.close();
30 | return;
31 | }
32 |
33 | // Instantiate new client
34 | this.log(`Connecting with key "${this.apiKey.slice(0, 3)}..."`);
35 | const client = new RealtimeClient({ apiKey: this.apiKey });
36 |
37 | // Relay: OpenAI Realtime API Event -> Browser Event
38 | client.realtime.on('server.*', (event) => {
39 | this.log(`Relaying "${event.type}" to Client`);
40 | ws.send(JSON.stringify(event));
41 | });
42 | client.realtime.on('close', () => ws.close());
43 |
44 | // Relay: Browser Event -> OpenAI Realtime API Event
45 | // We need to queue data waiting for the OpenAI connection
46 | const messageQueue = [];
47 | const messageHandler = (data) => {
48 | try {
49 | const event = JSON.parse(data);
50 | this.log(`Relaying "${event.type}" to OpenAI`);
51 | client.realtime.send(event.type, event);
52 | } catch (e) {
53 | console.error(e.message);
54 | this.log(`Error parsing event from client: ${data}`);
55 | }
56 | };
57 | ws.on('message', (data) => {
58 | if (!client.isConnected()) {
59 | messageQueue.push(data);
60 | } else {
61 | messageHandler(data);
62 | }
63 | });
64 | ws.on('close', () => client.disconnect());
65 |
66 | // Connect to OpenAI Realtime API
67 | try {
68 | this.log(`Connecting to OpenAI...`);
69 | await client.connect();
70 | } catch (e) {
71 | this.log(`Error connecting to OpenAI: ${e.message}`);
72 | ws.close();
73 | return;
74 | }
75 | this.log(`Connected to OpenAI successfully!`);
76 | while (messageQueue.length) {
77 | messageHandler(messageQueue.shift());
78 | }
79 | }
80 |
81 | log(...args) {
82 | console.log(`[RealtimeRelay]`, ...args);
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/src/App.scss:
--------------------------------------------------------------------------------
1 | [data-component='App'] {
2 | height: 100%;
3 | width: 100%;
4 | position: relative;
5 | }
6 |
--------------------------------------------------------------------------------
/src/App.tsx:
--------------------------------------------------------------------------------
1 | import { ConsolePage } from './pages/ConsolePage';
2 | import './App.scss';
3 |
4 | function App() {
5 |   return (
6 |     <div data-component="App">
7 |       <ConsolePage />
8 |     </div>
9 |   );
10 | }
11 |
12 | export default App;
13 |
--------------------------------------------------------------------------------
/src/components/Map.scss:
--------------------------------------------------------------------------------
1 | [data-component='Map'] {
2 | position: absolute;
3 | width: 100%;
4 | height: 100%;
5 | .leaflet-container {
6 | height: 100%;
7 | width: 100%;
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/src/components/Map.tsx:
--------------------------------------------------------------------------------
1 | import { MapContainer, TileLayer, Marker, Popup, useMap } from 'react-leaflet';
2 | import { LatLngTuple } from 'leaflet';
3 | import './Map.scss';
4 |
5 | function ChangeView({ center, zoom }: { center: LatLngTuple; zoom: number }) {
6 | const map = useMap();
7 | map.setView(center, zoom);
8 | return null;
9 | }
10 |
11 | export function Map({
12 | center,
13 | location = 'My Location',
14 | }: {
15 | center: LatLngTuple;
16 | location?: string;
17 | }) {
18 | return (
19 |     <div data-component="Map">
20 |       <MapContainer
21 |         center={center}
22 |         zoom={11}
23 |         scrollWheelZoom={false}
24 |         zoomControl={false}
25 |         attributionControl={false}
26 |       >
27 |         <ChangeView center={center} zoom={11} />
28 |         <TileLayer url="https://tile.openstreetmap.org/{z}/{x}/{y}.png" />
29 |         <Marker position={center}>
30 |           <Popup>{location}</Popup>
31 |         </Marker>
32 |       </MapContainer>
33 |     </div>
34 | );
35 | }
36 |
--------------------------------------------------------------------------------
/src/components/button/Button.scss:
--------------------------------------------------------------------------------
1 | [data-component='Button'] {
2 | display: flex;
3 | align-items: center;
4 | gap: 8px;
5 | font-family: 'Roboto Mono', monospace;
6 | font-size: 12px;
7 | font-optical-sizing: auto;
8 | font-weight: 400;
9 | font-style: normal;
10 | border: none;
11 | background-color: #ececf1;
12 | color: #101010;
13 | border-radius: 1000px;
14 | padding: 8px 24px;
15 | min-height: 42px;
16 | transition: transform 0.1s ease-in-out, background-color 0.1s ease-in-out;
17 | outline: none;
18 |
19 | &.button-style-action {
20 | background-color: #101010;
21 | color: #ececf1;
22 | &:hover:not([disabled]) {
23 | background-color: #404040;
24 | }
25 | }
26 |
27 | &.button-style-alert {
28 | background-color: #f00;
29 | color: #ececf1;
30 | &:hover:not([disabled]) {
31 | background-color: #f00;
32 | }
33 | }
34 |
35 | &.button-style-flush {
36 | background-color: rgba(255, 255, 255, 0);
37 | }
38 |
39 | &[disabled] {
40 | color: #999;
41 | }
42 |
43 | &:not([disabled]) {
44 | cursor: pointer;
45 | }
46 |
47 | &:hover:not([disabled]) {
48 | background-color: #d8d8d8;
49 | }
50 |
51 | &:active:not([disabled]) {
52 | transform: translateY(1px);
53 | }
54 |
55 | .icon {
56 | display: flex;
57 | &.icon-start {
58 | margin-left: -8px;
59 | }
60 | &.icon-end {
61 | margin-right: -8px;
62 | }
63 | svg {
64 | width: 16px;
65 | height: 16px;
66 | }
67 | }
68 |
69 | &.icon-red .icon {
70 | color: #cc0000;
71 | }
72 | &.icon-green .icon {
73 | color: #009900;
74 | }
75 | &.icon-grey .icon {
76 | color: #909090;
77 | }
78 | &.icon-fill {
79 | svg {
80 | fill: currentColor;
81 | }
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/src/components/button/Button.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import './Button.scss';
3 |
4 | import { Icon } from 'react-feather';
5 |
6 | interface ButtonProps extends React.ButtonHTMLAttributes<HTMLButtonElement> {
7 | label?: string;
8 | icon?: Icon;
9 | iconPosition?: 'start' | 'end';
10 | iconColor?: 'red' | 'green' | 'grey';
11 | iconFill?: boolean;
12 | buttonStyle?: 'regular' | 'action' | 'alert' | 'flush';
13 | }
14 |
15 | export function Button({
16 | label = 'Okay',
17 | icon = void 0,
18 | iconPosition = 'start',
19 | iconColor = void 0,
20 | iconFill = false,
21 | buttonStyle = 'regular',
22 | ...rest
23 | }: ButtonProps) {
24 | const StartIcon = iconPosition === 'start' ? icon : null;
25 | const EndIcon = iconPosition === 'end' ? icon : null;
26 | const classList = [];
27 | if (iconColor) {
28 | classList.push(`icon-${iconColor}`);
29 | }
30 | if (iconFill) {
31 | classList.push(`icon-fill`);
32 | }
33 | classList.push(`button-style-${buttonStyle}`);
34 |
35 | return (
36 |     <button data-component="Button" className={classList.join(' ')} {...rest}>
37 |       {StartIcon && (
38 |         <span className="icon icon-start">
39 |           <StartIcon />
40 |         </span>
41 |       )}
42 |       {label}
43 |       {EndIcon && (
44 |         <span className="icon icon-end">
45 |           <EndIcon />
46 |         </span>
47 |       )}
48 |     </button>
49 | );
50 | }
51 |
--------------------------------------------------------------------------------
/src/components/toggle/Toggle.scss:
--------------------------------------------------------------------------------
1 | [data-component='Toggle'] {
2 | position: relative;
3 | display: flex;
4 | align-items: center;
5 | gap: 8px;
6 | cursor: pointer;
7 | overflow: hidden;
8 |
9 | background-color: #ececf1;
10 | color: #101010;
11 | height: 40px;
12 | border-radius: 1000px;
13 |
14 | &:hover {
15 | background-color: #d8d8d8;
16 | }
17 |
18 | div.label {
19 | position: relative;
20 | color: #666;
21 | transition: color 0.1s ease-in-out;
22 | padding: 0px 16px;
23 | z-index: 2;
24 | user-select: none;
25 | }
26 |
27 | div.label.right {
28 | margin-left: -8px;
29 | }
30 |
31 | .toggle-background {
32 | background-color: #101010;
33 | position: absolute;
34 | top: 0px;
35 | left: 0px;
36 | width: auto;
37 | bottom: 0px;
38 | z-index: 1;
39 | border-radius: 1000px;
40 | transition: left 0.1s ease-in-out, width 0.1s ease-in-out;
41 | }
42 |
43 | &[data-enabled='true'] {
44 | div.label.right {
45 | color: #fff;
46 | }
47 | }
48 |
49 | &[data-enabled='false'] {
50 | div.label.left {
51 | color: #fff;
52 | }
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/src/components/toggle/Toggle.tsx:
--------------------------------------------------------------------------------
1 | import { useState, useEffect, useRef } from 'react';
2 |
3 | import './Toggle.scss';
4 |
5 | export function Toggle({
6 | defaultValue = false,
7 | values,
8 | labels,
9 | onChange = () => {},
10 | }: {
11 | defaultValue?: string | boolean;
12 | values?: string[];
13 | labels?: string[];
14 | onChange?: (isEnabled: boolean, value: string) => void;
15 | }) {
16 | if (typeof defaultValue === 'string') {
17 | defaultValue = !!Math.max(0, (values || []).indexOf(defaultValue));
18 | }
19 |
20 |   const leftRef = useRef<HTMLDivElement>(null);
21 |   const rightRef = useRef<HTMLDivElement>(null);
22 |   const bgRef = useRef<HTMLDivElement>(null);
23 |   const [value, setValue] = useState<boolean>(defaultValue);
24 |
25 | const toggleValue = () => {
26 | const v = !value;
27 | const index = +v;
28 | setValue(v);
29 | onChange(v, (values || [])[index]);
30 | };
31 |
32 | useEffect(() => {
33 | const leftEl = leftRef.current;
34 | const rightEl = rightRef.current;
35 | const bgEl = bgRef.current;
36 | if (leftEl && rightEl && bgEl) {
37 | if (value) {
38 | bgEl.style.left = rightEl.offsetLeft + 'px';
39 | bgEl.style.width = rightEl.offsetWidth + 'px';
40 | } else {
41 | bgEl.style.left = '';
42 | bgEl.style.width = leftEl.offsetWidth + 'px';
43 | }
44 | }
45 | }, [value]);
46 |
47 | return (
48 |     <div
49 |       data-component="Toggle"
50 |       onClick={toggleValue}
51 |       data-enabled={value.toString()}
52 |     >
53 |       {labels && (
54 |         <div className="label left" ref={leftRef}>
55 |           {labels[0]}
56 |         </div>
57 |       )}
58 |       {labels && (
59 |         <div className="label right" ref={rightRef}>
60 |           {labels[1]}
61 |         </div>
62 |       )}
63 |       <div className="toggle-background" ref={bgRef} />
64 |     </div>
65 | );
66 | }
67 |
--------------------------------------------------------------------------------
/src/index.css:
--------------------------------------------------------------------------------
1 | html,
2 | body {
3 | padding: 0px;
4 | margin: 0px;
5 | position: relative;
6 | width: 100%;
7 | height: 100%;
8 | font-family: 'Assistant', sans-serif;
9 | font-optical-sizing: auto;
10 | font-weight: 400;
11 | font-style: normal;
12 | color: #18181b;
13 | -webkit-font-smoothing: antialiased;
14 | -moz-osx-font-smoothing: grayscale;
15 | }
16 |
17 | #root {
18 | position: relative;
19 | width: 100%;
20 | height: 100%;
21 | }
22 |
--------------------------------------------------------------------------------
/src/index.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom/client';
3 | import './index.css';
4 | import App from './App';
5 | import reportWebVitals from './reportWebVitals';
6 |
7 | const root = ReactDOM.createRoot(
8 | document.getElementById('root') as HTMLElement
9 | );
10 | root.render(
11 |   <React.StrictMode>
12 |     <App />
13 |   </React.StrictMode>
14 | );
15 |
16 | // If you want to start measuring performance in your app, pass a function
17 | // to log results (for example: reportWebVitals(console.log))
18 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
19 | reportWebVitals();
20 |
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/index.d.ts:
--------------------------------------------------------------------------------
1 | import { AudioAnalysis } from './lib/analysis/audio_analysis.js';
2 | import { WavPacker } from './lib/wav_packer.js';
3 | import { WavStreamPlayer } from './lib/wav_stream_player.js';
4 | import { WavRecorder } from './lib/wav_recorder.js';
5 | export { AudioAnalysis, WavPacker, WavStreamPlayer, WavRecorder };
6 | //# sourceMappingURL=index.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/index.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../index.js"],"names":[],"mappings":"8BAC8B,kCAAkC;0BADtC,qBAAqB;gCAEf,4BAA4B;4BAChC,uBAAuB"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/analysis/audio_analysis.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Output of AudioAnalysis for the frequency domain of the audio
3 | * @typedef {Object} AudioAnalysisOutputType
4 | * @property {Float32Array} values Amplitude of this frequency between {0, 1} inclusive
5 | * @property {number[]} frequencies Raw frequency bucket values
6 | * @property {string[]} labels Labels for the frequency bucket values
7 | */
8 | /**
9 | * Analyzes audio for visual output
10 | * @class
11 | */
12 | export class AudioAnalysis {
13 | /**
14 | * Retrieves frequency domain data from an AnalyserNode adjusted to a decibel range
15 | * returns human-readable formatting and labels
16 | * @param {AnalyserNode} analyser
17 | * @param {number} sampleRate
18 | * @param {Float32Array} [fftResult]
19 | * @param {"frequency"|"music"|"voice"} [analysisType]
20 | * @param {number} [minDecibels] default -100
21 | * @param {number} [maxDecibels] default -30
22 | * @returns {AudioAnalysisOutputType}
23 | */
24 | static getFrequencies(analyser: AnalyserNode, sampleRate: number, fftResult?: Float32Array, analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): AudioAnalysisOutputType;
25 | /**
26 | * Creates a new AudioAnalysis instance for an HTMLAudioElement
27 | * @param {HTMLAudioElement} audioElement
28 | * @param {AudioBuffer|null} [audioBuffer] If provided, will cache all frequency domain data from the buffer
29 | * @returns {AudioAnalysis}
30 | */
31 | constructor(audioElement: HTMLAudioElement, audioBuffer?: AudioBuffer | null);
32 | fftResults: any[];
33 | audio: HTMLAudioElement;
34 | context: any;
35 | analyser: any;
36 | sampleRate: any;
37 | audioBuffer: any;
38 | /**
39 | * Gets the current frequency domain data from the playing audio track
40 | * @param {"frequency"|"music"|"voice"} [analysisType]
41 | * @param {number} [minDecibels] default -100
42 | * @param {number} [maxDecibels] default -30
43 | * @returns {AudioAnalysisOutputType}
44 | */
45 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): AudioAnalysisOutputType;
46 | /**
47 | * Resume the internal AudioContext if it was suspended due to the lack of
48 | * user interaction when the AudioAnalysis was instantiated.
49 | * @returns {Promise<true>}
50 | */
51 | resumeIfSuspended(): Promise<true>;
52 | }
53 | /**
54 | * Output of AudioAnalysis for the frequency domain of the audio
55 | */
56 | export type AudioAnalysisOutputType = {
57 | /**
58 | * Amplitude of this frequency between {0, 1} inclusive
59 | */
60 | values: Float32Array;
61 | /**
62 | * Raw frequency bucket values
63 | */
64 | frequencies: number[];
65 | /**
66 | * Labels for the frequency bucket values
67 | */
68 | labels: string[];
69 | };
70 | //# sourceMappingURL=audio_analysis.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/analysis/audio_analysis.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"audio_analysis.d.ts","sourceRoot":"","sources":["../../../lib/analysis/audio_analysis.js"],"names":[],"mappings":"AAOA;;;;;;GAMG;AAEH;;;GAGG;AACH;IACE;;;;;;;;;;OAUG;IACH,gCARW,YAAY,cACZ,MAAM,cACN,YAAY,iBACZ,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,uBAAuB,CAwDnC;IAED;;;;;OAKG;IACH,0BAJW,gBAAgB,gBAChB,WAAW,GAAC,IAAI,EAkE1B;IA9DC,kBAAoB;IA2ClB,wBAAyB;IACzB,aAAkC;IAClC,cAAwB;IACxB,gBAA4B;IAC5B,iBAA8B;IAiBlC;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,uBAAuB,CAwBnC;IAED;;;;OAIG;IACH,qBAFa,OAAO,CAAC,IAAI,CAAC,CAOzB;CACF;;;;;;;;YA9La,YAAY;;;;iBACZ,MAAM,EAAE;;;;YACR,MAAM,EAAE"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/analysis/constants.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * All note frequencies from 1st to 8th octave
3 | * in format "A#8" (A#, 8th octave)
4 | */
5 | export const noteFrequencies: any[];
6 | export const noteFrequencyLabels: any[];
7 | export const voiceFrequencies: any[];
8 | export const voiceFrequencyLabels: any[];
9 | //# sourceMappingURL=constants.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/analysis/constants.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"constants.d.ts","sourceRoot":"","sources":["../../../lib/analysis/constants.js"],"names":[],"mappings":"AA6BA;;;GAGG;AACH,oCAAkC;AAClC,wCAAsC;AActC,qCAKG;AACH,yCAKG"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_packer.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Raw wav audio file contents
3 | * @typedef {Object} WavPackerAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {number} channelCount
7 | * @property {number} sampleRate
8 | * @property {number} duration
9 | */
10 | /**
11 | * Utility class for assembling PCM16 "audio/wav" data
12 | * @class
13 | */
14 | export class WavPacker {
15 | /**
16 | * Converts Float32Array of amplitude data to ArrayBuffer in Int16Array format
17 | * @param {Float32Array} float32Array
18 | * @returns {ArrayBuffer}
19 | */
20 | static floatTo16BitPCM(float32Array: Float32Array): ArrayBuffer;
21 | /**
22 | * Concatenates two ArrayBuffers
23 | * @param {ArrayBuffer} leftBuffer
24 | * @param {ArrayBuffer} rightBuffer
25 | * @returns {ArrayBuffer}
26 | */
27 | static mergeBuffers(leftBuffer: ArrayBuffer, rightBuffer: ArrayBuffer): ArrayBuffer;
28 | /**
29 | * Packs data into an Int16 format
30 | * @private
31 | * @param {number} size 0 = 1x Int16, 1 = 2x Int16
32 | * @param {number} arg value to pack
33 | * @returns
34 | */
35 | private _packData;
36 | /**
37 | * Packs audio into "audio/wav" Blob
38 | * @param {number} sampleRate
39 | * @param {{bitsPerSample: number, channels: Array<Float32Array>, data: Int16Array}} audio
40 | * @returns {WavPackerAudioType}
41 | */
42 | pack(sampleRate: number, audio: {
43 | bitsPerSample: number;
44 |         channels: Array<Float32Array>;
45 | data: Int16Array;
46 | }): WavPackerAudioType;
47 | }
48 | /**
49 | * Raw wav audio file contents
50 | */
51 | export type WavPackerAudioType = {
52 | blob: Blob;
53 | url: string;
54 | channelCount: number;
55 | sampleRate: number;
56 | duration: number;
57 | };
58 | //# sourceMappingURL=wav_packer.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_packer.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_packer.d.ts","sourceRoot":"","sources":["../../lib/wav_packer.js"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH;;;GAGG;AACH;IACE;;;;OAIG;IACH,qCAHW,YAAY,GACV,WAAW,CAWvB;IAED;;;;;OAKG;IACH,gCAJW,WAAW,eACX,WAAW,GACT,WAAW,CASvB;IAED;;;;;;OAMG;IACH,kBAKC;IAED;;;;;OAKG;IACH,iBAJW,MAAM,SACN;QAAC,aAAa,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,KAAK,CAAC,YAAY,CAAC,CAAC;QAAC,IAAI,EAAE,UAAU,CAAA;KAAC,GACtE,kBAAkB,CA6C9B;CACF;;;;;UA3Ga,IAAI;SACJ,MAAM;kBACN,MAAM;gBACN,MAAM;cACN,MAAM"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_recorder.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Decodes audio into a wav file
3 | * @typedef {Object} DecodedAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {Float32Array} values
7 | * @property {AudioBuffer} audioBuffer
8 | */
9 | /**
10 | * Records live stream of user audio as PCM16 "audio/wav" data
11 | * @class
12 | */
13 | export class WavRecorder {
14 | /**
15 | * Decodes audio data from multiple formats to a Blob, url, Float32Array and AudioBuffer
16 | * @param {Blob|Float32Array|Int16Array|ArrayBuffer|number[]} audioData
17 | * @param {number} sampleRate
18 | * @param {number} fromSampleRate
19 | * @returns {Promise<DecodedAudioType>}
20 | */
21 | static decode(audioData: Blob | Float32Array | Int16Array | ArrayBuffer | number[], sampleRate?: number, fromSampleRate?: number): Promise<DecodedAudioType>;
22 | /**
23 | * Create a new WavRecorder instance
24 | * @param {{sampleRate?: number, outputToSpeakers?: boolean, debug?: boolean}} [options]
25 | * @returns {WavRecorder}
26 | */
27 | constructor({ sampleRate, outputToSpeakers, debug, }?: {
28 | sampleRate?: number;
29 | outputToSpeakers?: boolean;
30 | debug?: boolean;
31 | });
32 | scriptSrc: any;
33 | sampleRate: number;
34 | outputToSpeakers: boolean;
35 | debug: boolean;
36 | _deviceChangeCallback: () => Promise<void>;
37 | _devices: any[];
38 | stream: any;
39 | processor: any;
40 | source: any;
41 | node: any;
42 | recording: boolean;
43 | _lastEventId: number;
44 | eventReceipts: {};
45 | eventTimeout: number;
46 | _chunkProcessor: () => void;
47 | _chunkProcessorBuffer: {
48 | raw: ArrayBuffer;
49 | mono: ArrayBuffer;
50 | };
51 | /**
52 | * Logs data in debug mode
53 | * @param {...any} arguments
54 | * @returns {true}
55 | */
56 | log(...args: any[]): true;
57 | /**
58 | * Retrieves the current sampleRate for the recorder
59 | * @returns {number}
60 | */
61 | getSampleRate(): number;
62 | /**
63 | * Retrieves the current status of the recording
64 | * @returns {"ended"|"paused"|"recording"}
65 | */
66 | getStatus(): "ended" | "paused" | "recording";
67 | /**
68 | * Sends an event to the AudioWorklet
69 | * @private
70 | * @param {string} name
71 | * @param {{[key: string]: any}} data
72 | * @param {AudioWorkletNode} [_processor]
73 | * @returns {Promise<{[key: string]: any}>}
74 | */
75 | private _event;
76 | /**
77 | * Sets device change callback, remove if callback provided is `null`
78 | * @param {(Array<MediaDeviceInfo & {default: boolean}>): void|null} callback
79 | * @returns {true}
80 | */
81 | listenForDeviceChange(callback: any): true;
82 | /**
83 | * Manually request permission to use the microphone
84 | * @returns {Promise<true>}
85 | */
86 | requestPermission(): Promise<true>;
87 | /**
88 | * List all eligible devices for recording, will request permission to use microphone
89 | * @returns {Promise<Array<MediaDeviceInfo & {default: boolean}>>}
90 | */
91 | listDevices(): Promise<Array<MediaDeviceInfo & {
92 |     default: boolean;
93 | }>>;
94 | /**
95 | * Begins a recording session and requests microphone permissions if not already granted
96 | * Microphone recording indicator will appear on browser tab but status will be "paused"
97 | * @param {string} [deviceId] if no device provided, default device will be used
98 | * @returns {Promise<true>}
99 | */
100 | begin(deviceId?: string): Promise<true>;
101 | analyser: any;
102 | /**
103 | * Gets the current frequency domain data from the recording track
104 | * @param {"frequency"|"music"|"voice"} [analysisType]
105 | * @param {number} [minDecibels] default -100
106 | * @param {number} [maxDecibels] default -30
107 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
108 | */
109 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): import("./analysis/audio_analysis.js").AudioAnalysisOutputType;
110 | /**
111 | * Pauses the recording
112 | * Keeps microphone stream open but halts storage of audio
113 | * @returns {Promise<true>}
114 | */
115 | pause(): Promise<true>;
116 | /**
117 | * Start recording stream and storing to memory from the connected audio source
118 | * @param {(data: { mono: Int16Array; raw: Int16Array }) => any} [chunkProcessor]
119 | * @param {number} [chunkSize] chunkProcessor will not be triggered until this size threshold met in mono audio
120 | * @returns {Promise<true>}
121 | */
122 | record(chunkProcessor?: (data: {
123 | mono: Int16Array;
124 | raw: Int16Array;
125 | }) => any, chunkSize?: number): Promise<true>;
126 | _chunkProcessorSize: number;
127 | /**
128 | * Clears the audio buffer, empties stored recording
129 | * @returns {Promise<true>}
130 | */
131 | clear(): Promise<true>;
132 | /**
133 | * Reads the current audio stream data
134 | * @returns {Promise<{meanValues: Float32Array, channels: Array<Float32Array>}>}
135 | */
136 | read(): Promise<{
137 | meanValues: Float32Array;
138 |     channels: Array<Float32Array>;
139 | }>;
140 | /**
141 | * Saves the current audio stream to a file
142 | * @param {boolean} [force] Force saving while still recording
143 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
144 | */
145 | save(force?: boolean): Promise<import("./wav_packer.js").WavPackerAudioType>;
146 | /**
147 | * Ends the current recording session and saves the result
148 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
149 | */
150 | end(): Promise<import("./wav_packer.js").WavPackerAudioType>;
151 | /**
152 | * Performs a full cleanup of WavRecorder instance
153 | * Stops actively listening via microphone and removes existing listeners
154 | * @returns {Promise<true>}
155 | */
156 | quit(): Promise<true>;
157 | }
158 | /**
159 | * Decodes audio into a wav file
160 | */
161 | export type DecodedAudioType = {
162 | blob: Blob;
163 | url: string;
164 | values: Float32Array;
165 | audioBuffer: AudioBuffer;
166 | };
167 | //# sourceMappingURL=wav_recorder.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_recorder.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_recorder.d.ts","sourceRoot":"","sources":["../../lib/wav_recorder.js"],"names":[],"mappings":"AAIA;;;;;;;GAOG;AAEH;;;GAGG;AACH;IAsCE;;;;;;OAMG;IACH,yBALW,IAAI,GAAC,YAAY,GAAC,UAAU,GAAC,WAAW,GAAC,MAAM,EAAE,eACjD,MAAM,mBACN,MAAM,GACJ,OAAO,CAAC,gBAAgB,CAAC,CAqErC;IA/GD;;;;OAIG;IACH,uDAHW;QAAC,UAAU,CAAC,EAAE,MAAM,CAAC;QAAC,gBAAgB,CAAC,EAAE,OAAO,CAAC;QAAC,KAAK,CAAC,EAAE,OAAO,CAAA;KAAC,EAiC5E;IAxBC,eAAkC;IAElC,mBAA4B;IAC5B,0BAAwC;IACxC,eAAoB;IACpB,2CAAiC;IACjC,gBAAkB;IAElB,YAAkB;IAClB,eAAqB;IACrB,YAAkB;IAClB,UAAgB;IAChB,mBAAsB;IAEtB,qBAAqB;IACrB,kBAAuB;IACvB,qBAAwB;IAExB,4BAA+B;IAE/B;;;MAGC;IA+EH;;;;OAIG;IACH,qBAFa,IAAI,CAOhB;IAED;;;OAGG;IACH,iBAFa,MAAM,CAIlB;IAED;;;OAGG;IACH,aAFa,OAAO,GAAC,QAAQ,GAAC,WAAW,CAUxC;IAED;;;;;;;OAOG;IACH,eAqBC;IAED;;;;OAIG;IACH,sCAFa,IAAI,CAmChB;IAED;;;OAGG;IACH,qBAFa,OAAO,CAAC,IAAI,CAAC,CAoBzB;IAED;;;OAGG;IACH,eAFa,OAAO,CAAC,KAAK,CAAC,eAAe,GAAG;QAAC,OAAO,EAAE,OAAO,CAAA;KAAC,CAAC,CAAC,CA8BhE;IAED;;;;;OAKG;IACH,iBAHW,MAAM,GACJ,OAAO,CAAC,IAAI,CAAC,CAkFzB;IAHC,cAAwB;IAK1B;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,OAAO,8BAA8B,EAAE,uBAAuB,CAkB1E;IAED;;;;OAIG;IACH,SAFa,OAAO,CAAC,IAAI,CAAC,CAezB;IAED;;;;;OAKG;IACH,wBAJW,CAAC,IAAI,EAAE;QAAE,IAAI,EAAE,UAAU,CAAC;QAAC,GAAG,EAAE,UAAU,CAAA;KAAE,KAAK,GAAG,cACpD,MAAM,GACJ,OAAO,CAAC,IAAI,CAAC,CAoBzB;IATC,4BAAoC;IAWtC;;;OAGG;IACH,SAFa,OAAO,CAAC,IAAI,CAAC,CAQzB;IAED;;;OAGG;IACH,QAFa,OAAO,CAAC;QAAC,UAAU,EAAE,YAAY,CAAC;QAAC,QAAQ,EAAE,KAAK,CAAC,YAAY,CAAC,CAAA;KAAC,CAAC,CAS9E;IAED;;;;OAIG;IACH,aAHW,OAAO,GACL,OAAO,CAAC,OAAO,iBAAiB,EAAE,kBAAkB,CAAC,CAgBjE;IAED;;;OAGG;IACH,OAFa,OAAO,CAAC,OAAO,iBAAiB,EAAE,kBAAkB,CAAC,CA8BjE;IAED;;;;OAIG;IACH,QAFa,OAAO,CAAC,IAAI,CAAC,CAQzB;CACF;;;;;UA1hBa,IAAI;SACJ,MAAM;YACN,YAAY;iBACZ,WAAW"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_stream_player.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Plays audio streams received in raw PCM16 chunks from the browser
3 | * @class
4 | */
5 | export class WavStreamPlayer {
6 | /**
7 | * Creates a new WavStreamPlayer instance
8 | * @param {{sampleRate?: number}} options
9 | * @returns {WavStreamPlayer}
10 | */
11 | constructor({ sampleRate }?: {
12 | sampleRate?: number;
13 | });
14 | scriptSrc: any;
15 | sampleRate: number;
16 | context: any;
17 | stream: any;
18 | analyser: any;
19 | trackSampleOffsets: {};
20 | interruptedTrackIds: {};
21 | /**
22 | * Connects the audio context and enables output to speakers
23 | * @returns {Promise<true>}
24 | */
25 | connect(): Promise<true>;
26 | /**
27 | * Gets the current frequency domain data from the playing track
28 | * @param {"frequency"|"music"|"voice"} [analysisType]
29 | * @param {number} [minDecibels] default -100
30 | * @param {number} [maxDecibels] default -30
31 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
32 | */
33 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): import("./analysis/audio_analysis.js").AudioAnalysisOutputType;
34 | /**
35 | * Starts audio streaming
36 | * @private
37 | * @returns {Promise<true>}
38 | */
39 | private _start;
40 | /**
41 | * Adds 16BitPCM data to the currently playing audio stream
42 | * You can add chunks beyond the current play point and they will be queued for play
43 | * @param {ArrayBuffer|Int16Array} arrayBuffer
44 | * @param {string} [trackId]
45 | * @returns {Int16Array}
46 | */
47 | add16BitPCM(arrayBuffer: ArrayBuffer | Int16Array, trackId?: string): Int16Array;
48 | /**
49 | * Gets the offset (sample count) of the currently playing stream
50 | * @param {boolean} [interrupt]
51 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
52 | */
53 | getTrackSampleOffset(interrupt?: boolean): {
54 | trackId: string | null;
55 | offset: number;
56 | currentTime: number;
57 | };
58 | /**
59 | * Strips the current stream and returns the sample offset of the audio
60 | * @param {boolean} [interrupt]
61 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
62 | */
63 | interrupt(): {
64 | trackId: string | null;
65 | offset: number;
66 | currentTime: number;
67 | };
68 | }
69 | //# sourceMappingURL=wav_stream_player.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/wav_stream_player.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_stream_player.d.ts","sourceRoot":"","sources":["../../lib/wav_stream_player.js"],"names":[],"mappings":"AAGA;;;GAGG;AACH;IACE;;;;OAIG;IACH,6BAHW;QAAC,UAAU,CAAC,EAAE,MAAM,CAAA;KAAC,EAW/B;IAPC,eAAmC;IACnC,mBAA4B;IAC5B,aAAmB;IACnB,YAAkB;IAClB,cAAoB;IACpB,uBAA4B;IAC5B,wBAA6B;IAG/B;;;OAGG;IACH,WAFa,OAAO,CAAC,IAAI,CAAC,CAkBzB;IAED;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,OAAO,8BAA8B,EAAE,uBAAuB,CAkB1E;IAED;;;;OAIG;IACH,eAkBC;IAED;;;;;;OAMG;IACH,yBAJW,WAAW,GAAC,UAAU,YACtB,MAAM,GACJ,UAAU,CAqBtB;IAED;;;;OAIG;IACH,iCAHW,OAAO,GACL;QAAC,OAAO,EAAE,MAAM,GAAC,IAAI,CAAC;QAAC,MAAM,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAC,CAqBvE;IAED;;;;OAIG;IACH,aAFa;QAAC,OAAO,EAAE,MAAM,GAAC,IAAI,CAAC;QAAC,MAAM,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAC,CAIvE;CACF"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/worklets/audio_processor.d.ts:
--------------------------------------------------------------------------------
1 | export const AudioProcessorSrc: any;
2 | //# sourceMappingURL=audio_processor.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/worklets/audio_processor.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"audio_processor.d.ts","sourceRoot":"","sources":["../../../lib/worklets/audio_processor.js"],"names":[],"mappings":"AAqNA,oCAAqC"}
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/worklets/stream_processor.d.ts:
--------------------------------------------------------------------------------
1 | export const StreamProcessorWorklet: "\nclass StreamProcessor extends AudioWorkletProcessor {\n constructor() {\n super();\n this.hasStarted = false;\n this.hasInterrupted = false;\n this.outputBuffers = [];\n this.bufferLength = 128;\n this.write = { buffer: new Float32Array(this.bufferLength), trackId: null };\n this.writeOffset = 0;\n this.trackSampleOffsets = {};\n this.port.onmessage = (event) => {\n if (event.data) {\n const payload = event.data;\n if (payload.event === 'write') {\n const int16Array = payload.buffer;\n const float32Array = new Float32Array(int16Array.length);\n for (let i = 0; i < int16Array.length; i++) {\n float32Array[i] = int16Array[i] / 0x8000; // Convert Int16 to Float32\n }\n this.writeData(float32Array, payload.trackId);\n } else if (\n payload.event === 'offset' ||\n payload.event === 'interrupt'\n ) {\n const requestId = payload.requestId;\n const trackId = this.write.trackId;\n const offset = this.trackSampleOffsets[trackId] || 0;\n this.port.postMessage({\n event: 'offset',\n requestId,\n trackId,\n offset,\n });\n if (payload.event === 'interrupt') {\n this.hasInterrupted = true;\n }\n } else {\n throw new Error(`Unhandled event \"${payload.event}\"`);\n }\n }\n };\n }\n\n writeData(float32Array, trackId = null) {\n let { buffer } = this.write;\n let offset = this.writeOffset;\n for (let i = 0; i < float32Array.length; i++) {\n buffer[offset++] = float32Array[i];\n if (offset >= buffer.length) {\n this.outputBuffers.push(this.write);\n this.write = { buffer: new Float32Array(this.bufferLength), trackId };\n buffer = this.write.buffer;\n offset = 0;\n }\n }\n this.writeOffset = offset;\n return true;\n }\n\n process(inputs, outputs, parameters) {\n const output = outputs[0];\n const outputChannelData = output[0];\n const outputBuffers = this.outputBuffers;\n if (this.hasInterrupted) {\n this.port.postMessage({ event: 'stop' });\n return false;\n } else if (outputBuffers.length) {\n this.hasStarted = true;\n const { buffer, trackId } = outputBuffers.shift();\n for (let i = 0; i < outputChannelData.length; i++) {\n outputChannelData[i] = buffer[i] || 0;\n }\n if (trackId) {\n this.trackSampleOffsets[trackId] =\n this.trackSampleOffsets[trackId] || 0;\n this.trackSampleOffsets[trackId] += buffer.length;\n }\n return true;\n } else if (this.hasStarted) {\n this.port.postMessage({ event: 'stop' });\n return false;\n } else {\n return true;\n }\n }\n}\n\nregisterProcessor('stream_processor', StreamProcessor);\n";
2 | export const StreamProcessorSrc: any;
3 | //# sourceMappingURL=stream_processor.d.ts.map
--------------------------------------------------------------------------------
/src/lib/wavtools/dist/lib/worklets/stream_processor.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"stream_processor.d.ts","sourceRoot":"","sources":["../../../lib/worklets/stream_processor.js"],"names":[],"mappings":"AAAA,q4FAyFE;AAMF,qCAAsC"}
--------------------------------------------------------------------------------
/src/lib/wavtools/index.js:
--------------------------------------------------------------------------------
1 | import { WavPacker } from './lib/wav_packer.js';
2 | import { AudioAnalysis } from './lib/analysis/audio_analysis.js';
3 | import { WavStreamPlayer } from './lib/wav_stream_player.js';
4 | import { WavRecorder } from './lib/wav_recorder.js';
5 |
6 | export { AudioAnalysis, WavPacker, WavStreamPlayer, WavRecorder };
7 |
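
A minimal consumption sketch for the barrel file above (illustrative; this mirrors how ConsolePage.tsx later in this repository imports the tools):

    import { WavRecorder, WavStreamPlayer } from '../lib/wavtools/index.js';
    const wavRecorder = new WavRecorder({ sampleRate: 24000 });
    const wavStreamPlayer = new WavStreamPlayer({ sampleRate: 24000 });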
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/analysis/audio_analysis.js:
--------------------------------------------------------------------------------
1 | import {
2 | noteFrequencies,
3 | noteFrequencyLabels,
4 | voiceFrequencies,
5 | voiceFrequencyLabels,
6 | } from './constants.js';
7 |
8 | /**
9 | * Output of AudioAnalysis for the frequency domain of the audio
10 | * @typedef {Object} AudioAnalysisOutputType
11 | * @property {Float32Array} values Amplitude of this frequency between {0, 1} inclusive
12 | * @property {number[]} frequencies Raw frequency bucket values
13 | * @property {string[]} labels Labels for the frequency bucket values
14 | */
15 |
16 | /**
17 | * Analyzes audio for visual output
18 | * @class
19 | */
20 | export class AudioAnalysis {
21 | /**
22 | * Retrieves frequency domain data from an AnalyserNode adjusted to a decibel range
23 | * returns human-readable formatting and labels
24 | * @param {AnalyserNode} analyser
25 | * @param {number} sampleRate
26 | * @param {Float32Array} [fftResult]
27 | * @param {"frequency"|"music"|"voice"} [analysisType]
28 | * @param {number} [minDecibels] default -100
29 | * @param {number} [maxDecibels] default -30
30 | * @returns {AudioAnalysisOutputType}
31 | */
32 | static getFrequencies(
33 | analyser,
34 | sampleRate,
35 | fftResult,
36 | analysisType = 'frequency',
37 | minDecibels = -100,
38 | maxDecibels = -30,
39 | ) {
40 | if (!fftResult) {
41 | fftResult = new Float32Array(analyser.frequencyBinCount);
42 | analyser.getFloatFrequencyData(fftResult);
43 | }
44 | const nyquistFrequency = sampleRate / 2;
45 | const frequencyStep = (1 / fftResult.length) * nyquistFrequency;
46 | let outputValues;
47 | let frequencies;
48 | let labels;
49 | if (analysisType === 'music' || analysisType === 'voice') {
50 | const useFrequencies =
51 | analysisType === 'voice' ? voiceFrequencies : noteFrequencies;
52 | const aggregateOutput = Array(useFrequencies.length).fill(minDecibels);
53 | for (let i = 0; i < fftResult.length; i++) {
54 | const frequency = i * frequencyStep;
55 | const amplitude = fftResult[i];
56 | for (let n = useFrequencies.length - 1; n >= 0; n--) {
57 | if (frequency > useFrequencies[n]) {
58 | aggregateOutput[n] = Math.max(aggregateOutput[n], amplitude);
59 | break;
60 | }
61 | }
62 | }
63 | outputValues = aggregateOutput;
64 | frequencies =
65 | analysisType === 'voice' ? voiceFrequencies : noteFrequencies;
66 | labels =
67 | analysisType === 'voice' ? voiceFrequencyLabels : noteFrequencyLabels;
68 | } else {
69 | outputValues = Array.from(fftResult);
70 | frequencies = outputValues.map((_, i) => frequencyStep * i);
71 | labels = frequencies.map((f) => `${f.toFixed(2)} Hz`);
72 | }
73 | // We normalize to {0, 1}
74 | const normalizedOutput = outputValues.map((v) => {
75 | return Math.max(
76 | 0,
77 | Math.min((v - minDecibels) / (maxDecibels - minDecibels), 1),
78 | );
79 | });
80 | const values = new Float32Array(normalizedOutput);
81 | return {
82 | values,
83 | frequencies,
84 | labels,
85 | };
86 | }
87 |
88 | /**
89 | * Creates a new AudioAnalysis instance for an HTMLAudioElement
90 | * @param {HTMLAudioElement} audioElement
91 | * @param {AudioBuffer|null} [audioBuffer] If provided, will cache all frequency domain data from the buffer
92 | * @returns {AudioAnalysis}
93 | */
94 | constructor(audioElement, audioBuffer = null) {
95 | this.fftResults = [];
96 | if (audioBuffer) {
97 | /**
98 | * Modified from
99 | * https://stackoverflow.com/questions/75063715/using-the-web-audio-api-to-analyze-a-song-without-playing
100 | *
101 | * We do this to populate FFT values for the audio if provided an `audioBuffer`
102 | * The reason to do this is that Safari fails when using `createMediaElementSource`
103 |        * This has a non-zero RAM cost, so we only opt in to it on Safari; Chrome is fine without it
104 | */
105 | const { length, sampleRate } = audioBuffer;
106 | const offlineAudioContext = new OfflineAudioContext({
107 | length,
108 | sampleRate,
109 | });
110 | const source = offlineAudioContext.createBufferSource();
111 | source.buffer = audioBuffer;
112 | const analyser = offlineAudioContext.createAnalyser();
113 | analyser.fftSize = 8192;
114 | analyser.smoothingTimeConstant = 0.1;
115 | source.connect(analyser);
116 |       // The minimum suspend interval is one render quantum: 128 / sampleRate seconds;
117 |       // we only need ~60fps, which cuts ~1s of audio from ~6MB to ~1MB of RAM
118 | const renderQuantumInSeconds = 1 / 60;
119 | const durationInSeconds = length / sampleRate;
120 | const analyze = (index) => {
121 | const suspendTime = renderQuantumInSeconds * index;
122 | if (suspendTime < durationInSeconds) {
123 | offlineAudioContext.suspend(suspendTime).then(() => {
124 | const fftResult = new Float32Array(analyser.frequencyBinCount);
125 | analyser.getFloatFrequencyData(fftResult);
126 | this.fftResults.push(fftResult);
127 | analyze(index + 1);
128 | });
129 | }
130 | if (index === 1) {
131 | offlineAudioContext.startRendering();
132 | } else {
133 | offlineAudioContext.resume();
134 | }
135 | };
136 | source.start(0);
137 | analyze(1);
138 | this.audio = audioElement;
139 | this.context = offlineAudioContext;
140 | this.analyser = analyser;
141 | this.sampleRate = sampleRate;
142 | this.audioBuffer = audioBuffer;
143 | } else {
144 | const audioContext = new AudioContext();
145 | const track = audioContext.createMediaElementSource(audioElement);
146 | const analyser = audioContext.createAnalyser();
147 | analyser.fftSize = 8192;
148 | analyser.smoothingTimeConstant = 0.1;
149 | track.connect(analyser);
150 | analyser.connect(audioContext.destination);
151 | this.audio = audioElement;
152 | this.context = audioContext;
153 | this.analyser = analyser;
154 | this.sampleRate = this.context.sampleRate;
155 | this.audioBuffer = null;
156 | }
157 | }
158 |
159 | /**
160 | * Gets the current frequency domain data from the playing audio track
161 | * @param {"frequency"|"music"|"voice"} [analysisType]
162 | * @param {number} [minDecibels] default -100
163 | * @param {number} [maxDecibels] default -30
164 | * @returns {AudioAnalysisOutputType}
165 | */
166 | getFrequencies(
167 | analysisType = 'frequency',
168 | minDecibels = -100,
169 | maxDecibels = -30,
170 | ) {
171 | let fftResult = null;
172 | if (this.audioBuffer && this.fftResults.length) {
173 | const pct = this.audio.currentTime / this.audio.duration;
174 | const index = Math.min(
175 | (pct * this.fftResults.length) | 0,
176 | this.fftResults.length - 1,
177 | );
178 | fftResult = this.fftResults[index];
179 | }
180 | return AudioAnalysis.getFrequencies(
181 | this.analyser,
182 | this.sampleRate,
183 | fftResult,
184 | analysisType,
185 | minDecibels,
186 | maxDecibels,
187 | );
188 | }
189 |
190 | /**
191 | * Resume the internal AudioContext if it was suspended due to the lack of
192 | * user interaction when the AudioAnalysis was instantiated.
193 |    * @returns {Promise<true>}
194 | */
195 | async resumeIfSuspended() {
196 | if (this.context.state === 'suspended') {
197 | await this.context.resume();
198 | }
199 | return true;
200 | }
201 | }
202 |
203 | globalThis.AudioAnalysis = AudioAnalysis;
204 |
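
A minimal usage sketch for AudioAnalysis (illustrative; assumes a browser context with an <audio> element the user has already interacted with):

    const audioEl = document.querySelector('audio');
    const analysis = new AudioAnalysis(audioEl);
    await analysis.resumeIfSuspended(); // the AudioContext may start out suspended
    const { values, labels } = analysis.getFrequencies('voice');
    // values[i] is the normalized {0, 1} amplitude for the note bucket named by labels[i]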
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/analysis/constants.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Constants for help with visualization
3 | * Helps map frequency ranges from Fast Fourier Transform
4 | * to human-interpretable ranges, notably music ranges and
5 | * human vocal ranges.
6 | */
7 |
8 | // Eighth octave frequencies
9 | const octave8Frequencies = [
10 | 4186.01, 4434.92, 4698.63, 4978.03, 5274.04, 5587.65, 5919.91, 6271.93,
11 | 6644.88, 7040.0, 7458.62, 7902.13,
12 | ];
13 |
14 | // Labels for each of the above frequencies
15 | const octave8FrequencyLabels = [
16 | 'C',
17 | 'C#',
18 | 'D',
19 | 'D#',
20 | 'E',
21 | 'F',
22 | 'F#',
23 | 'G',
24 | 'G#',
25 | 'A',
26 | 'A#',
27 | 'B',
28 | ];
29 |
30 | /**
31 | * All note frequencies from 1st to 8th octave
32 | * in format "A#8" (A#, 8th octave)
33 | */
34 | export const noteFrequencies = [];
35 | export const noteFrequencyLabels = [];
36 | for (let i = 1; i <= 8; i++) {
37 | for (let f = 0; f < octave8Frequencies.length; f++) {
38 | const freq = octave8Frequencies[f];
39 | noteFrequencies.push(freq / Math.pow(2, 8 - i));
40 | noteFrequencyLabels.push(octave8FrequencyLabels[f] + i);
41 | }
42 | }
43 |
44 | /**
45 | * Subset of the note frequencies between 32 and 2000 Hz
46 | * 6 octave range: C1 to B6
47 | */
48 | const voiceFrequencyRange = [32.0, 2000.0];
49 | export const voiceFrequencies = noteFrequencies.filter((_, i) => {
50 | return (
51 | noteFrequencies[i] > voiceFrequencyRange[0] &&
52 | noteFrequencies[i] < voiceFrequencyRange[1]
53 | );
54 | });
55 | export const voiceFrequencyLabels = noteFrequencyLabels.filter((_, i) => {
56 | return (
57 | noteFrequencies[i] > voiceFrequencyRange[0] &&
58 | noteFrequencies[i] < voiceFrequencyRange[1]
59 | );
60 | });
61 |
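
Worked example for the loop above: the eighth-octave A is 7040.0 Hz, so octave 4 yields 7040 / 2^(8 - 4) = 440 Hz (concert-pitch A4), labeled 'A' + 4 = 'A4'. The voice subset then keeps exactly 'C1' (32.70 Hz) through 'B6' (1975.53 Hz), since only those fall strictly between 32 and 2000 Hz.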
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/wav_packer.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Raw wav audio file contents
3 | * @typedef {Object} WavPackerAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {number} channelCount
7 | * @property {number} sampleRate
8 | * @property {number} duration
9 | */
10 |
11 | /**
12 | * Utility class for assembling PCM16 "audio/wav" data
13 | * @class
14 | */
15 | export class WavPacker {
16 | /**
17 | * Converts Float32Array of amplitude data to ArrayBuffer in Int16Array format
18 | * @param {Float32Array} float32Array
19 | * @returns {ArrayBuffer}
20 | */
21 | static floatTo16BitPCM(float32Array) {
22 | const buffer = new ArrayBuffer(float32Array.length * 2);
23 | const view = new DataView(buffer);
24 | let offset = 0;
25 | for (let i = 0; i < float32Array.length; i++, offset += 2) {
26 | let s = Math.max(-1, Math.min(1, float32Array[i]));
27 | view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
28 | }
29 | return buffer;
30 | }
31 |
32 | /**
33 | * Concatenates two ArrayBuffers
34 | * @param {ArrayBuffer} leftBuffer
35 | * @param {ArrayBuffer} rightBuffer
36 | * @returns {ArrayBuffer}
37 | */
38 | static mergeBuffers(leftBuffer, rightBuffer) {
39 | const tmpArray = new Uint8Array(
40 | leftBuffer.byteLength + rightBuffer.byteLength
41 | );
42 | tmpArray.set(new Uint8Array(leftBuffer), 0);
43 | tmpArray.set(new Uint8Array(rightBuffer), leftBuffer.byteLength);
44 | return tmpArray.buffer;
45 | }
46 |
47 | /**
48 | * Packs data into an Int16 format
49 | * @private
50 | * @param {number} size 0 = 1x Int16, 1 = 2x Int16
51 | * @param {number} arg value to pack
52 | * @returns
53 | */
54 | _packData(size, arg) {
55 | return [
56 | new Uint8Array([arg, arg >> 8]),
57 | new Uint8Array([arg, arg >> 8, arg >> 16, arg >> 24]),
58 | ][size];
59 | }
60 |
61 | /**
62 | * Packs audio into "audio/wav" Blob
63 | * @param {number} sampleRate
64 | * @param {{bitsPerSample: number, channels: Array, data: Int16Array}} audio
65 | * @returns {WavPackerAudioType}
66 | */
67 | pack(sampleRate, audio) {
68 | if (!audio?.bitsPerSample) {
69 | throw new Error(`Missing "bitsPerSample"`);
70 | } else if (!audio?.channels) {
71 | throw new Error(`Missing "channels"`);
72 | } else if (!audio?.data) {
73 | throw new Error(`Missing "data"`);
74 | }
75 | const { bitsPerSample, channels, data } = audio;
76 | const output = [
77 | // Header
78 | 'RIFF',
79 | this._packData(
80 | 1,
81 | 4 + (8 + 24) /* chunk 1 length */ + (8 + 8) /* chunk 2 length */
82 | ), // Length
83 | 'WAVE',
84 | // chunk 1
85 | 'fmt ', // Sub-chunk identifier
86 | this._packData(1, 16), // Chunk length
87 | this._packData(0, 1), // Audio format (1 is linear quantization)
88 | this._packData(0, channels.length),
89 | this._packData(1, sampleRate),
90 | this._packData(1, (sampleRate * channels.length * bitsPerSample) / 8), // Byte rate
91 | this._packData(0, (channels.length * bitsPerSample) / 8),
92 | this._packData(0, bitsPerSample),
93 | // chunk 2
94 | 'data', // Sub-chunk identifier
95 | this._packData(
96 | 1,
97 | (channels[0].length * channels.length * bitsPerSample) / 8
98 | ), // Chunk length
99 | data,
100 | ];
101 |     const blob = new Blob(output, { type: 'audio/wav' });
102 | const url = URL.createObjectURL(blob);
103 | return {
104 | blob,
105 | url,
106 | channelCount: channels.length,
107 | sampleRate,
108 | duration: data.byteLength / (channels.length * sampleRate * 2),
109 | };
110 | }
111 | }
112 |
113 | globalThis.WavPacker = WavPacker;
114 |
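
A minimal sketch of packing one second of mono silence into a wav Blob (illustrative only; this mirrors how WavRecorder.decode() calls pack()):

    const sampleRate = 44100;
    const float32Array = new Float32Array(sampleRate); // 1s of silence
    const packer = new WavPacker();
    const { blob, url, duration } = packer.pack(sampleRate, {
      bitsPerSample: 16,
      channels: [float32Array],
      data: WavPacker.floatTo16BitPCM(float32Array),
    });
    // duration === 1; `url` is an object URL suitable for an <audio> element's src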
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/wav_recorder.js:
--------------------------------------------------------------------------------
1 | import { AudioProcessorSrc } from './worklets/audio_processor.js';
2 | import { AudioAnalysis } from './analysis/audio_analysis.js';
3 | import { WavPacker } from './wav_packer.js';
4 |
5 | /**
6 | * Decodes audio into a wav file
7 | * @typedef {Object} DecodedAudioType
8 | * @property {Blob} blob
9 | * @property {string} url
10 | * @property {Float32Array} values
11 | * @property {AudioBuffer} audioBuffer
12 | */
13 |
14 | /**
15 | * Records live stream of user audio as PCM16 "audio/wav" data
16 | * @class
17 | */
18 | export class WavRecorder {
19 | /**
20 | * Create a new WavRecorder instance
21 | * @param {{sampleRate?: number, outputToSpeakers?: boolean, debug?: boolean}} [options]
22 | * @returns {WavRecorder}
23 | */
24 | constructor({
25 | sampleRate = 44100,
26 | outputToSpeakers = false,
27 | debug = false,
28 | } = {}) {
29 | // Script source
30 | this.scriptSrc = AudioProcessorSrc;
31 | // Config
32 | this.sampleRate = sampleRate;
33 | this.outputToSpeakers = outputToSpeakers;
34 | this.debug = !!debug;
35 | this._deviceChangeCallback = null;
36 | this._devices = [];
37 | // State variables
38 | this.stream = null;
39 | this.processor = null;
40 | this.source = null;
41 | this.node = null;
42 | this.recording = false;
43 | // Event handling with AudioWorklet
44 | this._lastEventId = 0;
45 | this.eventReceipts = {};
46 | this.eventTimeout = 5000;
47 | // Process chunks of audio
48 | this._chunkProcessor = () => {};
49 | this._chunkProcessorSize = void 0;
50 | this._chunkProcessorBuffer = {
51 | raw: new ArrayBuffer(0),
52 | mono: new ArrayBuffer(0),
53 | };
54 | }
55 |
56 | /**
57 | * Decodes audio data from multiple formats to a Blob, url, Float32Array and AudioBuffer
58 | * @param {Blob|Float32Array|Int16Array|ArrayBuffer|number[]} audioData
59 | * @param {number} sampleRate
60 | * @param {number} fromSampleRate
61 |    * @returns {Promise<DecodedAudioType>}
62 | */
63 | static async decode(audioData, sampleRate = 44100, fromSampleRate = -1) {
64 | const context = new AudioContext({ sampleRate });
65 | let arrayBuffer;
66 | let blob;
67 | if (audioData instanceof Blob) {
68 | if (fromSampleRate !== -1) {
69 | throw new Error(
70 | `Can not specify "fromSampleRate" when reading from Blob`,
71 | );
72 | }
73 | blob = audioData;
74 | arrayBuffer = await blob.arrayBuffer();
75 | } else if (audioData instanceof ArrayBuffer) {
76 | if (fromSampleRate !== -1) {
77 | throw new Error(
78 | `Can not specify "fromSampleRate" when reading from ArrayBuffer`,
79 | );
80 | }
81 | arrayBuffer = audioData;
82 | blob = new Blob([arrayBuffer], { type: 'audio/wav' });
83 | } else {
84 | let float32Array;
85 | let data;
86 | if (audioData instanceof Int16Array) {
87 | data = audioData;
88 | float32Array = new Float32Array(audioData.length);
89 | for (let i = 0; i < audioData.length; i++) {
90 | float32Array[i] = audioData[i] / 0x8000;
91 | }
92 | } else if (audioData instanceof Float32Array) {
93 | float32Array = audioData;
94 | } else if (audioData instanceof Array) {
95 | float32Array = new Float32Array(audioData);
96 | } else {
97 | throw new Error(
98 |           `"audioData" must be one of: Blob, Float32Array, Int16Array, ArrayBuffer, Array`,
99 | );
100 | }
101 | if (fromSampleRate === -1) {
102 | throw new Error(
103 |           `Must specify "fromSampleRate" when reading from Float32Array, Int16Array or Array`,
104 | );
105 | } else if (fromSampleRate < 3000) {
106 | throw new Error(`Minimum "fromSampleRate" is 3000 (3kHz)`);
107 | }
108 | if (!data) {
109 | data = WavPacker.floatTo16BitPCM(float32Array);
110 | }
111 | const audio = {
112 | bitsPerSample: 16,
113 | channels: [float32Array],
114 | data,
115 | };
116 | const packer = new WavPacker();
117 | const result = packer.pack(fromSampleRate, audio);
118 | blob = result.blob;
119 | arrayBuffer = await blob.arrayBuffer();
120 | }
121 | const audioBuffer = await context.decodeAudioData(arrayBuffer);
122 | const values = audioBuffer.getChannelData(0);
123 | const url = URL.createObjectURL(blob);
124 | return {
125 | blob,
126 | url,
127 | values,
128 | audioBuffer,
129 | };
130 | }
131 |
132 | /**
133 | * Logs data in debug mode
134 | * @param {...any} arguments
135 | * @returns {true}
136 | */
137 | log() {
138 | if (this.debug) {
139 |       console.log(...arguments);
140 | }
141 | return true;
142 | }
143 |
144 | /**
145 | * Retrieves the current sampleRate for the recorder
146 | * @returns {number}
147 | */
148 | getSampleRate() {
149 | return this.sampleRate;
150 | }
151 |
152 | /**
153 | * Retrieves the current status of the recording
154 | * @returns {"ended"|"paused"|"recording"}
155 | */
156 | getStatus() {
157 | if (!this.processor) {
158 | return 'ended';
159 | } else if (!this.recording) {
160 | return 'paused';
161 | } else {
162 | return 'recording';
163 | }
164 | }
165 |
166 | /**
167 | * Sends an event to the AudioWorklet
168 | * @private
169 | * @param {string} name
170 | * @param {{[key: string]: any}} data
171 | * @param {AudioWorkletNode} [_processor]
172 | * @returns {Promise<{[key: string]: any}>}
173 | */
174 | async _event(name, data = {}, _processor = null) {
175 | _processor = _processor || this.processor;
176 | if (!_processor) {
177 | throw new Error('Can not send events without recording first');
178 | }
179 | const message = {
180 | event: name,
181 | id: this._lastEventId++,
182 | data,
183 | };
184 | _processor.port.postMessage(message);
185 | const t0 = new Date().valueOf();
186 | while (!this.eventReceipts[message.id]) {
187 | if (new Date().valueOf() - t0 > this.eventTimeout) {
188 | throw new Error(`Timeout waiting for "${name}" event`);
189 | }
190 | await new Promise((res) => setTimeout(() => res(true), 1));
191 | }
192 | const payload = this.eventReceipts[message.id];
193 | delete this.eventReceipts[message.id];
194 | return payload;
195 | }
196 |
197 | /**
198 | * Sets device change callback, remove if callback provided is `null`
199 |    * @param {(Array<MediaDeviceInfo & {default: boolean}>): void|null} callback
200 | * @returns {true}
201 | */
202 | listenForDeviceChange(callback) {
203 | if (callback === null && this._deviceChangeCallback) {
204 | navigator.mediaDevices.removeEventListener(
205 | 'devicechange',
206 | this._deviceChangeCallback,
207 | );
208 | this._deviceChangeCallback = null;
209 | } else if (callback !== null) {
210 | // Basically a debounce; we only want this called once when devices change
211 | // And we only want the most recent callback() to be executed
212 | // if a few are operating at the same time
213 | let lastId = 0;
214 | let lastDevices = [];
215 | const serializeDevices = (devices) =>
216 | devices
217 | .map((d) => d.deviceId)
218 | .sort()
219 | .join(',');
220 | const cb = async () => {
221 | let id = ++lastId;
222 | const devices = await this.listDevices();
223 | if (id === lastId) {
224 | if (serializeDevices(lastDevices) !== serializeDevices(devices)) {
225 | lastDevices = devices;
226 | callback(devices.slice());
227 | }
228 | }
229 | };
230 | navigator.mediaDevices.addEventListener('devicechange', cb);
231 | cb();
232 | this._deviceChangeCallback = cb;
233 | }
234 | return true;
235 | }
236 |
237 | /**
238 | * Manually request permission to use the microphone
239 |    * @returns {Promise<true>}
240 | */
241 | async requestPermission() {
242 | const permissionStatus = await navigator.permissions.query({
243 | name: 'microphone',
244 | });
245 | if (permissionStatus.state === 'denied') {
246 | window.alert('You must grant microphone access to use this feature.');
247 | } else if (permissionStatus.state === 'prompt') {
248 | try {
249 | const stream = await navigator.mediaDevices.getUserMedia({
250 | audio: true,
251 | });
252 | const tracks = stream.getTracks();
253 | tracks.forEach((track) => track.stop());
254 | } catch (e) {
255 | window.alert('You must grant microphone access to use this feature.');
256 | }
257 | }
258 | return true;
259 | }
260 |
261 | /**
262 | * List all eligible devices for recording, will request permission to use microphone
263 |    * @returns {Promise<Array<MediaDeviceInfo & {default: boolean}>>}
264 | */
265 | async listDevices() {
266 | if (
267 | !navigator.mediaDevices ||
268 | !('enumerateDevices' in navigator.mediaDevices)
269 | ) {
270 | throw new Error('Could not request user devices');
271 | }
272 | await this.requestPermission();
273 | const devices = await navigator.mediaDevices.enumerateDevices();
274 | const audioDevices = devices.filter(
275 | (device) => device.kind === 'audioinput',
276 | );
277 | const defaultDeviceIndex = audioDevices.findIndex(
278 | (device) => device.deviceId === 'default',
279 | );
280 | const deviceList = [];
281 | if (defaultDeviceIndex !== -1) {
282 | let defaultDevice = audioDevices.splice(defaultDeviceIndex, 1)[0];
283 | let existingIndex = audioDevices.findIndex(
284 | (device) => device.groupId === defaultDevice.groupId,
285 | );
286 | if (existingIndex !== -1) {
287 | defaultDevice = audioDevices.splice(existingIndex, 1)[0];
288 | }
289 | defaultDevice.default = true;
290 | deviceList.push(defaultDevice);
291 | }
292 | return deviceList.concat(audioDevices);
293 | }
294 |
295 | /**
296 | * Begins a recording session and requests microphone permissions if not already granted
297 | * Microphone recording indicator will appear on browser tab but status will be "paused"
298 | * @param {string} [deviceId] if no device provided, default device will be used
299 |    * @returns {Promise<true>}
300 | */
301 | async begin(deviceId) {
302 | if (this.processor) {
303 | throw new Error(
304 | `Already connected: please call .end() to start a new session`,
305 | );
306 | }
307 |
308 | if (
309 | !navigator.mediaDevices ||
310 | !('getUserMedia' in navigator.mediaDevices)
311 | ) {
312 | throw new Error('Could not request user media');
313 | }
314 | try {
315 | const config = { audio: true };
316 | if (deviceId) {
317 | config.audio = { deviceId: { exact: deviceId } };
318 | }
319 | this.stream = await navigator.mediaDevices.getUserMedia(config);
320 | } catch (err) {
321 | throw new Error('Could not start media stream');
322 | }
323 |
324 | const context = new AudioContext({ sampleRate: this.sampleRate });
325 | const source = context.createMediaStreamSource(this.stream);
326 | // Load and execute the module script.
327 | try {
328 | await context.audioWorklet.addModule(this.scriptSrc);
329 | } catch (e) {
330 | console.error(e);
331 | throw new Error(`Could not add audioWorklet module: ${this.scriptSrc}`);
332 | }
333 | const processor = new AudioWorkletNode(context, 'audio_processor');
334 | processor.port.onmessage = (e) => {
335 | const { event, id, data } = e.data;
336 | if (event === 'receipt') {
337 | this.eventReceipts[id] = data;
338 | } else if (event === 'chunk') {
339 | if (this._chunkProcessorSize) {
340 | const buffer = this._chunkProcessorBuffer;
341 | this._chunkProcessorBuffer = {
342 | raw: WavPacker.mergeBuffers(buffer.raw, data.raw),
343 | mono: WavPacker.mergeBuffers(buffer.mono, data.mono),
344 | };
345 | if (
346 | this._chunkProcessorBuffer.mono.byteLength >=
347 | this._chunkProcessorSize
348 | ) {
349 | this._chunkProcessor(this._chunkProcessorBuffer);
350 | this._chunkProcessorBuffer = {
351 | raw: new ArrayBuffer(0),
352 | mono: new ArrayBuffer(0),
353 | };
354 | }
355 | } else {
356 | this._chunkProcessor(data);
357 | }
358 | }
359 | };
360 |
361 | const node = source.connect(processor);
362 | const analyser = context.createAnalyser();
363 | analyser.fftSize = 8192;
364 | analyser.smoothingTimeConstant = 0.1;
365 | node.connect(analyser);
366 | if (this.outputToSpeakers) {
367 | // eslint-disable-next-line no-console
368 | console.warn(
369 |         'Warning: Output to speakers may affect sound quality,\n' +
370 |           'especially due to system audio-feedback prevention measures.\n' +
371 |           'Use only for debugging.',
372 | );
373 | analyser.connect(context.destination);
374 | }
375 |
376 | this.source = source;
377 | this.node = node;
378 | this.analyser = analyser;
379 | this.processor = processor;
380 | return true;
381 | }
382 |
383 | /**
384 | * Gets the current frequency domain data from the recording track
385 | * @param {"frequency"|"music"|"voice"} [analysisType]
386 | * @param {number} [minDecibels] default -100
387 | * @param {number} [maxDecibels] default -30
388 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
389 | */
390 | getFrequencies(
391 | analysisType = 'frequency',
392 | minDecibels = -100,
393 | maxDecibels = -30,
394 | ) {
395 | if (!this.processor) {
396 | throw new Error('Session ended: please call .begin() first');
397 | }
398 | return AudioAnalysis.getFrequencies(
399 | this.analyser,
400 | this.sampleRate,
401 | null,
402 | analysisType,
403 | minDecibels,
404 | maxDecibels,
405 | );
406 | }
407 |
408 | /**
409 | * Pauses the recording
410 | * Keeps microphone stream open but halts storage of audio
411 |    * @returns {Promise<true>}
412 | */
413 | async pause() {
414 | if (!this.processor) {
415 | throw new Error('Session ended: please call .begin() first');
416 | } else if (!this.recording) {
417 | throw new Error('Already paused: please call .record() first');
418 | }
419 | if (this._chunkProcessorBuffer.raw.byteLength) {
420 | this._chunkProcessor(this._chunkProcessorBuffer);
421 | }
422 | this.log('Pausing ...');
423 | await this._event('stop');
424 | this.recording = false;
425 | return true;
426 | }
427 |
428 | /**
429 | * Start recording stream and storing to memory from the connected audio source
430 | * @param {(data: { mono: Int16Array; raw: Int16Array }) => any} [chunkProcessor]
431 | * @param {number} [chunkSize] chunkProcessor will not be triggered until this size threshold met in mono audio
432 |    * @returns {Promise<true>}
433 | */
434 | async record(chunkProcessor = () => {}, chunkSize = 8192) {
435 | if (!this.processor) {
436 | throw new Error('Session ended: please call .begin() first');
437 | } else if (this.recording) {
438 | throw new Error('Already recording: please call .pause() first');
439 | } else if (typeof chunkProcessor !== 'function') {
440 | throw new Error(`chunkProcessor must be a function`);
441 | }
442 | this._chunkProcessor = chunkProcessor;
443 | this._chunkProcessorSize = chunkSize;
444 | this._chunkProcessorBuffer = {
445 | raw: new ArrayBuffer(0),
446 | mono: new ArrayBuffer(0),
447 | };
448 | this.log('Recording ...');
449 | await this._event('start');
450 | this.recording = true;
451 | return true;
452 | }
453 |
454 | /**
455 | * Clears the audio buffer, empties stored recording
456 |    * @returns {Promise<true>}
457 | */
458 | async clear() {
459 | if (!this.processor) {
460 | throw new Error('Session ended: please call .begin() first');
461 | }
462 | await this._event('clear');
463 | return true;
464 | }
465 |
466 | /**
467 | * Reads the current audio stream data
468 |    * @returns {Promise<{meanValues: Float32Array, channels: Array<Float32Array>}>}
469 | */
470 | async read() {
471 | if (!this.processor) {
472 | throw new Error('Session ended: please call .begin() first');
473 | }
474 | this.log('Reading ...');
475 | const result = await this._event('read');
476 | return result;
477 | }
478 |
479 | /**
480 | * Saves the current audio stream to a file
481 | * @param {boolean} [force] Force saving while still recording
482 |    * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
483 | */
484 | async save(force = false) {
485 | if (!this.processor) {
486 | throw new Error('Session ended: please call .begin() first');
487 | }
488 | if (!force && this.recording) {
489 | throw new Error(
490 | 'Currently recording: please call .pause() first, or call .save(true) to force',
491 | );
492 | }
493 | this.log('Exporting ...');
494 | const exportData = await this._event('export');
495 | const packer = new WavPacker();
496 | const result = packer.pack(this.sampleRate, exportData.audio);
497 | return result;
498 | }
499 |
500 | /**
501 | * Ends the current recording session and saves the result
502 |    * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
503 | */
504 | async end() {
505 | if (!this.processor) {
506 | throw new Error('Session ended: please call .begin() first');
507 | }
508 |
509 | const _processor = this.processor;
510 |
511 | this.log('Stopping ...');
512 | await this._event('stop');
513 | this.recording = false;
514 | const tracks = this.stream.getTracks();
515 | tracks.forEach((track) => track.stop());
516 |
517 | this.log('Exporting ...');
518 | const exportData = await this._event('export', {}, _processor);
519 |
520 | this.processor.disconnect();
521 | this.source.disconnect();
522 | this.node.disconnect();
523 | this.analyser.disconnect();
524 | this.stream = null;
525 | this.processor = null;
526 | this.source = null;
527 | this.node = null;
528 |
529 | const packer = new WavPacker();
530 | const result = packer.pack(this.sampleRate, exportData.audio);
531 | return result;
532 | }
533 |
534 | /**
535 | * Performs a full cleanup of WavRecorder instance
536 | * Stops actively listening via microphone and removes existing listeners
537 |    * @returns {Promise<true>}
538 | */
539 | async quit() {
540 | this.listenForDeviceChange(null);
541 | if (this.processor) {
542 | await this.end();
543 | }
544 | return true;
545 | }
546 | }
547 |
548 | globalThis.WavRecorder = WavRecorder;
549 |
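
A minimal recording lifecycle sketch (illustrative; requires a secure browser context and microphone permission):

    const recorder = new WavRecorder({ sampleRate: 24000 });
    await recorder.begin();   // open the microphone; status is now 'paused'
    await recorder.record((data) => {
      // data.mono and data.raw are PCM16 buffers, delivered per chunkSize
    });
    await recorder.pause();
    const { blob, url } = await recorder.save(); // pack what was recorded as wav
    await recorder.end();     // stop the tracks and tear the session down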
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/wav_stream_player.js:
--------------------------------------------------------------------------------
1 | import { StreamProcessorSrc } from './worklets/stream_processor.js';
2 | import { AudioAnalysis } from './analysis/audio_analysis.js';
3 |
4 | /**
5 | * Plays audio streams received in raw PCM16 chunks from the browser
6 | * @class
7 | */
8 | export class WavStreamPlayer {
9 | /**
10 | * Creates a new WavStreamPlayer instance
11 | * @param {{sampleRate?: number}} options
12 | * @returns {WavStreamPlayer}
13 | */
14 | constructor({ sampleRate = 44100 } = {}) {
15 | this.scriptSrc = StreamProcessorSrc;
16 | this.sampleRate = sampleRate;
17 | this.context = null;
18 | this.stream = null;
19 | this.analyser = null;
20 | this.trackSampleOffsets = {};
21 | this.interruptedTrackIds = {};
22 | }
23 |
24 | /**
25 | * Connects the audio context and enables output to speakers
26 |    * @returns {Promise<true>}
27 | */
28 | async connect() {
29 | this.context = new AudioContext({ sampleRate: this.sampleRate });
30 | if (this.context.state === 'suspended') {
31 | await this.context.resume();
32 | }
33 | try {
34 | await this.context.audioWorklet.addModule(this.scriptSrc);
35 | } catch (e) {
36 | console.error(e);
37 | throw new Error(`Could not add audioWorklet module: ${this.scriptSrc}`);
38 | }
39 | const analyser = this.context.createAnalyser();
40 | analyser.fftSize = 8192;
41 | analyser.smoothingTimeConstant = 0.1;
42 | this.analyser = analyser;
43 | return true;
44 | }
45 |
46 | /**
47 | * Gets the current frequency domain data from the playing track
48 | * @param {"frequency"|"music"|"voice"} [analysisType]
49 | * @param {number} [minDecibels] default -100
50 | * @param {number} [maxDecibels] default -30
51 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
52 | */
53 | getFrequencies(
54 | analysisType = 'frequency',
55 | minDecibels = -100,
56 | maxDecibels = -30
57 | ) {
58 | if (!this.analyser) {
59 | throw new Error('Not connected, please call .connect() first');
60 | }
61 | return AudioAnalysis.getFrequencies(
62 | this.analyser,
63 | this.sampleRate,
64 | null,
65 | analysisType,
66 | minDecibels,
67 | maxDecibels
68 | );
69 | }
70 |
71 | /**
72 | * Starts audio streaming
73 | * @private
74 |    * @returns {Promise<true>}
75 | */
76 | _start() {
77 | const streamNode = new AudioWorkletNode(this.context, 'stream_processor');
78 | streamNode.connect(this.context.destination);
79 | streamNode.port.onmessage = (e) => {
80 | const { event } = e.data;
81 | if (event === 'stop') {
82 | streamNode.disconnect();
83 | this.stream = null;
84 | } else if (event === 'offset') {
85 | const { requestId, trackId, offset } = e.data;
86 | const currentTime = offset / this.sampleRate;
87 | this.trackSampleOffsets[requestId] = { trackId, offset, currentTime };
88 | }
89 | };
90 | this.analyser.disconnect();
91 | streamNode.connect(this.analyser);
92 | this.stream = streamNode;
93 | return true;
94 | }
95 |
96 | /**
97 | * Adds 16BitPCM data to the currently playing audio stream
98 | * You can add chunks beyond the current play point and they will be queued for play
99 | * @param {ArrayBuffer|Int16Array} arrayBuffer
100 | * @param {string} [trackId]
101 | * @returns {Int16Array}
102 | */
103 | add16BitPCM(arrayBuffer, trackId = 'default') {
104 | if (typeof trackId !== 'string') {
105 | throw new Error(`trackId must be a string`);
106 | } else if (this.interruptedTrackIds[trackId]) {
107 | return;
108 | }
109 | if (!this.stream) {
110 | this._start();
111 | }
112 | let buffer;
113 | if (arrayBuffer instanceof Int16Array) {
114 | buffer = arrayBuffer;
115 | } else if (arrayBuffer instanceof ArrayBuffer) {
116 | buffer = new Int16Array(arrayBuffer);
117 | } else {
118 | throw new Error(`argument must be Int16Array or ArrayBuffer`);
119 | }
120 | this.stream.port.postMessage({ event: 'write', buffer, trackId });
121 | return buffer;
122 | }
123 |
124 | /**
125 | * Gets the offset (sample count) of the currently playing stream
126 | * @param {boolean} [interrupt]
127 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
128 | */
129 | async getTrackSampleOffset(interrupt = false) {
130 | if (!this.stream) {
131 | return null;
132 | }
133 | const requestId = crypto.randomUUID();
134 | this.stream.port.postMessage({
135 | event: interrupt ? 'interrupt' : 'offset',
136 | requestId,
137 | });
138 | let trackSampleOffset;
139 | while (!trackSampleOffset) {
140 | trackSampleOffset = this.trackSampleOffsets[requestId];
141 | await new Promise((r) => setTimeout(() => r(), 1));
142 | }
143 | const { trackId } = trackSampleOffset;
144 | if (interrupt && trackId) {
145 | this.interruptedTrackIds[trackId] = true;
146 | }
147 | return trackSampleOffset;
148 | }
149 |
150 | /**
151 | * Strips the current stream and returns the sample offset of the audio
152 | * @param {boolean} [interrupt]
153 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
154 | */
155 | async interrupt() {
156 | return this.getTrackSampleOffset(true);
157 | }
158 | }
159 |
160 | globalThis.WavStreamPlayer = WavStreamPlayer;
161 |
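
A minimal playback sketch (illustrative; `pcm16Chunk` stands in for an Int16Array or ArrayBuffer received from a stream):

    const player = new WavStreamPlayer({ sampleRate: 24000 });
    await player.connect(); // should follow a user gesture so audio output is allowed
    player.add16BitPCM(pcm16Chunk, 'my-track'); // queues the chunk for playback
    // later, to stop playback and find out where it was cut off:
    const { trackId, offset, currentTime } = await player.interrupt();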
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/worklets/audio_processor.js:
--------------------------------------------------------------------------------
1 | const AudioProcessorWorklet = `
2 | class AudioProcessor extends AudioWorkletProcessor {
3 |
4 | constructor() {
5 | super();
6 | this.port.onmessage = this.receive.bind(this);
7 | this.initialize();
8 | }
9 |
10 | initialize() {
11 | this.foundAudio = false;
12 | this.recording = false;
13 | this.chunks = [];
14 | }
15 |
16 | /**
17 | * Concatenates sampled chunks into channels
18 | * Format is chunk[Left[], Right[]]
19 | */
20 | readChannelData(chunks, channel = -1, maxChannels = 9) {
21 | let channelLimit;
22 | if (channel !== -1) {
23 | if (chunks[0] && chunks[0].length - 1 < channel) {
24 | throw new Error(
25 | \`Channel \${channel} out of range: max \${chunks[0].length}\`
26 | );
27 | }
28 | channelLimit = channel + 1;
29 | } else {
30 | channel = 0;
31 | channelLimit = Math.min(chunks[0] ? chunks[0].length : 1, maxChannels);
32 | }
33 | const channels = [];
34 | for (let n = channel; n < channelLimit; n++) {
35 | const length = chunks.reduce((sum, chunk) => {
36 | return sum + chunk[n].length;
37 | }, 0);
38 | const buffers = chunks.map((chunk) => chunk[n]);
39 | const result = new Float32Array(length);
40 | let offset = 0;
41 | for (let i = 0; i < buffers.length; i++) {
42 | result.set(buffers[i], offset);
43 | offset += buffers[i].length;
44 | }
45 | channels[n] = result;
46 | }
47 | return channels;
48 | }
49 |
50 | /**
51 | * Combines parallel audio data into correct format,
52 | * channels[Left[], Right[]] to float32Array[LRLRLRLR...]
53 | */
54 | formatAudioData(channels) {
55 | if (channels.length === 1) {
56 | // Simple case is only one channel
57 | const float32Array = channels[0].slice();
58 | const meanValues = channels[0].slice();
59 | return { float32Array, meanValues };
60 | } else {
61 | const float32Array = new Float32Array(
62 | channels[0].length * channels.length
63 | );
64 | const meanValues = new Float32Array(channels[0].length);
65 | for (let i = 0; i < channels[0].length; i++) {
66 | const offset = i * channels.length;
67 | let meanValue = 0;
68 | for (let n = 0; n < channels.length; n++) {
69 | float32Array[offset + n] = channels[n][i];
70 | meanValue += channels[n][i];
71 | }
72 | meanValues[i] = meanValue / channels.length;
73 | }
74 | return { float32Array, meanValues };
75 | }
76 | }
77 |
78 | /**
79 | * Converts 32-bit float data to 16-bit integers
80 | */
81 | floatTo16BitPCM(float32Array) {
82 | const buffer = new ArrayBuffer(float32Array.length * 2);
83 | const view = new DataView(buffer);
84 | let offset = 0;
85 | for (let i = 0; i < float32Array.length; i++, offset += 2) {
86 | let s = Math.max(-1, Math.min(1, float32Array[i]));
87 | view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
88 | }
89 | return buffer;
90 | }
91 |
92 | /**
93 | * Retrieves the most recent amplitude values from the audio stream
94 | * @param {number} channel
95 | */
96 | getValues(channel = -1) {
97 | const channels = this.readChannelData(this.chunks, channel);
98 | const { meanValues } = this.formatAudioData(channels);
99 | return { meanValues, channels };
100 | }
101 |
102 | /**
103 | * Exports chunks as an audio/wav file
104 | */
105 | export() {
106 | const channels = this.readChannelData(this.chunks);
107 | const { float32Array, meanValues } = this.formatAudioData(channels);
108 | const audioData = this.floatTo16BitPCM(float32Array);
109 | return {
110 | meanValues: meanValues,
111 | audio: {
112 | bitsPerSample: 16,
113 | channels: channels,
114 | data: audioData,
115 | },
116 | };
117 | }
118 |
119 | receive(e) {
120 | const { event, id } = e.data;
121 | let receiptData = {};
122 | switch (event) {
123 | case 'start':
124 | this.recording = true;
125 | break;
126 | case 'stop':
127 | this.recording = false;
128 | break;
129 | case 'clear':
130 | this.initialize();
131 | break;
132 | case 'export':
133 | receiptData = this.export();
134 | break;
135 | case 'read':
136 | receiptData = this.getValues();
137 | break;
138 | default:
139 | break;
140 | }
141 | // Always send back receipt
142 | this.port.postMessage({ event: 'receipt', id, data: receiptData });
143 | }
144 |
145 | sendChunk(chunk) {
146 | const channels = this.readChannelData([chunk]);
147 | const { float32Array, meanValues } = this.formatAudioData(channels);
148 | const rawAudioData = this.floatTo16BitPCM(float32Array);
149 | const monoAudioData = this.floatTo16BitPCM(meanValues);
150 | this.port.postMessage({
151 | event: 'chunk',
152 | data: {
153 | mono: monoAudioData,
154 | raw: rawAudioData,
155 | },
156 | });
157 | }
158 |
159 | process(inputList, outputList, parameters) {
160 | // Copy input to output (e.g. speakers)
161 | // Note that this creates choppy sounds with Mac products
162 | const sourceLimit = Math.min(inputList.length, outputList.length);
163 | for (let inputNum = 0; inputNum < sourceLimit; inputNum++) {
164 | const input = inputList[inputNum];
165 | const output = outputList[inputNum];
166 | const channelCount = Math.min(input.length, output.length);
167 | for (let channelNum = 0; channelNum < channelCount; channelNum++) {
168 | input[channelNum].forEach((sample, i) => {
169 | output[channelNum][i] = sample;
170 | });
171 | }
172 | }
173 | const inputs = inputList[0];
174 | // There's latency at the beginning of a stream before recording starts
175 | // Make sure we actually receive audio data before we start storing chunks
176 | let sliceIndex = 0;
177 | if (!this.foundAudio) {
178 | for (const channel of inputs) {
179 | sliceIndex = 0; // reset for each channel
180 | if (this.foundAudio) {
181 | break;
182 | }
183 | if (channel) {
184 | for (const value of channel) {
185 | if (value !== 0) {
186 | // find only one non-zero entry in any channel
187 | this.foundAudio = true;
188 | break;
189 | } else {
190 | sliceIndex++;
191 | }
192 | }
193 | }
194 | }
195 | }
196 | if (inputs && inputs[0] && this.foundAudio && this.recording) {
197 | // We need to copy the TypedArray, because the \`process\`
198 | // internals will reuse the same buffer to hold each input
199 | const chunk = inputs.map((input) => input.slice(sliceIndex));
200 | this.chunks.push(chunk);
201 | this.sendChunk(chunk);
202 | }
203 | return true;
204 | }
205 | }
206 |
207 | registerProcessor('audio_processor', AudioProcessor);
208 | `;
209 |
210 | const script = new Blob([AudioProcessorWorklet], {
211 | type: 'application/javascript',
212 | });
213 | const src = URL.createObjectURL(script);
214 | export const AudioProcessorSrc = src;
215 |
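
Note on the pattern above: shipping the worklet as a template string and converting it to a Blob object URL means no separate static worklet file has to be served; WavRecorder.begin() simply passes AudioProcessorSrc to context.audioWorklet.addModule(...).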
--------------------------------------------------------------------------------
/src/lib/wavtools/lib/worklets/stream_processor.js:
--------------------------------------------------------------------------------
1 | export const StreamProcessorWorklet = `
2 | class StreamProcessor extends AudioWorkletProcessor {
3 | constructor() {
4 | super();
5 | this.hasStarted = false;
6 | this.hasInterrupted = false;
7 | this.outputBuffers = [];
8 | this.bufferLength = 128;
9 | this.write = { buffer: new Float32Array(this.bufferLength), trackId: null };
10 | this.writeOffset = 0;
11 | this.trackSampleOffsets = {};
12 | this.port.onmessage = (event) => {
13 | if (event.data) {
14 | const payload = event.data;
15 | if (payload.event === 'write') {
16 | const int16Array = payload.buffer;
17 | const float32Array = new Float32Array(int16Array.length);
18 | for (let i = 0; i < int16Array.length; i++) {
19 | float32Array[i] = int16Array[i] / 0x8000; // Convert Int16 to Float32
20 | }
21 | this.writeData(float32Array, payload.trackId);
22 | } else if (
23 | payload.event === 'offset' ||
24 | payload.event === 'interrupt'
25 | ) {
26 | const requestId = payload.requestId;
27 | const trackId = this.write.trackId;
28 | const offset = this.trackSampleOffsets[trackId] || 0;
29 | this.port.postMessage({
30 | event: 'offset',
31 | requestId,
32 | trackId,
33 | offset,
34 | });
35 | if (payload.event === 'interrupt') {
36 | this.hasInterrupted = true;
37 | }
38 | } else {
39 | throw new Error(\`Unhandled event "\${payload.event}"\`);
40 | }
41 | }
42 | };
43 | }
44 |
45 | writeData(float32Array, trackId = null) {
46 | let { buffer } = this.write;
47 | let offset = this.writeOffset;
48 | for (let i = 0; i < float32Array.length; i++) {
49 | buffer[offset++] = float32Array[i];
50 | if (offset >= buffer.length) {
51 | this.outputBuffers.push(this.write);
52 | this.write = { buffer: new Float32Array(this.bufferLength), trackId };
53 | buffer = this.write.buffer;
54 | offset = 0;
55 | }
56 | }
57 | this.writeOffset = offset;
58 | return true;
59 | }
60 |
61 | process(inputs, outputs, parameters) {
62 | const output = outputs[0];
63 | const outputChannelData = output[0];
64 | const outputBuffers = this.outputBuffers;
65 | if (this.hasInterrupted) {
66 | this.port.postMessage({ event: 'stop' });
67 | return false;
68 | } else if (outputBuffers.length) {
69 | this.hasStarted = true;
70 | const { buffer, trackId } = outputBuffers.shift();
71 | for (let i = 0; i < outputChannelData.length; i++) {
72 | outputChannelData[i] = buffer[i] || 0;
73 | }
74 | if (trackId) {
75 | this.trackSampleOffsets[trackId] =
76 | this.trackSampleOffsets[trackId] || 0;
77 | this.trackSampleOffsets[trackId] += buffer.length;
78 | }
79 | return true;
80 | } else if (this.hasStarted) {
81 | this.port.postMessage({ event: 'stop' });
82 | return false;
83 | } else {
84 | return true;
85 | }
86 | }
87 | }
88 |
89 | registerProcessor('stream_processor', StreamProcessor);
90 | `;
91 |
92 | const script = new Blob([StreamProcessorWorklet], {
93 | type: 'application/javascript',
94 | });
95 | const src = URL.createObjectURL(script);
96 | export const StreamProcessorSrc = src;
97 |
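
Note on the Int16-to-Float32 conversion above: dividing by 0x8000 (32768) maps the Int16 range [-32768, 32767] onto roughly [-1.0, 1.0); for example, an input sample of -16384 becomes -0.5.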
--------------------------------------------------------------------------------
/src/logo.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/pages/ConsolePage.scss:
--------------------------------------------------------------------------------
1 | [data-component='ConsolePage'] {
2 | font-family: 'Roboto Mono', monospace;
3 | font-weight: 400;
4 | font-style: normal;
5 | font-size: 12px;
6 | height: 100%;
7 | display: flex;
8 | flex-direction: column;
9 | overflow: hidden;
10 | margin: 0px 8px;
11 | & > div {
12 | flex-shrink: 0;
13 | }
14 |
15 | .spacer {
16 | flex-grow: 1;
17 | }
18 |
19 | .content-top {
20 | display: flex;
21 | align-items: center;
22 | padding: 8px 16px;
23 | min-height: 40px;
24 | .content-title {
25 | flex-grow: 1;
26 | display: flex;
27 | align-items: center;
28 | gap: 12px;
29 | img {
30 | width: 24px;
31 | height: 24px;
32 | }
33 | }
34 | }
35 |
36 | .content-main {
37 | flex-grow: 1;
38 | flex-shrink: 1 !important;
39 | margin: 0px 16px;
40 | display: flex;
41 | overflow: hidden;
42 | margin-bottom: 24px;
43 | .content-block {
44 | position: relative;
45 | display: flex;
46 | flex-direction: column;
47 | max-height: 100%;
48 | width: 100%;
49 | .content-block-title {
50 | flex-shrink: 0;
51 | padding-top: 16px;
52 | padding-bottom: 4px;
53 | position: relative;
54 | }
55 | .content-block-body {
56 | color: #6e6e7f;
57 | position: relative;
58 | flex-grow: 1;
59 | padding: 8px 0px;
60 | padding-top: 4px;
61 | line-height: 1.2em;
62 | overflow: auto;
63 | &.full {
64 | padding: 0px;
65 | }
66 | }
67 | }
68 | .content-right {
69 | width: 300px;
70 | flex-shrink: 0;
71 | display: flex;
72 | flex-direction: column;
73 | margin-left: 24px;
74 | gap: 24px;
75 | & > div {
76 | border-radius: 16px;
77 | flex-grow: 1;
78 | flex-shrink: 0;
79 | overflow: hidden;
80 | position: relative;
81 | .content-block-title {
82 | position: absolute;
83 | display: flex;
84 | align-items: center;
85 | justify-content: center;
86 | line-height: 2em;
87 | top: 16px;
88 | left: 16px;
89 | padding: 4px 16px;
90 | background-color: #fff;
91 | border-radius: 1000px;
92 | min-height: 32px;
93 | z-index: 9999;
94 | text-align: center;
95 | white-space: pre;
96 | &.bottom {
97 | top: auto;
98 | bottom: 16px;
99 | right: 16px;
100 | }
101 | }
102 | }
103 | & > div.kv {
104 | height: 250px;
105 | max-height: 250px;
106 | white-space: pre;
107 | background-color: #ececf1;
108 | .content-block-body {
109 | padding: 16px;
110 | margin-top: 56px;
111 | }
112 | }
113 | }
114 | .content-logs {
115 | flex-grow: 1;
116 | display: flex;
117 | flex-direction: column;
118 | overflow: hidden;
119 | & > div {
120 | flex-grow: 1;
121 | }
122 | & > .content-actions {
123 | flex-grow: 0;
124 | flex-shrink: 0;
125 | display: flex;
126 | align-items: center;
127 | justify-content: center;
128 | gap: 16px;
129 | }
130 | & > div.events {
131 | overflow: hidden;
132 | }
133 | .events {
134 | border-top: 1px solid #e7e7e7;
135 | }
136 | .conversation {
137 | display: flex;
138 | flex-shrink: 0;
139 | width: 100%;
140 | overflow: hidden;
141 | height: 200px;
142 | min-height: 0;
143 | max-height: 200px;
144 | border-top: 1px solid #e7e7e7;
145 | }
146 | }
147 | }
148 |
149 | .conversation-item {
150 | position: relative;
151 | display: flex;
152 | gap: 16px;
153 | margin-bottom: 16px;
154 | &:not(:hover) .close {
155 | display: none;
156 | }
157 | .close {
158 | position: absolute;
159 | top: 0px;
160 | right: -20px;
161 | background: #aaa;
162 | color: #fff;
163 | display: flex;
164 | border-radius: 16px;
165 | padding: 2px;
166 | cursor: pointer;
167 | &:hover {
168 | background: #696969;
169 | }
170 | svg {
171 | stroke-width: 3;
172 | width: 12px;
173 | height: 12px;
174 | }
175 | }
176 | .speaker {
177 | position: relative;
178 | text-align: left;
179 | gap: 16px;
180 | width: 80px;
181 | flex-shrink: 0;
182 | margin-right: 16px;
183 | &.user {
184 | color: #0099ff;
185 | }
186 | &.assistant {
187 | color: #009900;
188 | }
189 | }
190 | .speaker-content {
191 | color: #18181b;
192 | overflow: hidden;
193 | word-wrap: break-word;
194 | }
195 | }
196 |
197 | .event {
198 | border-radius: 3px;
199 | white-space: pre;
200 | display: flex;
201 | padding: 0px;
202 | gap: 16px;
203 | .event-timestamp {
204 | text-align: left;
205 | gap: 8px;
206 | padding: 4px 0px;
207 | width: 80px;
208 | flex-shrink: 0;
209 | margin-right: 16px;
210 | }
211 | .event-details {
212 | display: flex;
213 | flex-direction: column;
214 | color: #18181b;
215 | gap: 8px;
216 | .event-summary {
217 | padding: 4px 8px;
218 | margin: 0px -8px;
219 | &:hover {
220 | border-radius: 8px;
221 | background-color: #f0f0f0;
222 | }
223 | cursor: pointer;
224 | display: flex;
225 | gap: 8px;
226 | align-items: center;
227 | .event-source {
228 | flex-shrink: 0;
229 | display: flex;
230 | align-items: center;
231 | gap: 8px;
232 | &.client {
233 | color: #0099ff;
234 | }
235 | &.server {
236 | color: #009900;
237 | }
238 | &.error {
239 | color: #990000;
240 | }
241 | svg {
242 | stroke-width: 3;
243 | width: 12px;
244 | height: 12px;
245 | }
246 | }
247 | }
248 | }
249 | }
250 |
251 | .visualization {
252 | position: absolute;
253 | display: flex;
254 | bottom: 4px;
255 | right: 8px;
256 | padding: 4px;
257 | border-radius: 16px;
258 | z-index: 10;
259 | gap: 2px;
260 | .visualization-entry {
261 | position: relative;
262 | display: flex;
263 | align-items: center;
264 | height: 40px;
265 | width: 100px;
266 | gap: 4px;
267 | &.client {
268 | color: #0099ff;
269 | }
270 | &.server {
271 | color: #009900;
272 | }
273 | canvas {
274 | width: 100%;
275 | height: 100%;
276 | color: currentColor;
277 | }
278 | }
279 | }
280 | }
281 |
--------------------------------------------------------------------------------
/src/pages/ConsolePage.tsx:
--------------------------------------------------------------------------------
1 | /**
2 | * Change this if you want to connect to a local relay server!
3 | * This will require you to set OPENAI_API_KEY= in a `.env` file
4 | * You can run it with `npm run relay`, in parallel with `npm start`
5 | *
6 |  * Simply switch the lines by commenting one and uncommenting the other
7 | */
8 | // const USE_LOCAL_RELAY_SERVER_URL: string | undefined = 'http://localhost:8081';
9 | const USE_LOCAL_RELAY_SERVER_URL: string | undefined = void 0;
10 |
11 | import { useEffect, useRef, useCallback, useState } from 'react';
12 |
13 | import { RealtimeClient } from '@openai/realtime-api-beta';
14 | import { ItemType } from '@openai/realtime-api-beta/dist/lib/client.js';
15 | import { WavRecorder, WavStreamPlayer } from '../lib/wavtools/index.js';
16 | import { instructions } from '../utils/conversation_config.js';
17 | import { WavRenderer } from '../utils/wav_renderer';
18 |
19 | import { X, Edit, Zap, ArrowUp, ArrowDown } from 'react-feather';
20 | import { Button } from '../components/button/Button';
21 | import { Toggle } from '../components/toggle/Toggle';
22 | import { Map } from '../components/Map';
23 |
24 | import './ConsolePage.scss';
25 |
26 |
27 | /**
28 | * Type for result from get_weather() function call
29 | */
30 | interface Coordinates {
31 | lat: number;
32 | lng: number;
33 | location?: string;
34 | temperature?: {
35 | value: number;
36 | units: string;
37 | };
38 | wind_speed?: {
39 | value: number;
40 | units: string;
41 | };
42 | }
43 |
44 | /**
45 | * Type for all event logs
46 | */
47 | interface RealtimeEvent {
48 | time: string;
49 | source: 'client' | 'server';
50 | count?: number;
51 | event: { [key: string]: any };
52 | }
53 |
54 | export function ConsolePage() {
55 | /**
56 | * Ask user for API Key
57 | * If we're using the local relay server, we don't need this
58 | */
59 | const apiKey = USE_LOCAL_RELAY_SERVER_URL
60 | ? ''
61 | : localStorage.getItem('tmp::voice_api_key') ||
62 | prompt('OpenAI API Key') ||
63 | '';
64 | const baseUrl = USE_LOCAL_RELAY_SERVER_URL
65 | ? ''
66 |     : localStorage.getItem('tmp::base_url') || 'wss://api.openai.com/v1';
67 | if (apiKey !== '') {
68 | localStorage.setItem('tmp::voice_api_key', apiKey);
69 | }
70 |
71 | /**
72 | * Instantiate:
73 | * - WavRecorder (speech input)
74 | * - WavStreamPlayer (speech output)
75 | * - RealtimeClient (API client)
76 | */
77 |   const wavRecorderRef = useRef<WavRecorder>(
78 | new WavRecorder({ sampleRate: 24000 })
79 | );
80 |   const wavStreamPlayerRef = useRef<WavStreamPlayer>(
81 | new WavStreamPlayer({ sampleRate: 24000 })
82 | );
83 |   const clientRef = useRef<RealtimeClient>(
84 | new RealtimeClient(
85 | USE_LOCAL_RELAY_SERVER_URL
86 | ? { url: USE_LOCAL_RELAY_SERVER_URL }
87 | : {
88 | apiKey: apiKey,
89 |           url: baseUrl + '/realtime',
90 | dangerouslyAllowAPIKeyInBrowser: true,
91 | }
92 | )
93 | );
94 |
95 | /**
96 | * References for
97 | * - Rendering audio visualization (canvas)
98 | * - Autoscrolling event logs
99 | * - Timing delta for event log displays
100 | */
101 |   const clientCanvasRef = useRef<HTMLCanvasElement>(null);
102 |   const serverCanvasRef = useRef<HTMLCanvasElement>(null);
103 | const eventsScrollHeightRef = useRef(0);
104 |   const eventsScrollRef = useRef<HTMLDivElement>(null);
105 |   const startTimeRef = useRef<string>(new Date().toISOString());
106 |
107 | /**
108 | * All of our variables for displaying application state
109 | * - items are all conversation items (dialog)
110 | * - realtimeEvents are event logs, which can be expanded
111 | * - memoryKv is for set_memory() function
112 | * - coords, marker are for get_weather() function
113 | */
114 |   const [items, setItems] = useState<ItemType[]>([]);
115 |   const [realtimeEvents, setRealtimeEvents] = useState<RealtimeEvent[]>([]);
116 | const [expandedEvents, setExpandedEvents] = useState<{
117 | [key: string]: boolean;
118 | }>({});
119 | const [isConnected, setIsConnected] = useState(false);
120 | const [canPushToTalk, setCanPushToTalk] = useState(true);
121 | const [isRecording, setIsRecording] = useState(false);
122 | const [memoryKv, setMemoryKv] = useState<{ [key: string]: any }>({});
123 |   const [coords, setCoords] = useState<Coordinates | null>({
124 | lat: 37.775593,
125 | lng: -122.418137,
126 | });
127 |   const [marker, setMarker] = useState<Coordinates | null>(null);
128 |
129 | /**
130 | * Utility for formatting the timing of logs
131 | */
132 | const formatTime = useCallback((timestamp: string) => {
133 | const startTime = startTimeRef.current;
134 | const t0 = new Date(startTime).valueOf();
135 | const t1 = new Date(timestamp).valueOf();
136 | const delta = t1 - t0;
137 | const hs = Math.floor(delta / 10) % 100;
138 | const s = Math.floor(delta / 1000) % 60;
139 | const m = Math.floor(delta / 60_000) % 60;
140 | const pad = (n: number) => {
141 | let s = n + '';
142 | while (s.length < 2) {
143 | s = '0' + s;
144 | }
145 | return s;
146 | };
147 | return `${pad(m)}:${pad(s)}.${pad(hs)}`;
148 | }, []);
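    // Worked example (illustrative): a delta of 61,230 ms gives m = 1, s = 1,
    // hs = 23, so the log timestamp renders as "01:01.23".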
149 |
150 | /**
151 | * When you click the API key
152 | */
153 | const resetAPIKey = useCallback(() => {
154 | const apiKey = prompt('OpenAI API Key');
155 | if (apiKey !== null) {
156 | // localStorage.clear();
157 | localStorage.setItem('tmp::voice_api_key', apiKey);
158 | window.location.reload();
159 | }
160 | }, []);
161 |
162 | const resetBaseUrl = useCallback(() => {
163 | const baseUrl = prompt('Base URL (default: wss://api.openai.com/v1)');
164 | if (baseUrl !== null) {
165 | // localStorage.clear();
166 | localStorage.setItem('tmp::base_url', baseUrl);
167 | window.location.reload();
168 | }
169 | }, []);
170 |
171 | /**
172 | * Connect to conversation:
173 | * WavRecorder takes speech input, WavStreamPlayer plays output, client is the API client
174 | */
175 | const connectConversation = useCallback(async () => {
176 | const client = clientRef.current;
177 | const wavRecorder = wavRecorderRef.current;
178 | const wavStreamPlayer = wavStreamPlayerRef.current;
179 |
180 | // Set state variables
181 | startTimeRef.current = new Date().toISOString();
182 | setIsConnected(true);
183 | setRealtimeEvents([]);
184 | setItems(client.conversation.getItems());
185 |
186 | // Connect to microphone
187 | await wavRecorder.begin();
188 |
189 | // Connect to audio output
190 | await wavStreamPlayer.connect();
191 |
192 | // Connect to realtime API
193 | await client.connect();
194 | client.sendUserMessageContent([
195 | {
196 | type: `input_text`,
197 | text: `Hello!`,
198 | // text: `For testing purposes, I want you to list ten car brands. Number each item, e.g. "one (or whatever number you are one): the item name".`
199 | },
200 | ]);
201 |
202 | if (client.getTurnDetectionType() === 'server_vad') {
203 | await wavRecorder.record((data) => client.appendInputAudio(data.mono));
204 | }
205 | }, []);
206 |
207 | /**
208 | * Disconnect and reset conversation state
209 | */
210 | const disconnectConversation = useCallback(async () => {
211 | setIsConnected(false);
212 | setRealtimeEvents([]);
213 | setItems([]);
214 | setMemoryKv({});
215 | setCoords({
216 | lat: 37.775593,
217 | lng: -122.418137,
218 | });
219 | setMarker(null);
220 |
221 | const client = clientRef.current;
222 | client.disconnect();
223 |
224 | const wavRecorder = wavRecorderRef.current;
225 | await wavRecorder.end();
226 |
227 | const wavStreamPlayer = wavStreamPlayerRef.current;
228 | await wavStreamPlayer.interrupt();
229 | }, []);
230 |
231 | const deleteConversationItem = useCallback(async (id: string) => {
232 | const client = clientRef.current;
233 | client.deleteItem(id);
234 | }, []);
235 |
236 | /**
237 | * In push-to-talk mode, start recording
238 | * .appendInputAudio() for each sample
239 | */
240 | const startRecording = async () => {
241 | setIsRecording(true);
242 | const client = clientRef.current;
243 | const wavRecorder = wavRecorderRef.current;
244 | const wavStreamPlayer = wavStreamPlayerRef.current;
245 | const trackSampleOffset = await wavStreamPlayer.interrupt();
246 | if (trackSampleOffset?.trackId) {
247 | const { trackId, offset } = trackSampleOffset;
248 | await client.cancelResponse(trackId, offset);
249 | }
250 | await wavRecorder.record((data) => client.appendInputAudio(data.mono));
251 | };
252 |
253 | /**
254 | * In push-to-talk mode, stop recording
255 | */
256 | const stopRecording = async () => {
257 | setIsRecording(false);
258 | const client = clientRef.current;
259 | const wavRecorder = wavRecorderRef.current;
260 | await wavRecorder.pause();
261 | client.createResponse();
262 | };
263 |
264 | /**
265 | * Switch between Manual <> VAD mode for communication
266 | */
267 | const changeTurnEndType = async (value: string) => {
268 | const client = clientRef.current;
269 | const wavRecorder = wavRecorderRef.current;
270 | if (value === 'none' && wavRecorder.getStatus() === 'recording') {
271 | await wavRecorder.pause();
272 | }
273 | client.updateSession({
274 | turn_detection: value === 'none' ? null : { type: 'server_vad' },
275 | });
276 | if (value === 'server_vad' && client.isConnected()) {
277 | await wavRecorder.record((data) => client.appendInputAudio(data.mono));
278 | }
279 | setCanPushToTalk(value === 'none');
280 | };
281 |
282 | /**
283 | * Auto-scroll the event logs
284 | */
285 | useEffect(() => {
286 | if (eventsScrollRef.current) {
287 | const eventsEl = eventsScrollRef.current;
288 | const scrollHeight = eventsEl.scrollHeight;
289 | // Only scroll if height has just changed
290 | if (scrollHeight !== eventsScrollHeightRef.current) {
291 | eventsEl.scrollTop = scrollHeight;
292 | eventsScrollHeightRef.current = scrollHeight;
293 | }
294 | }
295 | }, [realtimeEvents]);
296 |
297 | /**
298 | * Auto-scroll the conversation logs
299 | */
300 | useEffect(() => {
301 | const conversationEls = [].slice.call(
302 | document.body.querySelectorAll('[data-conversation-content]')
303 | );
304 | for (const el of conversationEls) {
305 | const conversationEl = el as HTMLDivElement;
306 | conversationEl.scrollTop = conversationEl.scrollHeight;
307 | }
308 | }, [items]);
309 |
310 | /**
311 | * Set up render loops for the visualization canvas
312 | */
313 | useEffect(() => {
314 | let isLoaded = true;
315 |
316 | const wavRecorder = wavRecorderRef.current;
317 | const clientCanvas = clientCanvasRef.current;
318 | let clientCtx: CanvasRenderingContext2D | null = null;
319 |
320 | const wavStreamPlayer = wavStreamPlayerRef.current;
321 | const serverCanvas = serverCanvasRef.current;
322 | let serverCtx: CanvasRenderingContext2D | null = null;
323 |
324 | const render = () => {
325 | if (isLoaded) {
326 | if (clientCanvas) {
327 | if (!clientCanvas.width || !clientCanvas.height) {
328 | clientCanvas.width = clientCanvas.offsetWidth;
329 | clientCanvas.height = clientCanvas.offsetHeight;
330 | }
331 | clientCtx = clientCtx || clientCanvas.getContext('2d');
332 | if (clientCtx) {
333 | clientCtx.clearRect(0, 0, clientCanvas.width, clientCanvas.height);
334 | const result = wavRecorder.recording
335 | ? wavRecorder.getFrequencies('voice')
336 | : { values: new Float32Array([0]) };
337 | WavRenderer.drawBars(
338 | clientCanvas,
339 | clientCtx,
340 | result.values,
341 | '#0099ff',
342 | 10,
343 | 0,
344 | 8
345 | );
346 | }
347 | }
348 | if (serverCanvas) {
349 | if (!serverCanvas.width || !serverCanvas.height) {
350 | serverCanvas.width = serverCanvas.offsetWidth;
351 | serverCanvas.height = serverCanvas.offsetHeight;
352 | }
353 | serverCtx = serverCtx || serverCanvas.getContext('2d');
354 | if (serverCtx) {
355 | serverCtx.clearRect(0, 0, serverCanvas.width, serverCanvas.height);
356 | const result = wavStreamPlayer.analyser
357 | ? wavStreamPlayer.getFrequencies('voice')
358 | : { values: new Float32Array([0]) };
359 | WavRenderer.drawBars(
360 | serverCanvas,
361 | serverCtx,
362 | result.values,
363 | '#009900',
364 | 10,
365 | 0,
366 | 8
367 | );
368 | }
369 | }
370 | window.requestAnimationFrame(render);
371 | }
372 | };
373 | render();
374 |
375 | return () => {
376 | isLoaded = false;
377 | };
378 | }, []);
379 |
380 | /**
381 | * Core RealtimeClient and audio capture setup
382 | * Set all of our instructions, tools, events and more
383 | */
384 | useEffect(() => {
385 | // Get refs
386 | const wavStreamPlayer = wavStreamPlayerRef.current;
387 | const client = clientRef.current;
388 |
389 | // Set instructions
390 | client.updateSession({ instructions: instructions });
391 | // Set transcription, otherwise we don't get user transcriptions back
392 | client.updateSession({ input_audio_transcription: { model: 'whisper-1' } });
393 |
394 | // Add tools
395 | client.addTool(
396 | {
397 | name: 'set_memory',
398 | description: 'Saves important data about the user into memory.',
399 | parameters: {
400 | type: 'object',
401 | properties: {
402 | key: {
403 | type: 'string',
404 | description:
405 | 'The key of the memory value. Always use lowercase and underscores, no other characters.',
406 | },
407 | value: {
408 | type: 'string',
409 | description: 'Value can be anything represented as a string',
410 | },
411 | },
412 | required: ['key', 'value'],
413 | },
414 | },
415 | async ({ key, value }: { [key: string]: any }) => {
416 | setMemoryKv((memoryKv) => {
417 | const newKv = { ...memoryKv };
418 | newKv[key] = value;
419 | return newKv;
420 | });
421 | return { ok: true };
422 | }
423 | );
424 | client.addTool(
425 | {
426 | name: 'get_weather',
427 | description:
428 | 'Retrieves the weather for a given lat, lng coordinate pair. Specify a label for the location.',
429 | parameters: {
430 | type: 'object',
431 | properties: {
432 | lat: {
433 | type: 'number',
434 | description: 'Latitude',
435 | },
436 | lng: {
437 | type: 'number',
438 | description: 'Longitude',
439 | },
440 | location: {
441 | type: 'string',
442 | description: 'Name of the location',
443 | },
444 | },
445 | required: ['lat', 'lng', 'location'],
446 | },
447 | },
448 | async ({ lat, lng, location }: { [key: string]: any }) => {
449 | setMarker({ lat, lng, location });
450 | setCoords({ lat, lng, location });
451 | const result = await fetch(
452 | `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lng}&current=temperature_2m,wind_speed_10m`
453 | );
454 | const json = await result.json();
455 | const temperature = {
456 | value: json.current.temperature_2m as number,
457 | units: json.current_units.temperature_2m as string,
458 | };
459 | const wind_speed = {
460 | value: json.current.wind_speed_10m as number,
461 | units: json.current_units.wind_speed_10m as string,
462 | };
463 | setMarker({ lat, lng, location, temperature, wind_speed });
464 | return json;
465 | }
466 | );
467 |
468 | // handle realtime events from client + server for event logging
469 | client.on('realtime.event', (realtimeEvent: RealtimeEvent) => {
470 | setRealtimeEvents((realtimeEvents) => {
471 | const lastEvent = realtimeEvents[realtimeEvents.length - 1];
472 | if (lastEvent?.event.type === realtimeEvent.event.type) {
473 | // if we receive multiple events in a row, aggregate them for display purposes
474 | lastEvent.count = (lastEvent.count || 0) + 1;
475 | return realtimeEvents.slice(0, -1).concat(lastEvent);
476 | } else {
477 | return realtimeEvents.concat(realtimeEvent);
478 | }
479 | });
480 | });
481 | client.on('error', (event: any) => console.error(event));
482 | client.on('conversation.interrupted', async () => {
483 | const trackSampleOffset = await wavStreamPlayer.interrupt();
484 | if (trackSampleOffset?.trackId) {
485 | const { trackId, offset } = trackSampleOffset;
486 | await client.cancelResponse(trackId, offset);
487 | }
488 | });
489 | client.on('conversation.updated', async ({ item, delta }: any) => {
490 | const items = client.conversation.getItems();
491 | if (delta?.audio) {
492 | wavStreamPlayer.add16BitPCM(delta.audio, item.id);
493 | }
494 | if (item.status === 'completed' && item.formatted.audio?.length) {
495 | const wavFile = await WavRecorder.decode(
496 | item.formatted.audio,
497 | 24000,
498 | 24000
499 | );
500 | item.formatted.file = wavFile;
501 | }
502 | setItems(items);
503 | });
504 |
505 | setItems(client.conversation.getItems());
506 |
507 | return () => {
508 | // cleanup; resets to defaults
509 | client.reset();
510 | };
511 | }, []);
512 |
513 | /**
514 | * Render the application
515 | */
516 | return (
517 |     <div data-component="ConsolePage">
518 |       <div className="content-top">
519 |         <div className="content-title">
520 |           <img src="/openai-logomark.svg" />
521 |           <span>realtime console</span>
522 |         </div>
523 |         <div className="content-api-key">
524 |           {!USE_LOCAL_RELAY_SERVER_URL && (
525 |             <Button
526 |               icon={Edit}
527 |               iconPosition="end"
528 |               buttonStyle="flush"
529 |               label={`baseUrl: ${baseUrl}`}
530 |               onClick={() => resetBaseUrl()}
531 |             />
532 |           )}
533 |         </div>
534 |         <div className="content-api-key">
535 |           {!USE_LOCAL_RELAY_SERVER_URL && (
536 |             <Button
537 |               icon={Edit}
538 |               iconPosition="end"
539 |               buttonStyle="flush"
540 |               label={`api key: ${apiKey.slice(0, 3)}...`}
541 |               onClick={() => resetAPIKey()}
542 |             />
543 |           )}
544 |         </div>
545 |       </div>
546 |       <div className="content-main">
547 |         <div className="content-logs">
548 |           <div className="content-block events">
549 |             <div className="visualization">
550 |               <div className="visualization-entry client">
551 |                 <canvas ref={clientCanvasRef} />
552 |               </div>
553 |               <div className="visualization-entry server">
554 |                 <canvas ref={serverCanvasRef} />
555 |               </div>
556 |             </div>
557 |             <div className="content-block-title">events</div>
558 |             <div className="content-block-body" ref={eventsScrollRef}>
559 |               {!realtimeEvents.length && `awaiting connection...`}
560 |               {realtimeEvents.map((realtimeEvent, i) => {
561 |                 const count = realtimeEvent.count;
562 |                 const event = { ...realtimeEvent.event };
563 |                 if (event.type === 'input_audio_buffer.append') {
564 |                   event.audio = `[trimmed: ${event.audio.length} bytes]`;
565 |                 } else if (event.type === 'response.audio.delta') {
566 |                   event.delta = `[trimmed: ${event.delta.length} bytes]`;
567 |                 }
568 |                 return (
569 |                   <div className="event" key={event.event_id}>
570 |                     <div className="event-timestamp">
571 |                       {formatTime(realtimeEvent.time)}
572 |                     </div>
573 |                     <div className="event-details">
574 |                       <div
575 |                         className="event-summary"
576 |                         onClick={() => {
577 |                           // toggle event details
578 |                           const id = event.event_id;
579 |                           const expanded = { ...expandedEvents };
580 |                           if (expanded[id]) {
581 |                             delete expanded[id];
582 |                           } else {
583 |                             expanded[id] = true;
584 |                           }
585 |                           setExpandedEvents(expanded);
586 |                         }}
587 |                       >
588 |                         <div
589 |                           className={`event-source ${
590 |                             event.type === 'error'
591 |                               ? 'error'
592 |                               : realtimeEvent.source
593 |                           }`}
594 |                         >
595 |                           {realtimeEvent.source === 'client' ? (
596 |                             <ArrowUp />
597 |                           ) : (
598 |                             <ArrowDown />
599 |                           )}
600 |                           <span>
601 |                             {event.type === 'error'
602 |                               ? 'error!'
603 |                               : realtimeEvent.source}
604 |                           </span>
605 |                         </div>
606 |                         <div className="event-type">
607 |                           {event.type}
608 |                           {count && ` (${count})`}
609 |                         </div>
610 |                       </div>
611 |                       {!!expandedEvents[event.event_id] && (
612 |                         <div className="event-payload">
613 |                           {JSON.stringify(event, null, 2)}
614 |                         </div>
615 |                       )}
616 |                     </div>
617 |                   </div>
618 |                 );
619 |               })}
620 |             </div>
621 |           </div>
622 |           <div className="content-block conversation">
623 |             <div className="content-block-title">conversation</div>
624 |             <div className="content-block-body" data-conversation-content>
625 |               {!items.length && `awaiting connection...`}
626 |               {items.map((conversationItem, i) => {
627 |                 return (
628 |                   <div className="conversation-item" key={conversationItem.id}>
629 |                     <div className={`speaker ${conversationItem.role || ''}`}>
630 |                       <div>
631 |                         {(
632 |                           conversationItem.role || conversationItem.type
633 |                         ).replaceAll('_', ' ')}
634 |                       </div>
635 |                       <div
636 |                         className="close"
637 |                         onClick={() =>
638 |                           deleteConversationItem(conversationItem.id)
639 |                         }
640 |                       >
641 |                         <X />
642 |                       </div>
643 |                     </div>
644 |                     <div className={`speaker-content`}>
645 |                       {/* tool response */}
646 |                       {conversationItem.type === 'function_call_output' && (
647 |                         <div>{conversationItem.formatted.output}</div>
648 |                       )}
649 |                       {/* tool call */}
650 |                       {!!conversationItem.formatted.tool && (
651 |                         <div>
652 |                           {conversationItem.formatted.tool.name}(
653 |                           {conversationItem.formatted.tool.arguments})
654 |                         </div>
655 |                       )}
656 |                       {!conversationItem.formatted.tool &&
657 |                         conversationItem.role === 'user' && (
658 |                           <div>
659 |                             {conversationItem.formatted.transcript ||
660 |                               (conversationItem.formatted.audio?.length
661 |                                 ? '(awaiting transcript)'
662 |                                 : conversationItem.formatted.text ||
663 |                                   '(item sent)')}
664 |                           </div>
665 |                         )}
666 |                       {!conversationItem.formatted.tool &&
667 |                         conversationItem.role === 'assistant' && (
668 |                           <div>
669 |                             {conversationItem.formatted.transcript ||
670 |                               conversationItem.formatted.text ||
671 |                               '(truncated)'}
672 |                           </div>
673 |                         )}
674 |                       {conversationItem.formatted.file && (
675 |                         <audio
676 |                           src={conversationItem.formatted.file.url}
677 |                           controls
678 |                         />
679 |                       )}
680 |                     </div>
681 |                   </div>
682 |                 );
683 |               })}
684 |             </div>
685 |           </div>
686 |           <div className="content-actions">
687 |             <Toggle
688 |               defaultValue={false}
689 |               labels={['manual', 'vad']}
690 |               values={['none', 'server_vad']}
691 |               onChange={(_, value) => changeTurnEndType(value)}
692 |             />
693 |             <div className="spacer" />
694 |             {isConnected && canPushToTalk && (
695 |               <Button
696 |                 label={isRecording ? 'release to send' : 'push to talk'}
697 |                 buttonStyle={isRecording ? 'alert' : 'regular'}
698 |                 disabled={!isConnected || !canPushToTalk}
699 |                 onMouseDown={startRecording}
700 |                 onMouseUp={stopRecording}
701 |               />
702 |             )}
703 |             <div className="spacer" />
704 |             <Button
705 |               label={isConnected ? 'disconnect' : 'connect'}
706 |               iconPosition={isConnected ? 'end' : 'start'}
707 |               icon={isConnected ? X : Zap}
708 |               buttonStyle={isConnected ? 'regular' : 'action'}
709 |               onClick={
710 |                 isConnected ? disconnectConversation : connectConversation
711 |               }
712 |             />
713 |           </div>
714 |         </div>
715 |         <div className="content-right">
716 |           <div className="content-block map">
717 |             <div className="content-block-title">get_weather()</div>
718 |             <div className="content-block-title bottom">
719 |               {marker?.location || 'not yet retrieved'}
720 |               {!!marker?.temperature && (
721 |                 <>
722 |                   <br />
723 |                   🌡️ {marker.temperature.value} {marker.temperature.units}
724 |                 </>
725 |               )}
726 |               {!!marker?.wind_speed && (
727 |                 <>
728 |                   {' '}
729 |                   🍃 {marker.wind_speed.value} {marker.wind_speed.units}
730 |                 </>
731 |               )}
732 |             </div>
733 |             <div className="content-block-body full">
734 |               {coords && (
735 |                 <Map
736 |                   center={[coords.lat, coords.lng]}
737 |                   location={coords.location}
738 |                 />
739 |               )}
740 |             </div>
741 |           </div>
742 |           <div className="content-block kv">
743 |             <div className="content-block-title">set_memory()</div>
744 |             <div className="content-block-body content-kv">
745 |               {JSON.stringify(memoryKv, null, 2)}
746 |             </div>
747 |           </div>
748 |         </div>
749 |       </div>
750 |     </div>
751 |   );
752 | }
753 |
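Note on the connection setup above: with `dangerouslyAllowAPIKeyInBrowser`, the key lives in `localStorage` and ships to the client, which is only acceptable for local experiments; routing through the relay server keeps it server-side. Below is a minimal headless sketch of the same client flow, using only the `@openai/realtime-api-beta` calls that appear in this file. The relay URL and port are assumptions — point it at wherever `relay-server/index.js` actually listens.

```typescript
import { RealtimeClient } from '@openai/realtime-api-beta';

// Assumed relay address — the relay holds the API key, not the browser.
const RELAY_URL = 'ws://localhost:8081';

async function main() {
  const client = new RealtimeClient({ url: RELAY_URL });

  // Same session setup ConsolePage performs on mount
  client.updateSession({ instructions: 'You are a helpful test agent.' });
  client.updateSession({ input_audio_transcription: { model: 'whisper-1' } });

  // Print assistant transcripts as conversation items update
  client.on('conversation.updated', ({ item }: any) => {
    if (item.role === 'assistant' && item.formatted?.transcript) {
      console.log('assistant:', item.formatted.transcript);
    }
  });

  await client.connect();
  client.sendUserMessageContent([{ type: 'input_text', text: 'Hello!' }]);
}

main().catch(console.error);
```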
--------------------------------------------------------------------------------
/src/react-app-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="react-scripts" />
2 |
--------------------------------------------------------------------------------
/src/reportWebVitals.ts:
--------------------------------------------------------------------------------
1 | import { ReportHandler } from 'web-vitals';
2 |
3 | const reportWebVitals = (onPerfEntry?: ReportHandler) => {
4 | if (onPerfEntry && onPerfEntry instanceof Function) {
5 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
6 | getCLS(onPerfEntry);
7 | getFID(onPerfEntry);
8 | getFCP(onPerfEntry);
9 | getLCP(onPerfEntry);
10 | getTTFB(onPerfEntry);
11 | });
12 | }
13 | };
14 |
15 | export default reportWebVitals;
16 |
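The handler is optional; nothing is measured unless `index.tsx` passes one in. A typical wiring follows the standard create-react-app pattern — the `/analytics` endpoint below is a hypothetical placeholder:

```typescript
import reportWebVitals from './reportWebVitals';

// Log each metric (CLS, FID, FCP, LCP, TTFB) to the console...
reportWebVitals(console.log);

// ...or ship it to an endpoint of your choosing; '/analytics' is assumed.
reportWebVitals((metric) => {
  navigator.sendBeacon('/analytics', JSON.stringify(metric));
});
```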
--------------------------------------------------------------------------------
/src/setupTests.ts:
--------------------------------------------------------------------------------
1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like:
3 | // expect(element).toHaveTextContent(/react/i)
4 | // learn more: https://github.com/testing-library/jest-dom
5 | import '@testing-library/jest-dom';
6 |
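A minimal self-contained test using these matchers — a sketch only, exercising jsdom directly rather than rendering app components (mounting `ConsolePage` in jsdom would trigger `prompt()` and audio APIs that jest does not provide):

```typescript
import '@testing-library/jest-dom';

test('jest-dom matchers are available', () => {
  // Build a plain DOM node and assert on it with the custom matchers
  const el = document.createElement('div');
  el.textContent = 'react';
  document.body.appendChild(el);

  expect(el).toBeInTheDocument();
  expect(el).toHaveTextContent(/react/i);
});
```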
--------------------------------------------------------------------------------
/src/utils/conversation_config.js:
--------------------------------------------------------------------------------
1 | export const instructions = `System settings:
2 | Tool use: enabled.
3 |
4 | Instructions:
5 | - You are an artificial intelligence agent responsible for helping test realtime voice capabilities
6 | - Please make sure to respond with a helpful voice via audio
7 | - Be kind, helpful, and courteous
8 | - It is okay to ask the user questions
9 | - Use tools and functions you have available liberally; it is part of the training apparatus
10 | - Be open to exploration and conversation
11 | - Remember: this is just for fun and testing!
12 |
13 | Personality:
14 | - Be upbeat and genuine
15 | - Try speaking quickly as if excited
16 | `;
17 |
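This string is consumed once in `ConsolePage.tsx` via `client.updateSession({ instructions })`. Since `updateSession` can be called again mid-session, here is a sketch of swapping the personality at runtime; the client declaration is a stand-in for the real `RealtimeClient`, and the calmer replacement line is invented for illustration:

```typescript
import { instructions } from './conversation_config';

// Assumed minimal shape of the realtime client used in ConsolePage.tsx
declare const client: {
  updateSession(patch: { instructions?: string }): void;
};

// Initial setup, exactly as ConsolePage does it
client.updateSession({ instructions });

// Later, e.g. behind a settings toggle — hypothetical calmer persona
client.updateSession({
  instructions: instructions.replace(
    '- Try speaking quickly as if excited',
    '- Speak slowly and calmly'
  ),
});
```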
--------------------------------------------------------------------------------
/src/utils/wav_renderer.ts:
--------------------------------------------------------------------------------
1 | const dataMap = new WeakMap();
2 |
3 | /**
4 | * Normalizes a Float32Array to Array(m): We use this to draw amplitudes on a graph
5 | * If we're rendering the same audio data, then we'll often be using
6 | * the same (data, m, downsamplePeaks) triplets so we give option to memoize
7 | */
8 | const normalizeArray = (
9 | data: Float32Array,
10 | m: number,
11 | downsamplePeaks: boolean = false,
12 | memoize: boolean = false
13 | ) => {
14 | let cache, mKey, dKey;
15 | if (memoize) {
16 | mKey = m.toString();
17 | dKey = downsamplePeaks.toString();
18 | cache = dataMap.has(data) ? dataMap.get(data) : {};
19 | dataMap.set(data, cache);
20 | cache[mKey] = cache[mKey] || {};
21 | if (cache[mKey][dKey]) {
22 | return cache[mKey][dKey];
23 | }
24 | }
25 | const n = data.length;
26 | const result = new Array(m);
27 | if (m <= n) {
28 | // Downsampling
29 | result.fill(0);
30 | const count = new Array(m).fill(0);
31 | for (let i = 0; i < n; i++) {
32 | const index = Math.floor(i * (m / n));
33 | if (downsamplePeaks) {
34 | // take highest result in the set
35 | result[index] = Math.max(result[index], Math.abs(data[i]));
36 | } else {
37 | result[index] += Math.abs(data[i]);
38 | }
39 | count[index]++;
40 | }
41 | if (!downsamplePeaks) {
42 | for (let i = 0; i < result.length; i++) {
43 | result[i] = result[i] / count[i];
44 | }
45 | }
46 | } else {
47 | for (let i = 0; i < m; i++) {
48 | const index = (i * (n - 1)) / (m - 1);
49 | const low = Math.floor(index);
50 | const high = Math.ceil(index);
51 | const t = index - low;
52 | if (high >= n) {
53 | result[i] = data[n - 1];
54 | } else {
55 | result[i] = data[low] * (1 - t) + data[high] * t;
56 | }
57 | }
58 | }
59 | if (memoize) {
60 | cache[mKey as string][dKey as string] = result;
61 | }
62 | return result;
63 | };
64 |
65 | export const WavRenderer = {
66 | /**
67 | * Renders a point-in-time snapshot of an audio sample, usually frequency values
68 | * @param canvas
69 | * @param ctx
70 | * @param data
71 | * @param color
72 | * @param pointCount number of bars to render
73 | * @param barWidth width of bars in px
74 | * @param barSpacing spacing between bars in px
75 | * @param center vertically center the bars
76 | */
77 | drawBars: (
78 | canvas: HTMLCanvasElement,
79 | ctx: CanvasRenderingContext2D,
80 | data: Float32Array,
81 | color: string,
82 | pointCount: number = 0,
83 | barWidth: number = 0,
84 | barSpacing: number = 0,
85 | center: boolean = false
86 | ) => {
87 | pointCount = Math.floor(
88 | Math.min(
89 | pointCount,
90 | (canvas.width - barSpacing) / (Math.max(barWidth, 1) + barSpacing)
91 | )
92 | );
93 | if (!pointCount) {
94 | pointCount = Math.floor(
95 | (canvas.width - barSpacing) / (Math.max(barWidth, 1) + barSpacing)
96 | );
97 | }
98 | if (!barWidth) {
99 | barWidth = (canvas.width - barSpacing) / pointCount - barSpacing;
100 | }
101 | const points = normalizeArray(data, pointCount, true);
102 | for (let i = 0; i < pointCount; i++) {
103 | const amplitude = Math.abs(points[i]);
104 | const height = Math.max(1, amplitude * canvas.height);
105 | const x = barSpacing + i * (barWidth + barSpacing);
106 | const y = center ? (canvas.height - height) / 2 : canvas.height - height;
107 | ctx.fillStyle = color;
108 | ctx.fillRect(x, y, barWidth, height);
109 | }
110 | },
111 | };
112 |
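A standalone usage sketch of `drawBars`, mirroring the parameters `ConsolePage.tsx` passes (10 bars, auto bar width, 8px spacing). The canvas element id and the synthetic spectrum are assumptions for illustration; in the console the data comes from `getFrequencies('voice').values`:

```typescript
import { WavRenderer } from './wav_renderer';

// Assumed host element; in the console this is clientCanvasRef/serverCanvasRef
const canvas = document.getElementById('viz') as HTMLCanvasElement;
canvas.width = canvas.offsetWidth;
canvas.height = canvas.offsetHeight;
const ctx = canvas.getContext('2d');

if (ctx) {
  // Fake a 64-bin spectrum with values in [0, 1], standing in for
  // wavRecorder.getFrequencies('voice').values
  const values = new Float32Array(64).map((_, i) => Math.abs(Math.sin(i / 4)));

  ctx.clearRect(0, 0, canvas.width, canvas.height);
  WavRenderer.drawBars(canvas, ctx, values, '#0099ff', 10, 0, 8);
}
```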
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "lib": ["dom", "dom.iterable", "esnext", "ES2020"],
5 | "allowJs": true,
6 | "skipLibCheck": true,
7 | "esModuleInterop": true,
8 | "allowSyntheticDefaultImports": true,
9 | "strict": true,
10 | "forceConsistentCasingInFileNames": true,
11 | "noFallthroughCasesInSwitch": true,
12 | "module": "esnext",
13 | "moduleResolution": "node",
14 | "resolveJsonModule": true,
15 | "isolatedModules": true,
16 | "noEmit": true,
17 | "jsx": "react-jsx"
18 | },
19 | "include": ["src", "src/lib"]
20 | }
21 |
--------------------------------------------------------------------------------