├── .github
│   └── workflows
│       └── deploy_gh_pages.yml
├── .gitignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── LICENSE
├── README.md
├── bun.lockb
├── eslint.config.js
├── package-lock.json
├── package.json
├── postcss.config.js
├── src
│   ├── app.css
│   ├── app.d.ts
│   ├── app.html
│   ├── lib
│   │   ├── index.ts
│   │   └── visualizations
│   │       ├── audio
│   │       │   ├── AudioFrequency.svelte
│   │       │   ├── BarAudioVisualizer.svelte
│   │       │   ├── CircleBarAudioVisualizer.svelte
│   │       │   ├── CircleCirclesAudioVisualizer.svelte
│   │       │   ├── DeformedCircleAudioVisualizer.svelte
│   │       │   ├── InnerGlowAudioVisualizer.svelte
│   │       │   ├── MicrophoneAudioVisualizer.svelte
│   │       │   └── SpeakerAudioVisualizer.svelte
│   │       ├── core
│   │       │   ├── BarVisualizer.svelte
│   │       │   ├── CircleBarVisualizer.svelte
│   │       │   ├── CircleCirclesVisualizer.svelte
│   │       │   ├── DeformedCircleVisualizer.svelte
│   │       │   ├── Glow.svelte
│   │       │   ├── InnerGlowVisualizer.svelte
│   │       │   ├── MicrophoneVisualizer.svelte
│   │       │   ├── SpeakerVisualizer.svelte
│   │       │   └── utils.ts
│   │       └── wavtools
│   │           ├── dist
│   │           │   ├── index.d.ts
│   │           │   ├── index.d.ts.map
│   │           │   └── lib
│   │           │       ├── analysis
│   │           │       │   ├── audio_analysis.d.ts
│   │           │       │   ├── audio_analysis.d.ts.map
│   │           │       │   ├── constants.d.ts
│   │           │       │   └── constants.d.ts.map
│   │           │       ├── wav_packer.d.ts
│   │           │       ├── wav_packer.d.ts.map
│   │           │       ├── wav_recorder.d.ts
│   │           │       ├── wav_recorder.d.ts.map
│   │           │       ├── wav_stream_player.d.ts
│   │           │       ├── wav_stream_player.d.ts.map
│   │           │       └── worklets
│   │           │           ├── audio_processor.d.ts
│   │           │           ├── audio_processor.d.ts.map
│   │           │           ├── stream_processor.d.ts
│   │           │           └── stream_processor.d.ts.map
│   │           ├── index.js
│   │           └── lib
│   │               ├── analysis
│   │               │   ├── audio_analysis.js
│   │               │   └── constants.js
│   │               ├── audio_file_player.js
│   │               ├── wav_packer.js
│   │               ├── wav_recorder.js
│   │               ├── wav_stream_player.js
│   │               └── worklets
│   │                   ├── audio_processor.js
│   │                   └── stream_processor.js
│   └── routes
│       ├── +layout.svelte
│       ├── +layout.ts
│       └── +page.svelte
├── static
│   └── music.mp3
├── svelte.config.js
├── tailwind.config.js
├── tsconfig.json
└── vite.config.ts
/.github/workflows/deploy_gh_pages.yml:
--------------------------------------------------------------------------------
1 | name: Deploy to GitHub Pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | build_site:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout
13 | uses: actions/checkout@v3
14 |
15 | - name: Install Node.js
16 | uses: actions/setup-node@v3
17 | with:
18 | node-version: 18
19 | cache: npm
20 |
21 | - name: Install dependencies
22 | run: npm install
23 |
24 | - name: Build
25 | env:
26 | BASE_PATH: '/${{ github.event.repository.name }}'
27 | run: |
28 | npm run build
29 |
30 | - name: Upload Artifacts
31 | uses: actions/upload-pages-artifact@v2
32 | with:
33 | # this should match the `pages` option in your adapter-static options
34 | path: 'build/'
35 |
36 | deploy:
37 | needs: build_site
38 | runs-on: ubuntu-latest
39 |
40 | permissions:
41 | pages: write
42 | id-token: write
43 |
44 | environment:
45 | name: github-pages
46 | url: ${{ steps.deployment.outputs.page_url }}
47 |
48 | steps:
49 | - name: Deploy
50 | id: deployment
51 | uses: actions/deploy-pages@v2
52 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | node_modules
3 | /build
4 | /.svelte-kit
5 | /package
6 | .env
7 | .env.*
8 | !.env.example
9 | vite.config.js.timestamp-*
10 | vite.config.ts.timestamp-*
11 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | engine-strict=true
2 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | # Ignore files for PNPM, NPM and YARN
2 | pnpm-lock.yaml
3 | package-lock.json
4 | yarn.lock
5 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "useTabs": true,
3 | "singleQuote": true,
4 | "trailingComma": "none",
5 | "printWidth": 100,
6 | "plugins": ["prettier-plugin-svelte"],
7 | "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }]
8 | }
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License Copyright (c) 2024 flo-bit
2 |
3 | Permission is hereby granted, free of
4 | charge, to any person obtaining a copy of this software and associated
5 | documentation files (the "Software"), to deal in the Software without
6 | restriction, including without limitation the rights to use, copy, modify, merge,
7 | publish, distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to the
9 | following conditions:
10 |
11 | The above copyright notice and this permission notice
12 | (including the next paragraph) shall be included in all copies or substantial
13 | portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
18 | EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # svelte audio visualizations
2 |
3 | simple, zero-dependency audio visualizations for svelte, especially useful for visualizing voice input and output.
4 |
5 | https://github.com/user-attachments/assets/056b0226-75e2-438f-82a1-cfbc7aace468
6 |
7 | ## Installation
8 |
9 | copy the `src/lib/visualizations` folder into your project.
10 |
11 | ## Usage
12 |
13 | There are two ways you can use the visualizations:
14 |
15 | 1. Using them with a `WavRecorder`, `WavStreamPlayer` or `AudioFilePlayer` instance. For this, use the components in the `lib/visualizations/audio` folder whose names end in `AudioVisualizer`.
16 |
17 | ```svelte
18 | <script lang="ts">
19 | 	import { AudioFilePlayer } from '$lib/visualizations/wavtools';
20 | 	import BarAudioVisualizer from '$lib/visualizations/audio/BarAudioVisualizer.svelte';
21 | 
22 | 	let audio: AudioFilePlayer | undefined;
23 | 
24 | 	async function start() {
25 | 		audio = new AudioFilePlayer();
26 | 		await audio.connect();
27 | 		await audio.loadFile('music.mp3');
28 | 		audio.play();
29 | 	}
30 | </script>
31 | <button on:click={start}>play</button>
32 | {#if audio}
33 | 	<BarAudioVisualizer {audio} />
34 | {/if}
35 | ```
36 |
37 | 2. Passing in values yourself. For this, use the components in the `lib/visualizations/core` folder, whose names end in just `Visualizer`.
38 |
39 | ```svelte
40 | <script lang="ts">
41 | 	import BarVisualizer from '$lib/visualizations/core/BarVisualizer.svelte';
42 | 	const values = new Float32Array([0.1, 0.4, 0.8, 0.4, 0.1]);
43 | </script>
44 | <BarVisualizer {values} />
45 | ```
46 |
47 | These expect a normalized Float32Array, where each value is between 0 and 1. Also note that the length of the array influences the visualization (e.g. the number of bars in the `BarVisualizer`). To convert an array of any length to a specific length, you can use the `normalizeArray` function.
48 |
49 | ```ts
50 | import { normalizeArray } from '$lib/visualizations/core/utils';
51 |
52 | const values = new Float32Array([0, 1, 0, 1, 0, 1]);
53 |
54 | const normalizedValues = normalizeArray(values, 10);
55 | ```
56 |
57 |
58 | ## Credits
59 |
60 | originally built for [svelte-openai-realtime-api](https://github.com/flo-bit/svelte-openai-realtime-api) with lots of code adjusted from [openai-realtime-console](https://github.com/openai/openai-realtime-console), including the WavRecorder and WavStreamPlayer classes.
61 |
62 | ## License
63 |
64 | MIT
65 |
--------------------------------------------------------------------------------
/bun.lockb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flo-bit/svelte-audio-visualizations/cfcd93b40495f48460d8de2ea83e8bf43d53088d/bun.lockb
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
1 | import js from '@eslint/js';
2 | import ts from 'typescript-eslint';
3 | import svelte from 'eslint-plugin-svelte';
4 | import prettier from 'eslint-config-prettier';
5 | import globals from 'globals';
6 |
7 | /** @type {import('eslint').Linter.FlatConfig[]} */
8 | export default [
9 | js.configs.recommended,
10 | ...ts.configs.recommended,
11 | ...svelte.configs['flat/recommended'],
12 | prettier,
13 | ...svelte.configs['flat/prettier'],
14 | {
15 | languageOptions: {
16 | globals: {
17 | ...globals.browser,
18 | ...globals.node
19 | }
20 | }
21 | },
22 | {
23 | files: ['**/*.svelte'],
24 | languageOptions: {
25 | parserOptions: {
26 | parser: ts.parser
27 | }
28 | }
29 | },
30 | {
31 | ignores: ['build/', '.svelte-kit/', 'dist/']
32 | }
33 | ];
34 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "visualization",
3 | "version": "0.0.1",
4 | "private": true,
5 | "scripts": {
6 | "dev": "vite dev",
7 | "build": "vite build",
8 | "preview": "vite preview",
9 | "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
10 | "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
11 | "lint": "prettier --check . && eslint .",
12 | "format": "prettier --write ."
13 | },
14 | "devDependencies": {
15 | "@sveltejs/adapter-auto": "^3.0.0",
16 | "@sveltejs/kit": "^2.0.0",
17 | "@sveltejs/vite-plugin-svelte": "^3.0.0",
18 | "@types/eslint": "^8.56.7",
19 | "autoprefixer": "^10.4.19",
20 | "eslint": "^9.0.0",
21 | "eslint-config-prettier": "^9.1.0",
22 | "eslint-plugin-svelte": "^2.36.0",
23 | "globals": "^15.0.0",
24 | "postcss": "^8.4.38",
25 | "prettier": "^3.1.1",
26 | "prettier-plugin-svelte": "^3.1.2",
27 | "svelte": "^4.2.7",
28 | "svelte-check": "^3.6.0",
29 | "tailwindcss": "^3.4.3",
30 | "tslib": "^2.4.1",
31 | "typescript": "^5.0.0",
32 | "typescript-eslint": "^8.0.0-alpha.20",
33 | "vite": "^5.0.3"
34 | },
35 | "type": "module",
36 | "dependencies": {
37 | "@sveltejs/adapter-static": "^3.0.5"
38 | },
39 | "license": "MIT"
40 | }
--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/src/app.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
--------------------------------------------------------------------------------
/src/app.d.ts:
--------------------------------------------------------------------------------
1 | // See https://kit.svelte.dev/docs/types#app
2 | // for information about these interfaces
3 | declare global {
4 | namespace App {
5 | // interface Error {}
6 | // interface Locals {}
7 | // interface PageData {}
8 | // interface PageState {}
9 | // interface Platform {}
10 | }
11 | }
12 |
13 | export {};
14 |
--------------------------------------------------------------------------------
/src/app.html:
--------------------------------------------------------------------------------
1 | <!doctype html>
2 | <html lang="en">
3 | 	<head>
4 | 		<meta charset="utf-8" />
5 | 		<meta
6 | 			name="viewport"
7 | 			content="width=device-width, initial-scale=1"
8 | 		/>
9 | 		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
10 | 		%sveltekit.head%
11 | 	</head>
12 | 	<body data-sveltekit-preload-data="hover">
13 | 		<h1>
14 | 			svelte audio visualizations
15 | 		</h1>
16 | 		<div style="display: contents">
17 | 			%sveltekit.body%
18 | 		</div>
19 | 	</body>
20 | </html>
--------------------------------------------------------------------------------
/src/lib/index.ts:
--------------------------------------------------------------------------------
1 | // place files you want to import through the `$lib` alias in this folder.
2 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/AudioFrequency.svelte:
--------------------------------------------------------------------------------
1 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/BarAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/CircleBarAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/CircleCirclesAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/DeformedCircleAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/InnerGlowAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/MicrophoneAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/src/lib/visualizations/audio/SpeakerAudioVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/BarVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/CircleBarVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/CircleCirclesVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
74 |
75 |
76 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/DeformedCircleVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/Glow.svelte:
--------------------------------------------------------------------------------
1 |
4 |
5 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/InnerGlowVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
133 |
134 |
135 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/MicrophoneVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
8 |
9 |
28 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/SpeakerVisualizer.svelte:
--------------------------------------------------------------------------------
1 |
8 |
9 |
37 |
--------------------------------------------------------------------------------
/src/lib/visualizations/core/utils.ts:
--------------------------------------------------------------------------------
1 | const dataMap = new WeakMap();
2 |
3 | import { onMount } from 'svelte';
4 |
5 | export async function raf(callback: () => void) {
6 | let mounted = true;
7 |
8 | async function render() {
9 | if (!mounted || !callback) return;
10 |
11 | try {
12 | callback();
13 | } catch (e) {
14 | console.error(e);
15 | }
16 | requestAnimationFrame(render);
17 | }
18 |
19 | onMount(() => {
20 | render();
21 | return () => (mounted = false);
22 | });
23 | }
24 |
25 | /**
26 | * Normalizes a Float32Array to Array(m): We use this to draw amplitudes on a graph
27 | * If we're rendering the same audio data, then we'll often be using
28 | * the same (data, m, downsamplePeaks) triplets so we give option to memoize
29 | */
30 | export const normalizeArray = (
31 | data: Float32Array,
32 | m: number,
33 | downsamplePeaks: boolean = false,
34 | memoize: boolean = false
35 | ) => {
36 | let cache, mKey, dKey;
37 | if (memoize) {
38 | mKey = m.toString();
39 | dKey = downsamplePeaks.toString();
40 | cache = dataMap.has(data) ? dataMap.get(data) : {};
41 | dataMap.set(data, cache);
42 | cache[mKey] = cache[mKey] || {};
43 | if (cache[mKey][dKey]) {
44 | return cache[mKey][dKey];
45 | }
46 | }
47 | const n = data.length;
48 | const result = new Array(m);
49 | if (m <= n) {
50 | // Downsampling
51 | result.fill(0);
52 | const count = new Array(m).fill(0);
53 | for (let i = 0; i < n; i++) {
54 | const index = Math.floor(i * (m / n));
55 | if (downsamplePeaks) {
56 | // take highest result in the set
57 | result[index] = Math.max(result[index], Math.abs(data[i]));
58 | } else {
59 | result[index] += Math.abs(data[i]);
60 | }
61 | count[index]++;
62 | }
63 | if (!downsamplePeaks) {
64 | for (let i = 0; i < result.length; i++) {
65 | result[i] = result[i] / count[i];
66 | }
67 | }
68 | } else {
69 | for (let i = 0; i < m; i++) {
70 | const index = (i * (n - 1)) / (m - 1);
71 | const low = Math.floor(index);
72 | const high = Math.ceil(index);
73 | const t = index - low;
74 | if (high >= n) {
75 | result[i] = data[n - 1];
76 | } else {
77 | result[i] = data[low] * (1 - t) + data[high] * t;
78 | }
79 | }
80 | }
81 | if (memoize) {
82 | cache[mKey as string][dKey as string] = result;
83 | }
84 | return result;
85 | };
86 |
--------------------------------------------------------------------------------
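A quick sketch of how `normalizeArray` behaves (hypothetical data; only the import path is from this repo): with more input samples than output slots it downsamples, either averaging per bucket or taking the per-bucket peak when `downsamplePeaks` is set; with fewer, it linearly interpolates.

```ts
import { normalizeArray } from '$lib/visualizations/core/utils';

// 1024 FFT magnitudes squeezed into 32 bars, keeping the loudest value per bar
const fft = new Float32Array(1024).map(() => Math.random());
const bars = normalizeArray(fft, 32, true);

// fewer inputs than outputs: values are linearly interpolated up to length 16
const sparse = new Float32Array([0, 0.5, 1]);
const smooth = normalizeArray(sparse, 16);

// memoize=true caches the result per (data, m, downsamplePeaks) triplet
const cached = normalizeArray(fft, 32, true, true);
```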
/src/lib/visualizations/wavtools/dist/index.d.ts:
--------------------------------------------------------------------------------
1 | import { AudioAnalysis } from './lib/analysis/audio_analysis.js';
2 | import { WavPacker } from './lib/wav_packer.js';
3 | import { WavStreamPlayer } from './lib/wav_stream_player.js';
4 | import { WavRecorder } from './lib/wav_recorder.js';
5 | export { AudioAnalysis, WavPacker, WavStreamPlayer, WavRecorder };
6 | //# sourceMappingURL=index.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/index.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../index.js"],"names":[],"mappings":"8BAC8B,kCAAkC;0BADtC,qBAAqB;gCAEf,4BAA4B;4BAChC,uBAAuB"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/analysis/audio_analysis.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Output of AudioAnalysis for the frequency domain of the audio
3 | * @typedef {Object} AudioAnalysisOutputType
4 | * @property {Float32Array} values Amplitude of this frequency between {0, 1} inclusive
5 | * @property {number[]} frequencies Raw frequency bucket values
6 | * @property {string[]} labels Labels for the frequency bucket values
7 | */
8 | /**
9 | * Analyzes audio for visual output
10 | * @class
11 | */
12 | export class AudioAnalysis {
13 | /**
14 | * Retrieves frequency domain data from an AnalyserNode adjusted to a decibel range
15 | * returns human-readable formatting and labels
16 | * @param {AnalyserNode} analyser
17 | * @param {number} sampleRate
18 | * @param {Float32Array} [fftResult]
19 | * @param {"frequency"|"music"|"voice"} [analysisType]
20 | * @param {number} [minDecibels] default -100
21 | * @param {number} [maxDecibels] default -30
22 | * @returns {AudioAnalysisOutputType}
23 | */
24 | static getFrequencies(analyser: AnalyserNode, sampleRate: number, fftResult?: Float32Array, analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): AudioAnalysisOutputType;
25 | /**
26 | * Creates a new AudioAnalysis instance for an HTMLAudioElement
27 | * @param {HTMLAudioElement} audioElement
28 | * @param {AudioBuffer|null} [audioBuffer] If provided, will cache all frequency domain data from the buffer
29 | * @returns {AudioAnalysis}
30 | */
31 | constructor(audioElement: HTMLAudioElement, audioBuffer?: AudioBuffer | null);
32 | fftResults: any[];
33 | audio: HTMLAudioElement;
34 | context: any;
35 | analyser: any;
36 | sampleRate: any;
37 | audioBuffer: any;
38 | /**
39 | * Gets the current frequency domain data from the playing audio track
40 | * @param {"frequency"|"music"|"voice"} [analysisType]
41 | * @param {number} [minDecibels] default -100
42 | * @param {number} [maxDecibels] default -30
43 | * @returns {AudioAnalysisOutputType}
44 | */
45 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): AudioAnalysisOutputType;
46 | /**
47 | * Resume the internal AudioContext if it was suspended due to the lack of
48 | * user interaction when the AudioAnalysis was instantiated.
49 | * @returns {Promise<true>}
50 | */
51 | resumeIfSuspended(): Promise<true>;
52 | }
53 | /**
54 | * Output of AudioAnalysis for the frequency domain of the audio
55 | */
56 | export type AudioAnalysisOutputType = {
57 | /**
58 | * Amplitude of this frequency between {0, 1} inclusive
59 | */
60 | values: Float32Array;
61 | /**
62 | * Raw frequency bucket values
63 | */
64 | frequencies: number[];
65 | /**
66 | * Labels for the frequency bucket values
67 | */
68 | labels: string[];
69 | };
70 | //# sourceMappingURL=audio_analysis.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/analysis/audio_analysis.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"audio_analysis.d.ts","sourceRoot":"","sources":["../../../lib/analysis/audio_analysis.js"],"names":[],"mappings":"AAOA;;;;;;GAMG;AAEH;;;GAGG;AACH;IACE;;;;;;;;;;OAUG;IACH,gCARW,YAAY,cACZ,MAAM,cACN,YAAY,iBACZ,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,uBAAuB,CAwDnC;IAED;;;;;OAKG;IACH,0BAJW,gBAAgB,gBAChB,WAAW,GAAC,IAAI,EAkE1B;IA9DC,kBAAoB;IA2ClB,wBAAyB;IACzB,aAAkC;IAClC,cAAwB;IACxB,gBAA4B;IAC5B,iBAA8B;IAiBlC;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,uBAAuB,CAwBnC;IAED;;;;OAIG;IACH,qBAFa,OAAO,CAAC,IAAI,CAAC,CAOzB;CACF;;;;;;;;YA9La,YAAY;;;;iBACZ,MAAM,EAAE;;;;YACR,MAAM,EAAE"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/analysis/constants.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * All note frequencies from 1st to 8th octave
3 | * in format "A#8" (A#, 8th octave)
4 | */
5 | export const noteFrequencies: any[];
6 | export const noteFrequencyLabels: any[];
7 | export const voiceFrequencies: any[];
8 | export const voiceFrequencyLabels: any[];
9 | //# sourceMappingURL=constants.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/analysis/constants.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"constants.d.ts","sourceRoot":"","sources":["../../../lib/analysis/constants.js"],"names":[],"mappings":"AA6BA;;;GAGG;AACH,oCAAkC;AAClC,wCAAsC;AActC,qCAKG;AACH,yCAKG"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/wav_packer.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Raw wav audio file contents
3 | * @typedef {Object} WavPackerAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {number} channelCount
7 | * @property {number} sampleRate
8 | * @property {number} duration
9 | */
10 | /**
11 | * Utility class for assembling PCM16 "audio/wav" data
12 | * @class
13 | */
14 | export class WavPacker {
15 | /**
16 | * Converts Float32Array of amplitude data to ArrayBuffer in Int16Array format
17 | * @param {Float32Array} float32Array
18 | * @returns {ArrayBuffer}
19 | */
20 | static floatTo16BitPCM(float32Array: Float32Array): ArrayBuffer;
21 | /**
22 | * Concatenates two ArrayBuffers
23 | * @param {ArrayBuffer} leftBuffer
24 | * @param {ArrayBuffer} rightBuffer
25 | * @returns {ArrayBuffer}
26 | */
27 | static mergeBuffers(leftBuffer: ArrayBuffer, rightBuffer: ArrayBuffer): ArrayBuffer;
28 | /**
29 | * Packs data into an Int16 format
30 | * @private
31 | * @param {number} size 0 = 1x Int16, 1 = 2x Int16
32 | * @param {number} arg value to pack
33 | * @returns
34 | */
35 | private _packData;
36 | /**
37 | * Packs audio into "audio/wav" Blob
38 | * @param {number} sampleRate
39 | * @param {{bitsPerSample: number, channels: Array<Float32Array>, data: Int16Array}} audio
40 | * @returns {WavPackerAudioType}
41 | */
42 | pack(sampleRate: number, audio: {
43 | bitsPerSample: number;
44 | channels: Array<Float32Array>;
45 | data: Int16Array;
46 | }): WavPackerAudioType;
47 | }
48 | /**
49 | * Raw wav audio file contents
50 | */
51 | export type WavPackerAudioType = {
52 | blob: Blob;
53 | url: string;
54 | channelCount: number;
55 | sampleRate: number;
56 | duration: number;
57 | };
58 | //# sourceMappingURL=wav_packer.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/wav_packer.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_packer.d.ts","sourceRoot":"","sources":["../../lib/wav_packer.js"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH;;;GAGG;AACH;IACE;;;;OAIG;IACH,qCAHW,YAAY,GACV,WAAW,CAWvB;IAED;;;;;OAKG;IACH,gCAJW,WAAW,eACX,WAAW,GACT,WAAW,CASvB;IAED;;;;;;OAMG;IACH,kBAKC;IAED;;;;;OAKG;IACH,iBAJW,MAAM,SACN;QAAC,aAAa,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,KAAK,CAAC,YAAY,CAAC,CAAC;QAAC,IAAI,EAAE,UAAU,CAAA;KAAC,GACtE,kBAAkB,CA6C9B;CACF;;;;;UA3Ga,IAAI;SACJ,MAAM;kBACN,MAAM;gBACN,MAAM;cACN,MAAM"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/wav_recorder.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Decodes audio into a wav file
3 | * @typedef {Object} DecodedAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {Float32Array} values
7 | * @property {AudioBuffer} audioBuffer
8 | */
9 | /**
10 | * Records live stream of user audio as PCM16 "audio/wav" data
11 | * @class
12 | */
13 | export class WavRecorder {
14 | /**
15 | * Decodes audio data from multiple formats to a Blob, url, Float32Array and AudioBuffer
16 | * @param {Blob|Float32Array|Int16Array|ArrayBuffer|number[]} audioData
17 | * @param {number} sampleRate
18 | * @param {number} fromSampleRate
19 | * @returns {Promise<DecodedAudioType>}
20 | */
21 | static decode(audioData: Blob | Float32Array | Int16Array | ArrayBuffer | number[], sampleRate?: number, fromSampleRate?: number): Promise<DecodedAudioType>;
22 | /**
23 | * Create a new WavRecorder instance
24 | * @param {{sampleRate?: number, outputToSpeakers?: boolean, debug?: boolean}} [options]
25 | * @returns {WavRecorder}
26 | */
27 | constructor({ sampleRate, outputToSpeakers, debug, }?: {
28 | sampleRate?: number;
29 | outputToSpeakers?: boolean;
30 | debug?: boolean;
31 | });
32 | scriptSrc: any;
33 | sampleRate: number;
34 | outputToSpeakers: boolean;
35 | debug: boolean;
36 | _deviceChangeCallback: () => Promise<void>;
37 | _devices: any[];
38 | stream: any;
39 | processor: any;
40 | source: any;
41 | node: any;
42 | recording: boolean;
43 | _lastEventId: number;
44 | eventReceipts: {};
45 | eventTimeout: number;
46 | _chunkProcessor: () => void;
47 | _chunkProcessorBuffer: {
48 | raw: ArrayBuffer;
49 | mono: ArrayBuffer;
50 | };
51 | /**
52 | * Logs data in debug mode
53 | * @param {...any} arguments
54 | * @returns {true}
55 | */
56 | log(...args: any[]): true;
57 | /**
58 | * Retrieves the current sampleRate for the recorder
59 | * @returns {number}
60 | */
61 | getSampleRate(): number;
62 | /**
63 | * Retrieves the current status of the recording
64 | * @returns {"ended"|"paused"|"recording"}
65 | */
66 | getStatus(): "ended" | "paused" | "recording";
67 | /**
68 | * Sends an event to the AudioWorklet
69 | * @private
70 | * @param {string} name
71 | * @param {{[key: string]: any}} data
72 | * @param {AudioWorkletNode} [_processor]
73 | * @returns {Promise<{[key: string]: any}>}
74 | */
75 | private _event;
76 | /**
77 | * Sets device change callback, remove if callback provided is `null`
78 | * @param {(Array<MediaDeviceInfo & {default: boolean}>): void|null} callback
79 | * @returns {true}
80 | */
81 | listenForDeviceChange(callback: any): true;
82 | /**
83 | * Manually request permission to use the microphone
84 | * @returns {Promise<true>}
85 | */
86 | requestPermission(): Promise<true>;
87 | /**
88 | * List all eligible devices for recording, will request permission to use microphone
89 | * @returns {Promise<Array<MediaDeviceInfo & {default: boolean}>>}
90 | */
91 | listDevices(): Promise<Array<MediaDeviceInfo & {
92 | default: boolean;
93 | }>>;
94 | /**
95 | * Begins a recording session and requests microphone permissions if not already granted
96 | * Microphone recording indicator will appear on browser tab but status will be "paused"
97 | * @param {string} [deviceId] if no device provided, default device will be used
98 | * @returns {Promise<true>}
99 | */
100 | begin(deviceId?: string): Promise<true>;
101 | analyser: any;
102 | /**
103 | * Gets the current frequency domain data from the recording track
104 | * @param {"frequency"|"music"|"voice"} [analysisType]
105 | * @param {number} [minDecibels] default -100
106 | * @param {number} [maxDecibels] default -30
107 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
108 | */
109 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): import("./analysis/audio_analysis.js").AudioAnalysisOutputType;
110 | /**
111 | * Pauses the recording
112 | * Keeps microphone stream open but halts storage of audio
113 | * @returns {Promise<true>}
114 | */
115 | pause(): Promise<true>;
116 | /**
117 | * Start recording stream and storing to memory from the connected audio source
118 | * @param {(data: { mono: Int16Array; raw: Int16Array }) => any} [chunkProcessor]
119 | * @param {number} [chunkSize] chunkProcessor will not be triggered until this size threshold met in mono audio
120 | * @returns {Promise<true>}
121 | */
122 | record(chunkProcessor?: (data: {
123 | mono: Int16Array;
124 | raw: Int16Array;
125 | }) => any, chunkSize?: number): Promise<true>;
126 | _chunkProcessorSize: number;
127 | /**
128 | * Clears the audio buffer, empties stored recording
129 | * @returns {Promise<true>}
130 | */
131 | clear(): Promise<true>;
132 | /**
133 | * Reads the current audio stream data
134 | * @returns {Promise<{meanValues: Float32Array, channels: Array<Float32Array>}>}
135 | */
136 | read(): Promise<{
137 | meanValues: Float32Array;
138 | channels: Array<Float32Array>;
139 | }>;
140 | /**
141 | * Saves the current audio stream to a file
142 | * @param {boolean} [force] Force saving while still recording
143 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
144 | */
145 | save(force?: boolean): Promise<import("./wav_packer.js").WavPackerAudioType>;
146 | /**
147 | * Ends the current recording session and saves the result
148 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
149 | */
150 | end(): Promise<import("./wav_packer.js").WavPackerAudioType>;
151 | /**
152 | * Performs a full cleanup of WavRecorder instance
153 | * Stops actively listening via microphone and removes existing listeners
154 | * @returns {Promise<true>}
155 | */
156 | quit(): Promise<true>;
157 | }
158 | /**
159 | * Decodes audio into a wav file
160 | */
161 | export type DecodedAudioType = {
162 | blob: Blob;
163 | url: string;
164 | values: Float32Array;
165 | audioBuffer: AudioBuffer;
166 | };
167 | //# sourceMappingURL=wav_recorder.d.ts.map
--------------------------------------------------------------------------------
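A minimal capture sketch matching the declarations above (not from the repo; the sample rate, chunk handling and logging are assumptions):

```ts
import { WavRecorder } from '$lib/visualizations/wavtools';

const recorder = new WavRecorder({ sampleRate: 24000 });

async function captureDemo() {
	await recorder.begin(); // requests microphone permission; status becomes "paused"
	await recorder.record((data) => {
		// data.mono is an Int16Array chunk of PCM16 audio
	});
	// poll this from a render loop to drive a visualizer; values are in [0, 1]
	const { values } = recorder.getFrequencies('voice');
	const wav = await recorder.end(); // stops recording and packs a wav file
	console.log(wav.url, values.length);
}
```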
/src/lib/visualizations/wavtools/dist/lib/wav_recorder.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_recorder.d.ts","sourceRoot":"","sources":["../../lib/wav_recorder.js"],"names":[],"mappings":"AAIA;;;;;;;GAOG;AAEH;;;GAGG;AACH;IAsCE;;;;;;OAMG;IACH,yBALW,IAAI,GAAC,YAAY,GAAC,UAAU,GAAC,WAAW,GAAC,MAAM,EAAE,eACjD,MAAM,mBACN,MAAM,GACJ,OAAO,CAAC,gBAAgB,CAAC,CAqErC;IA/GD;;;;OAIG;IACH,uDAHW;QAAC,UAAU,CAAC,EAAE,MAAM,CAAC;QAAC,gBAAgB,CAAC,EAAE,OAAO,CAAC;QAAC,KAAK,CAAC,EAAE,OAAO,CAAA;KAAC,EAiC5E;IAxBC,eAAkC;IAElC,mBAA4B;IAC5B,0BAAwC;IACxC,eAAoB;IACpB,2CAAiC;IACjC,gBAAkB;IAElB,YAAkB;IAClB,eAAqB;IACrB,YAAkB;IAClB,UAAgB;IAChB,mBAAsB;IAEtB,qBAAqB;IACrB,kBAAuB;IACvB,qBAAwB;IAExB,4BAA+B;IAE/B;;;MAGC;IA+EH;;;;OAIG;IACH,qBAFa,IAAI,CAOhB;IAED;;;OAGG;IACH,iBAFa,MAAM,CAIlB;IAED;;;OAGG;IACH,aAFa,OAAO,GAAC,QAAQ,GAAC,WAAW,CAUxC;IAED;;;;;;;OAOG;IACH,eAqBC;IAED;;;;OAIG;IACH,sCAFa,IAAI,CAmChB;IAED;;;OAGG;IACH,qBAFa,OAAO,CAAC,IAAI,CAAC,CAoBzB;IAED;;;OAGG;IACH,eAFa,OAAO,CAAC,KAAK,CAAC,eAAe,GAAG;QAAC,OAAO,EAAE,OAAO,CAAA;KAAC,CAAC,CAAC,CA8BhE;IAED;;;;;OAKG;IACH,iBAHW,MAAM,GACJ,OAAO,CAAC,IAAI,CAAC,CAkFzB;IAHC,cAAwB;IAK1B;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,OAAO,8BAA8B,EAAE,uBAAuB,CAkB1E;IAED;;;;OAIG;IACH,SAFa,OAAO,CAAC,IAAI,CAAC,CAezB;IAED;;;;;OAKG;IACH,wBAJW,CAAC,IAAI,EAAE;QAAE,IAAI,EAAE,UAAU,CAAC;QAAC,GAAG,EAAE,UAAU,CAAA;KAAE,KAAK,GAAG,cACpD,MAAM,GACJ,OAAO,CAAC,IAAI,CAAC,CAoBzB;IATC,4BAAoC;IAWtC;;;OAGG;IACH,SAFa,OAAO,CAAC,IAAI,CAAC,CAQzB;IAED;;;OAGG;IACH,QAFa,OAAO,CAAC;QAAC,UAAU,EAAE,YAAY,CAAC;QAAC,QAAQ,EAAE,KAAK,CAAC,YAAY,CAAC,CAAA;KAAC,CAAC,CAS9E;IAED;;;;OAIG;IACH,aAHW,OAAO,GACL,OAAO,CAAC,OAAO,iBAAiB,EAAE,kBAAkB,CAAC,CAgBjE;IAED;;;OAGG;IACH,OAFa,OAAO,CAAC,OAAO,iBAAiB,EAAE,kBAAkB,CAAC,CA8BjE;IAED;;;;OAIG;IACH,QAFa,OAAO,CAAC,IAAI,CAAC,CAQzB;CACF;;;;;UA1hBa,IAAI;SACJ,MAAM;YACN,YAAY;iBACZ,WAAW"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/wav_stream_player.d.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Plays audio streams received in raw PCM16 chunks from the browser
3 | * @class
4 | */
5 | export class WavStreamPlayer {
6 | /**
7 | * Creates a new WavStreamPlayer instance
8 | * @param {{sampleRate?: number}} options
9 | * @returns {WavStreamPlayer}
10 | */
11 | constructor({ sampleRate }?: {
12 | sampleRate?: number;
13 | });
14 | scriptSrc: any;
15 | sampleRate: number;
16 | context: any;
17 | stream: any;
18 | analyser: any;
19 | trackSampleOffsets: {};
20 | interruptedTrackIds: {};
21 | /**
22 | * Connects the audio context and enables output to speakers
23 | * @returns {Promise<true>}
24 | */
25 | connect(): Promise<true>;
26 | /**
27 | * Gets the current frequency domain data from the playing track
28 | * @param {"frequency"|"music"|"voice"} [analysisType]
29 | * @param {number} [minDecibels] default -100
30 | * @param {number} [maxDecibels] default -30
31 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
32 | */
33 | getFrequencies(analysisType?: "frequency" | "music" | "voice", minDecibels?: number, maxDecibels?: number): import("./analysis/audio_analysis.js").AudioAnalysisOutputType;
34 | /**
35 | * Starts audio streaming
36 | * @private
37 | * @returns {Promise<true>}
38 | */
39 | private _start;
40 | /**
41 | * Adds 16BitPCM data to the currently playing audio stream
42 | * You can add chunks beyond the current play point and they will be queued for play
43 | * @param {ArrayBuffer|Int16Array} arrayBuffer
44 | * @param {string} [trackId]
45 | * @returns {Int16Array}
46 | */
47 | add16BitPCM(arrayBuffer: ArrayBuffer | Int16Array, trackId?: string): Int16Array;
48 | /**
49 | * Gets the offset (sample count) of the currently playing stream
50 | * @param {boolean} [interrupt]
51 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
52 | */
53 | getTrackSampleOffset(interrupt?: boolean): {
54 | trackId: string | null;
55 | offset: number;
56 | currentTime: number;
57 | };
58 | /**
59 | * Strips the current stream and returns the sample offset of the audio
60 | * @param {boolean} [interrupt]
61 | * @returns {{trackId: string|null, offset: number, currentTime: number}}
62 | */
63 | interrupt(): {
64 | trackId: string | null;
65 | offset: number;
66 | currentTime: number;
67 | };
68 | }
69 | //# sourceMappingURL=wav_stream_player.d.ts.map
--------------------------------------------------------------------------------
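A usage sketch for the streaming player (hypothetical silent chunk; real code would feed chunks arriving from a network stream):

```ts
import { WavStreamPlayer } from '$lib/visualizations/wavtools';

const player = new WavStreamPlayer({ sampleRate: 24000 });

async function streamDemo() {
	await player.connect(); // call from a user gesture so the AudioContext can start
	// chunks past the current play point are queued automatically
	const chunk = new Int16Array(2400); // 100 ms of silence as placeholder data
	player.add16BitPCM(chunk, 'track-1');
	// stop playback and read back how far the track got
	const { trackId, offset, currentTime } = await player.interrupt();
	console.log(trackId, offset, currentTime);
}
```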
/src/lib/visualizations/wavtools/dist/lib/wav_stream_player.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"wav_stream_player.d.ts","sourceRoot":"","sources":["../../lib/wav_stream_player.js"],"names":[],"mappings":"AAGA;;;GAGG;AACH;IACE;;;;OAIG;IACH,6BAHW;QAAC,UAAU,CAAC,EAAE,MAAM,CAAA;KAAC,EAW/B;IAPC,eAAmC;IACnC,mBAA4B;IAC5B,aAAmB;IACnB,YAAkB;IAClB,cAAoB;IACpB,uBAA4B;IAC5B,wBAA6B;IAG/B;;;OAGG;IACH,WAFa,OAAO,CAAC,IAAI,CAAC,CAkBzB;IAED;;;;;;OAMG;IACH,8BALW,WAAW,GAAC,OAAO,GAAC,OAAO,gBAC3B,MAAM,gBACN,MAAM,GACJ,OAAO,8BAA8B,EAAE,uBAAuB,CAkB1E;IAED;;;;OAIG;IACH,eAkBC;IAED;;;;;;OAMG;IACH,yBAJW,WAAW,GAAC,UAAU,YACtB,MAAM,GACJ,UAAU,CAqBtB;IAED;;;;OAIG;IACH,iCAHW,OAAO,GACL;QAAC,OAAO,EAAE,MAAM,GAAC,IAAI,CAAC;QAAC,MAAM,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAC,CAqBvE;IAED;;;;OAIG;IACH,aAFa;QAAC,OAAO,EAAE,MAAM,GAAC,IAAI,CAAC;QAAC,MAAM,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAC,CAIvE;CACF"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/worklets/audio_processor.d.ts:
--------------------------------------------------------------------------------
1 | export const AudioProcessorSrc: any;
2 | //# sourceMappingURL=audio_processor.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/worklets/audio_processor.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"audio_processor.d.ts","sourceRoot":"","sources":["../../../lib/worklets/audio_processor.js"],"names":[],"mappings":"AAqNA,oCAAqC"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/worklets/stream_processor.d.ts:
--------------------------------------------------------------------------------
1 | export const StreamProcessorWorklet: "\nclass StreamProcessor extends AudioWorkletProcessor {\n constructor() {\n super();\n this.hasStarted = false;\n this.hasInterrupted = false;\n this.outputBuffers = [];\n this.bufferLength = 128;\n this.write = { buffer: new Float32Array(this.bufferLength), trackId: null };\n this.writeOffset = 0;\n this.trackSampleOffsets = {};\n this.port.onmessage = (event) => {\n if (event.data) {\n const payload = event.data;\n if (payload.event === 'write') {\n const int16Array = payload.buffer;\n const float32Array = new Float32Array(int16Array.length);\n for (let i = 0; i < int16Array.length; i++) {\n float32Array[i] = int16Array[i] / 0x8000; // Convert Int16 to Float32\n }\n this.writeData(float32Array, payload.trackId);\n } else if (\n payload.event === 'offset' ||\n payload.event === 'interrupt'\n ) {\n const requestId = payload.requestId;\n const trackId = this.write.trackId;\n const offset = this.trackSampleOffsets[trackId] || 0;\n this.port.postMessage({\n event: 'offset',\n requestId,\n trackId,\n offset,\n });\n if (payload.event === 'interrupt') {\n this.hasInterrupted = true;\n }\n } else {\n throw new Error(`Unhandled event \"${payload.event}\"`);\n }\n }\n };\n }\n\n writeData(float32Array, trackId = null) {\n let { buffer } = this.write;\n let offset = this.writeOffset;\n for (let i = 0; i < float32Array.length; i++) {\n buffer[offset++] = float32Array[i];\n if (offset >= buffer.length) {\n this.outputBuffers.push(this.write);\n this.write = { buffer: new Float32Array(this.bufferLength), trackId };\n buffer = this.write.buffer;\n offset = 0;\n }\n }\n this.writeOffset = offset;\n return true;\n }\n\n process(inputs, outputs, parameters) {\n const output = outputs[0];\n const outputChannelData = output[0];\n const outputBuffers = this.outputBuffers;\n if (this.hasInterrupted) {\n this.port.postMessage({ event: 'stop' });\n return false;\n } else if (outputBuffers.length) {\n this.hasStarted = true;\n const { buffer, trackId } = outputBuffers.shift();\n for (let i = 0; i < outputChannelData.length; i++) {\n outputChannelData[i] = buffer[i] || 0;\n }\n if (trackId) {\n this.trackSampleOffsets[trackId] =\n this.trackSampleOffsets[trackId] || 0;\n this.trackSampleOffsets[trackId] += buffer.length;\n }\n return true;\n } else if (this.hasStarted) {\n this.port.postMessage({ event: 'stop' });\n return false;\n } else {\n return true;\n }\n }\n}\n\nregisterProcessor('stream_processor', StreamProcessor);\n";
2 | export const StreamProcessorSrc: any;
3 | //# sourceMappingURL=stream_processor.d.ts.map
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/dist/lib/worklets/stream_processor.d.ts.map:
--------------------------------------------------------------------------------
1 | {"version":3,"file":"stream_processor.d.ts","sourceRoot":"","sources":["../../../lib/worklets/stream_processor.js"],"names":[],"mappings":"AAAA,q4FAyFE;AAMF,qCAAsC"}
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/index.js:
--------------------------------------------------------------------------------
1 | import { WavPacker } from './lib/wav_packer.js';
2 | import { AudioAnalysis } from './lib/analysis/audio_analysis.js';
3 | import { WavStreamPlayer } from './lib/wav_stream_player.js';
4 | import { WavRecorder } from './lib/wav_recorder.js';
5 | import { AudioFilePlayer } from './lib/audio_file_player.js';
6 |
7 | export { AudioAnalysis, WavPacker, WavStreamPlayer, WavRecorder, AudioFilePlayer };
8 |
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/analysis/audio_analysis.js:
--------------------------------------------------------------------------------
1 | import {
2 | noteFrequencies,
3 | noteFrequencyLabels,
4 | voiceFrequencies,
5 | voiceFrequencyLabels,
6 | } from './constants.js';
7 |
8 | /**
9 | * Output of AudioAnalysis for the frequency domain of the audio
10 | * @typedef {Object} AudioAnalysisOutputType
11 | * @property {Float32Array} values Amplitude of this frequency between {0, 1} inclusive
12 | * @property {number[]} frequencies Raw frequency bucket values
13 | * @property {string[]} labels Labels for the frequency bucket values
14 | */
15 |
16 | /**
17 | * Analyzes audio for visual output
18 | * @class
19 | */
20 | export class AudioAnalysis {
21 | /**
22 | * Retrieves frequency domain data from an AnalyserNode adjusted to a decibel range
23 | * returns human-readable formatting and labels
24 | * @param {AnalyserNode} analyser
25 | * @param {number} sampleRate
26 | * @param {Float32Array} [fftResult]
27 | * @param {"frequency"|"music"|"voice"} [analysisType]
28 | * @param {number} [minDecibels] default -100
29 | * @param {number} [maxDecibels] default -30
30 | * @returns {AudioAnalysisOutputType}
31 | */
32 | static getFrequencies(
33 | analyser,
34 | sampleRate,
35 | fftResult,
36 | analysisType = 'frequency',
37 | minDecibels = -100,
38 | maxDecibels = -30,
39 | ) {
40 | if (!fftResult) {
41 | fftResult = new Float32Array(analyser.frequencyBinCount);
42 | analyser.getFloatFrequencyData(fftResult);
43 | }
44 | const nyquistFrequency = sampleRate / 2;
45 | const frequencyStep = (1 / fftResult.length) * nyquistFrequency;
46 | let outputValues;
47 | let frequencies;
48 | let labels;
49 | if (analysisType === 'music' || analysisType === 'voice') {
50 | const useFrequencies =
51 | analysisType === 'voice' ? voiceFrequencies : noteFrequencies;
52 | const aggregateOutput = Array(useFrequencies.length).fill(minDecibels);
53 | for (let i = 0; i < fftResult.length; i++) {
54 | const frequency = i * frequencyStep;
55 | const amplitude = fftResult[i];
56 | for (let n = useFrequencies.length - 1; n >= 0; n--) {
57 | if (frequency > useFrequencies[n]) {
58 | aggregateOutput[n] = Math.max(aggregateOutput[n], amplitude);
59 | break;
60 | }
61 | }
62 | }
63 | outputValues = aggregateOutput;
64 | frequencies =
65 | analysisType === 'voice' ? voiceFrequencies : noteFrequencies;
66 | labels =
67 | analysisType === 'voice' ? voiceFrequencyLabels : noteFrequencyLabels;
68 | } else {
69 | outputValues = Array.from(fftResult);
70 | frequencies = outputValues.map((_, i) => frequencyStep * i);
71 | labels = frequencies.map((f) => `${f.toFixed(2)} Hz`);
72 | }
73 | // We normalize to {0, 1}
74 | const normalizedOutput = outputValues.map((v) => {
75 | return Math.max(
76 | 0,
77 | Math.min((v - minDecibels) / (maxDecibels - minDecibels), 1),
78 | );
79 | });
80 | const values = new Float32Array(normalizedOutput);
81 | return {
82 | values,
83 | frequencies,
84 | labels,
85 | };
86 | }
87 |
88 | /**
89 | * Creates a new AudioAnalysis instance for an HTMLAudioElement
90 | * @param {HTMLAudioElement} audioElement
91 | * @param {AudioBuffer|null} [audioBuffer] If provided, will cache all frequency domain data from the buffer
92 | * @returns {AudioAnalysis}
93 | */
94 | constructor(audioElement, audioBuffer = null) {
95 | this.fftResults = [];
96 | if (audioBuffer) {
97 | /**
98 | * Modified from
99 | * https://stackoverflow.com/questions/75063715/using-the-web-audio-api-to-analyze-a-song-without-playing
100 | *
101 | * We do this to populate FFT values for the audio if provided an `audioBuffer`
102 | * The reason to do this is that Safari fails when using `createMediaElementSource`
103 | * This has a non-zero RAM cost so we only opt-in to run it on Safari, Chrome is better
104 | */
105 | const { length, sampleRate } = audioBuffer;
106 | const offlineAudioContext = new OfflineAudioContext({
107 | length,
108 | sampleRate,
109 | });
110 | const source = offlineAudioContext.createBufferSource();
111 | source.buffer = audioBuffer;
112 | const analyser = offlineAudioContext.createAnalyser();
113 | analyser.fftSize = 8192;
114 | analyser.smoothingTimeConstant = 0.1;
115 | source.connect(analyser);
116 | // limit is :: 128 / sampleRate;
117 | // but we just want 60fps - cuts ~1s from 6MB to 1MB of RAM
118 | const renderQuantumInSeconds = 1 / 60;
119 | const durationInSeconds = length / sampleRate;
120 | const analyze = (index) => {
121 | const suspendTime = renderQuantumInSeconds * index;
122 | if (suspendTime < durationInSeconds) {
123 | offlineAudioContext.suspend(suspendTime).then(() => {
124 | const fftResult = new Float32Array(analyser.frequencyBinCount);
125 | analyser.getFloatFrequencyData(fftResult);
126 | this.fftResults.push(fftResult);
127 | analyze(index + 1);
128 | });
129 | }
130 | if (index === 1) {
131 | offlineAudioContext.startRendering();
132 | } else {
133 | offlineAudioContext.resume();
134 | }
135 | };
136 | source.start(0);
137 | analyze(1);
138 | this.audio = audioElement;
139 | this.context = offlineAudioContext;
140 | this.analyser = analyser;
141 | this.sampleRate = sampleRate;
142 | this.audioBuffer = audioBuffer;
143 | } else {
144 | const audioContext = new AudioContext();
145 | const track = audioContext.createMediaElementSource(audioElement);
146 | const analyser = audioContext.createAnalyser();
147 | analyser.fftSize = 8192;
148 | analyser.smoothingTimeConstant = 0.1;
149 | track.connect(analyser);
150 | analyser.connect(audioContext.destination);
151 | this.audio = audioElement;
152 | this.context = audioContext;
153 | this.analyser = analyser;
154 | this.sampleRate = this.context.sampleRate;
155 | this.audioBuffer = null;
156 | }
157 | }
158 |
159 | /**
160 | * Gets the current frequency domain data from the playing audio track
161 | * @param {"frequency"|"music"|"voice"} [analysisType]
162 | * @param {number} [minDecibels] default -100
163 | * @param {number} [maxDecibels] default -30
164 | * @returns {AudioAnalysisOutputType}
165 | */
166 | getFrequencies(
167 | analysisType = 'frequency',
168 | minDecibels = -100,
169 | maxDecibels = -30,
170 | ) {
171 | let fftResult = null;
172 | if (this.audioBuffer && this.fftResults.length) {
173 | const pct = this.audio.currentTime / this.audio.duration;
174 | const index = Math.min(
175 | (pct * this.fftResults.length) | 0,
176 | this.fftResults.length - 1,
177 | );
178 | fftResult = this.fftResults[index];
179 | }
180 | return AudioAnalysis.getFrequencies(
181 | this.analyser,
182 | this.sampleRate,
183 | fftResult,
184 | analysisType,
185 | minDecibels,
186 | maxDecibels,
187 | );
188 | }
189 |
190 | /**
191 | * Resume the internal AudioContext if it was suspended due to the lack of
192 | * user interaction when the AudioAnalysis was instantiated.
193 | * @returns {Promise<true>}
194 | */
195 | async resumeIfSuspended() {
196 | if (this.context.state === 'suspended') {
197 | await this.context.resume();
198 | }
199 | return true;
200 | }
201 | }
202 |
203 | globalThis.AudioAnalysis = AudioAnalysis;
204 |
--------------------------------------------------------------------------------
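To show how `AudioAnalysis` is meant to be driven, a small sketch with a plain `HTMLAudioElement` (the file path and the click handler are assumptions):

```ts
import { AudioAnalysis } from '$lib/visualizations/wavtools';

const element = new Audio('/music.mp3');
const analysis = new AudioAnalysis(element);

async function onPlayClick() {
	// without a prior user gesture the AudioContext starts suspended
	await analysis.resumeIfSuspended();
	await element.play();
	// read this every animation frame to feed a visualizer
	const { values, labels } = analysis.getFrequencies('music');
	console.log(labels[0], values[0]);
}
```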
/src/lib/visualizations/wavtools/lib/analysis/constants.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Constants for help with visualization
3 | * Helps map frequency ranges from Fast Fourier Transform
4 | * to human-interpretable ranges, notably music ranges and
5 | * human vocal ranges.
6 | */
7 |
8 | // Eighth octave frequencies
9 | const octave8Frequencies = [
10 | 4186.01, 4434.92, 4698.63, 4978.03, 5274.04, 5587.65, 5919.91, 6271.93,
11 | 6644.88, 7040.0, 7458.62, 7902.13,
12 | ];
13 |
14 | // Labels for each of the above frequencies
15 | const octave8FrequencyLabels = [
16 | 'C',
17 | 'C#',
18 | 'D',
19 | 'D#',
20 | 'E',
21 | 'F',
22 | 'F#',
23 | 'G',
24 | 'G#',
25 | 'A',
26 | 'A#',
27 | 'B',
28 | ];
29 |
30 | /**
31 | * All note frequencies from 1st to 8th octave
32 | * in format "A#8" (A#, 8th octave)
33 | */
34 | export const noteFrequencies = [];
35 | export const noteFrequencyLabels = [];
36 | for (let i = 1; i <= 8; i++) {
37 | for (let f = 0; f < octave8Frequencies.length; f++) {
38 | const freq = octave8Frequencies[f];
39 | noteFrequencies.push(freq / Math.pow(2, 8 - i));
40 | noteFrequencyLabels.push(octave8FrequencyLabels[f] + i);
41 | }
42 | }
43 |
44 | /**
45 | * Subset of the note frequencies between 32 and 2000 Hz
46 | * 6 octave range: C1 to B6
47 | */
48 | const voiceFrequencyRange = [32.0, 2000.0];
49 | export const voiceFrequencies = noteFrequencies.filter((_, i) => {
50 | return (
51 | noteFrequencies[i] > voiceFrequencyRange[0] &&
52 | noteFrequencies[i] < voiceFrequencyRange[1]
53 | );
54 | });
55 | export const voiceFrequencyLabels = noteFrequencyLabels.filter((_, i) => {
56 | return (
57 | noteFrequencies[i] > voiceFrequencyRange[0] &&
58 | noteFrequencies[i] < voiceFrequencyRange[1]
59 | );
60 | });
61 |
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/audio_file_player.js:
--------------------------------------------------------------------------------
1 | import { AudioAnalysis } from './analysis/audio_analysis.js';
2 |
3 | /**
4 | * Plays audio files (mp3, wav, etc.)
5 | * @class
6 | */
7 | export class AudioFilePlayer {
8 | /**
9 | * Creates a new AudioFilePlayer instance
10 | * @param {{sampleRate?: number}} options
11 | * @returns {AudioFilePlayer}
12 | */
13 | constructor({ sampleRate = 44100 } = {}) {
14 | this.sampleRate = sampleRate;
15 | this.context = null;
16 | this.analyser = null;
17 | this.source = null;
18 | this.buffer = null;
19 | this.isPlaying = false;
20 | this.startTime = 0;
21 | this.pauseTime = 0;
22 | }
23 |
24 | /**
25 | * Connects the audio context and enables output to speakers
26 | * @returns {Promise<true>}
27 | */
28 | async connect() {
29 | this.context = new AudioContext({
30 | sampleRate: this.sampleRate
31 | });
32 | if (this.context.state === 'suspended') {
33 | await this.context.resume();
34 | }
35 | const analyser = this.context.createAnalyser();
36 | analyser.fftSize = 8192;
37 | analyser.smoothingTimeConstant = 0.1;
38 | this.analyser = analyser;
39 | return true;
40 | }
41 |
42 | /**
43 | * Loads an audio file and decodes it
44 | * @param {string|ArrayBuffer|Blob} audioInput - URL string, ArrayBuffer, or Blob of the audio file
45 | * @returns {Promise<void>}
46 | */
47 | async loadFile(audioInput) {
48 | if (!this.context) {
49 | await this.connect();
50 | }
51 |
52 | let arrayBuffer;
53 | if (typeof audioInput === 'string') {
54 | // Fetch the audio file from URL
55 | const response = await fetch(audioInput);
56 | arrayBuffer = await response.arrayBuffer();
57 | } else if (audioInput instanceof Blob) {
58 | arrayBuffer = await audioInput.arrayBuffer();
59 | } else if (audioInput instanceof ArrayBuffer) {
60 | arrayBuffer = audioInput;
61 | } else {
62 | throw new Error('audioInput must be a URL string, ArrayBuffer, or Blob');
63 | }
64 |
65 | // Decode the audio data
66 | this.buffer = await this.context.decodeAudioData(arrayBuffer);
67 | }
68 |
69 | /**
70 | * Plays the loaded audio file
71 | * @returns {void}
72 | */
73 | play() {
74 | if (this.isPlaying) {
75 | return;
76 | }
77 | if (!this.buffer) {
78 | throw new Error('No audio buffer loaded. Please call loadFile() first.');
79 | }
80 |
81 | this.source = this.context.createBufferSource();
82 | this.source.buffer = this.buffer;
83 | this.source.connect(this.analyser);
84 | this.analyser.connect(this.context.destination);
85 |
86 | const offset = this.pauseTime || 0;
87 | this.source.start(0, offset);
88 | this.startTime = this.context.currentTime - offset;
89 | this.isPlaying = true;
90 |
91 | this.source.onended = () => {
92 | this.isPlaying = false;
93 | this.pauseTime = 0;
94 | };
95 | }
96 |
97 | /**
98 | * Pauses the playback
99 | * @returns {void}
100 | */
101 | pause() {
102 | if (!this.isPlaying) {
103 | return;
104 | }
105 | this.source.stop();
106 | this.pauseTime = this.context.currentTime - this.startTime;
107 | this.isPlaying = false;
108 | }
109 |
110 | /**
111 | * Stops the playback and resets play position
112 | * @returns {void}
113 | */
114 | stop() {
115 | if (this.source) {
116 | this.source.stop();
117 | }
118 | this.isPlaying = false;
119 | this.pauseTime = 0;
120 | }
121 |
122 | /**
123 | * Gets the current frequency domain data from the playing track
124 | * @param {"frequency"|"music"|"voice"} [analysisType]
125 | * @param {number} [minDecibels] default -100
126 | * @param {number} [maxDecibels] default -30
127 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
128 | */
129 | getFrequencies(analysisType = 'frequency', minDecibels = -100, maxDecibels = -30) {
130 | if (!this.analyser) {
131 | throw new Error('Not connected, please call .connect() first');
132 | }
133 | return AudioAnalysis.getFrequencies(
134 | this.analyser,
135 | this.sampleRate,
136 | null,
137 | analysisType,
138 | minDecibels,
139 | maxDecibels
140 | );
141 | }
142 | }
143 |
144 | globalThis.AudioFilePlayer = AudioFilePlayer;
145 |
--------------------------------------------------------------------------------
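A playback sketch for `AudioFilePlayer` (the animation loop is an assumption; `/music.mp3` mirrors the demo track in `static/`):

```ts
import { AudioFilePlayer } from '$lib/visualizations/wavtools';

const player = new AudioFilePlayer({ sampleRate: 44100 });

async function start() {
	await player.connect(); // call from a user gesture
	await player.loadFile('/music.mp3');
	player.play();
	requestAnimationFrame(frame);
}

function frame() {
	// normalized values in [0, 1], ready to pass to the core visualizers
	const { values } = player.getFrequencies('music');
	console.log(values[0]);
	requestAnimationFrame(frame);
}
```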
/src/lib/visualizations/wavtools/lib/wav_packer.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Raw wav audio file contents
3 | * @typedef {Object} WavPackerAudioType
4 | * @property {Blob} blob
5 | * @property {string} url
6 | * @property {number} channelCount
7 | * @property {number} sampleRate
8 | * @property {number} duration
9 | */
10 |
11 | /**
12 | * Utility class for assembling PCM16 "audio/wav" data
13 | * @class
14 | */
15 | export class WavPacker {
16 | /**
17 | * Converts Float32Array of amplitude data to ArrayBuffer in Int16Array format
18 | * @param {Float32Array} float32Array
19 | * @returns {ArrayBuffer}
20 | */
21 | static floatTo16BitPCM(float32Array) {
22 | const buffer = new ArrayBuffer(float32Array.length * 2);
23 | const view = new DataView(buffer);
24 | let offset = 0;
25 | for (let i = 0; i < float32Array.length; i++, offset += 2) {
26 | let s = Math.max(-1, Math.min(1, float32Array[i]));
27 | view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
28 | }
29 | return buffer;
30 | }
31 |
32 | /**
33 | * Concatenates two ArrayBuffers
34 | * @param {ArrayBuffer} leftBuffer
35 | * @param {ArrayBuffer} rightBuffer
36 | * @returns {ArrayBuffer}
37 | */
38 | static mergeBuffers(leftBuffer, rightBuffer) {
39 | const tmpArray = new Uint8Array(
40 | leftBuffer.byteLength + rightBuffer.byteLength
41 | );
42 | tmpArray.set(new Uint8Array(leftBuffer), 0);
43 | tmpArray.set(new Uint8Array(rightBuffer), leftBuffer.byteLength);
44 | return tmpArray.buffer;
45 | }
46 |
47 | /**
48 | * Packs a number into little-endian bytes
49 | * @private
50 | * @param {number} size 0 = Int16 (2 bytes), 1 = Int32 (4 bytes)
51 | * @param {number} arg value to pack
52 | * @returns {Uint8Array}
53 | */
54 | _packData(size, arg) {
55 | return [
56 | new Uint8Array([arg, arg >> 8]),
57 | new Uint8Array([arg, arg >> 8, arg >> 16, arg >> 24]),
58 | ][size];
59 | }
60 |
61 | /**
62 | * Packs audio into "audio/wav" Blob
63 | * @param {number} sampleRate
64 | * @param {{bitsPerSample: number, channels: Array, data: Int16Array}} audio
65 | * @returns {WavPackerAudioType}
66 | */
67 | pack(sampleRate, audio) {
68 | if (!audio?.bitsPerSample) {
69 | throw new Error(`Missing "bitsPerSample"`);
70 | } else if (!audio?.channels) {
71 | throw new Error(`Missing "channels"`);
72 | } else if (!audio?.data) {
73 | throw new Error(`Missing "data"`);
74 | }
75 | const { bitsPerSample, channels, data } = audio;
76 | const output = [
77 | // Header
78 | 'RIFF',
79 | this._packData(
80 | 1,
81 | 4 /* "WAVE" */ + (8 + 16) /* fmt chunk */ + (8 + data.byteLength) /* data chunk */
82 | ), // RIFF chunk size: file length minus the 8-byte RIFF header
83 | 'WAVE',
84 | // chunk 1
85 | 'fmt ', // Sub-chunk identifier
86 | this._packData(1, 16), // Chunk length
87 | this._packData(0, 1), // Audio format (1 is linear quantization)
88 | this._packData(0, channels.length),
89 | this._packData(1, sampleRate),
90 | this._packData(1, (sampleRate * channels.length * bitsPerSample) / 8), // Byte rate
91 | this._packData(0, (channels.length * bitsPerSample) / 8),
92 | this._packData(0, bitsPerSample),
93 | // chunk 2
94 | 'data', // Sub-chunk identifier
95 | this._packData(
96 | 1,
97 | (channels[0].length * channels.length * bitsPerSample) / 8
98 | ), // Chunk length
99 | data,
100 | ];
101 | const blob = new Blob(output, { type: 'audio/wav' });
102 | const url = URL.createObjectURL(blob);
103 | return {
104 | blob,
105 | url,
106 | channelCount: channels.length,
107 | sampleRate,
108 | duration: data.byteLength / (channels.length * sampleRate * 2),
109 | };
110 | }
111 | }
112 |
113 | globalThis.WavPacker = WavPacker;
114 |
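115 | /*
116 |  * Usage sketch (assumes mono Float32Array samples in [-1, 1]):
117 |  *
118 |  *   const samples = new Float32Array(44100);         // one second of silence
119 |  *   const data = WavPacker.floatTo16BitPCM(samples); // ArrayBuffer of PCM16
120 |  *   const wav = new WavPacker().pack(44100, {
121 |  *     bitsPerSample: 16,
122 |  *     channels: [samples],
123 |  *     data,
124 |  *   });
125 |  *   // wav.blob is an "audio/wav" Blob; wav.url can be set as an <audio> src
126 |  */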
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/wav_recorder.js:
--------------------------------------------------------------------------------
1 | import { AudioProcessorSrc } from './worklets/audio_processor.js';
2 | import { AudioAnalysis } from './analysis/audio_analysis.js';
3 | import { WavPacker } from './wav_packer.js';
4 |
5 | /**
6 | * Decoded audio data, packaged as a wav file
7 | * @typedef {Object} DecodedAudioType
8 | * @property {Blob} blob
9 | * @property {string} url
10 | * @property {Float32Array} values
11 | * @property {AudioBuffer} audioBuffer
12 | */
13 |
14 | /**
15 | * Records live stream of user audio as PCM16 "audio/wav" data
16 | * @class
17 | */
18 | export class WavRecorder {
19 | /**
20 | * Create a new WavRecorder instance
21 | * @param {{sampleRate?: number, outputToSpeakers?: boolean, debug?: boolean}} [options]
22 | * @returns {WavRecorder}
23 | */
24 | constructor({
25 | sampleRate = 44100,
26 | outputToSpeakers = false,
27 | debug = false,
28 | } = {}) {
29 | // Script source
30 | this.scriptSrc = AudioProcessorSrc;
31 | // Config
32 | this.sampleRate = sampleRate;
33 | this.outputToSpeakers = outputToSpeakers;
34 | this.debug = !!debug;
35 | this._deviceChangeCallback = null;
36 | this._devices = [];
37 | // State variables
38 | this.stream = null;
39 | this.processor = null;
40 | this.source = null;
41 | this.node = null;
42 | this.recording = false;
43 | // Event handling with AudioWorklet
44 | this._lastEventId = 0;
45 | this.eventReceipts = {};
46 | this.eventTimeout = 5000;
47 | // Process chunks of audio
48 | this._chunkProcessor = () => {};
49 | this._chunkProcessorSize = void 0;
50 | this._chunkProcessorBuffer = {
51 | raw: new ArrayBuffer(0),
52 | mono: new ArrayBuffer(0),
53 | };
54 | }
55 |
56 | /**
57 | * Decodes audio data from multiple formats to a Blob, url, Float32Array and AudioBuffer
58 | * @param {Blob|Float32Array|Int16Array|ArrayBuffer|number[]} audioData
59 | * @param {number} sampleRate
60 | * @param {number} fromSampleRate
61 | * @returns {Promise<DecodedAudioType>}
62 | */
63 | static async decode(audioData, sampleRate = 44100, fromSampleRate = -1) {
64 | const context = new AudioContext({ sampleRate });
65 | let arrayBuffer;
66 | let blob;
67 | if (audioData instanceof Blob) {
68 | if (fromSampleRate !== -1) {
69 | throw new Error(
70 | `Can not specify "fromSampleRate" when reading from Blob`,
71 | );
72 | }
73 | blob = audioData;
74 | arrayBuffer = await blob.arrayBuffer();
75 | } else if (audioData instanceof ArrayBuffer) {
76 | if (fromSampleRate !== -1) {
77 | throw new Error(
78 | `Can not specify "fromSampleRate" when reading from ArrayBuffer`,
79 | );
80 | }
81 | arrayBuffer = audioData;
82 | blob = new Blob([arrayBuffer], { type: 'audio/wav' });
83 | } else {
84 | let float32Array;
85 | let data;
86 | if (audioData instanceof Int16Array) {
87 | data = audioData;
88 | float32Array = new Float32Array(audioData.length);
89 | for (let i = 0; i < audioData.length; i++) {
90 | float32Array[i] = audioData[i] / 0x8000;
91 | }
92 | } else if (audioData instanceof Float32Array) {
93 | float32Array = audioData;
94 | } else if (audioData instanceof Array) {
95 | float32Array = new Float32Array(audioData);
96 | } else {
97 | throw new Error(
98 | `"audioData" must be one of: Blob, Float32Array, Int16Array, ArrayBuffer, Array`,
99 | );
100 | }
101 | if (fromSampleRate === -1) {
102 | throw new Error(
103 | `Must specify "fromSampleRate" when reading from Float32Array, Int16Array or Array`,
104 | );
105 | } else if (fromSampleRate < 3000) {
106 | throw new Error(`Minimum "fromSampleRate" is 3000 (3kHz)`);
107 | }
108 | if (!data) {
109 | data = WavPacker.floatTo16BitPCM(float32Array);
110 | }
111 | const audio = {
112 | bitsPerSample: 16,
113 | channels: [float32Array],
114 | data,
115 | };
116 | const packer = new WavPacker();
117 | const result = packer.pack(fromSampleRate, audio);
118 | blob = result.blob;
119 | arrayBuffer = await blob.arrayBuffer();
120 | }
121 | const audioBuffer = await context.decodeAudioData(arrayBuffer);
122 | const values = audioBuffer.getChannelData(0);
123 | const url = URL.createObjectURL(blob);
124 | return {
125 | blob,
126 | url,
127 | values,
128 | audioBuffer,
129 | };
130 | }
131 |
132 | /**
133 | * Logs data in debug mode
134 | * @param {...any} arguments
135 | * @returns {true}
136 | */
137 | log() {
138 | if (this.debug) {
139 | console.log(...arguments); // not this.log(), which would recurse infinitely
140 | }
141 | return true;
142 | }
143 |
144 | /**
145 | * Retrieves the current sampleRate for the recorder
146 | * @returns {number}
147 | */
148 | getSampleRate() {
149 | return this.sampleRate;
150 | }
151 |
152 | /**
153 | * Retrieves the current status of the recording
154 | * @returns {"ended"|"paused"|"recording"}
155 | */
156 | getStatus() {
157 | if (!this.processor) {
158 | return 'ended';
159 | } else if (!this.recording) {
160 | return 'paused';
161 | } else {
162 | return 'recording';
163 | }
164 | }
165 |
166 | /**
167 | * Sends an event to the AudioWorklet
168 | * @private
169 | * @param {string} name
170 | * @param {{[key: string]: any}} data
171 | * @param {AudioWorkletNode} [_processor]
172 | * @returns {Promise<{[key: string]: any}>}
173 | */
174 | async _event(name, data = {}, _processor = null) {
175 | _processor = _processor || this.processor;
176 | if (!_processor) {
177 | throw new Error('Can not send events without recording first');
178 | }
179 | const message = {
180 | event: name,
181 | id: this._lastEventId++,
182 | data,
183 | };
184 | _processor.port.postMessage(message);
185 | const t0 = new Date().valueOf();
186 | while (!this.eventReceipts[message.id]) {
187 | if (new Date().valueOf() - t0 > this.eventTimeout) {
188 | throw new Error(`Timeout waiting for "${name}" event`);
189 | }
190 | await new Promise((res) => setTimeout(() => res(true), 1));
191 | }
192 | const payload = this.eventReceipts[message.id];
193 | delete this.eventReceipts[message.id];
194 | return payload;
195 | }
196 |
197 | /**
198 | * Sets the device change callback; pass `null` to remove the listener
199 | * @param {((devices: Array<MediaDeviceInfo>) => void)|null} callback
200 | * @returns {true}
201 | */
202 | listenForDeviceChange(callback) {
203 | if (callback === null && this._deviceChangeCallback) {
204 | navigator.mediaDevices.removeEventListener(
205 | 'devicechange',
206 | this._deviceChangeCallback,
207 | );
208 | this._deviceChangeCallback = null;
209 | } else if (callback !== null) {
210 | // Basically a debounce: we only want this called once when devices change,
211 | // and only the most recent callback() should run
212 | // if several fire at the same time
213 | let lastId = 0;
214 | let lastDevices = [];
215 | const serializeDevices = (devices) =>
216 | devices
217 | .map((d) => d.deviceId)
218 | .sort()
219 | .join(',');
220 | const cb = async () => {
221 | let id = ++lastId;
222 | const devices = await this.listDevices();
223 | if (id === lastId) {
224 | if (serializeDevices(lastDevices) !== serializeDevices(devices)) {
225 | lastDevices = devices;
226 | callback(devices.slice());
227 | }
228 | }
229 | };
230 | navigator.mediaDevices.addEventListener('devicechange', cb);
231 | cb();
232 | this._deviceChangeCallback = cb;
233 | }
234 | return true;
235 | }
236 |
237 | /**
238 | * Manually request permission to use the microphone
239 | * @returns {Promise<true>}
240 | */
241 | async requestPermission() {
242 | const permissionStatus = await navigator.permissions.query({
243 | name: 'microphone',
244 | });
245 | if (permissionStatus.state === 'denied') {
246 | window.alert('You must grant microphone access to use this feature.');
247 | } else if (permissionStatus.state === 'prompt') {
248 | try {
249 | const stream = await navigator.mediaDevices.getUserMedia({
250 | audio: true,
251 | });
252 | const tracks = stream.getTracks();
253 | tracks.forEach((track) => track.stop());
254 | } catch (e) {
255 | window.alert('You must grant microphone access to use this feature.');
256 | }
257 | }
258 | return true;
259 | }
260 |
261 | /**
262 | * Lists all eligible devices for recording; will request microphone permission
263 | * @returns {Promise<Array<MediaDeviceInfo & {default: boolean}>>}
264 | */
265 | async listDevices() {
266 | if (
267 | !navigator.mediaDevices ||
268 | !('enumerateDevices' in navigator.mediaDevices)
269 | ) {
270 | throw new Error('Could not request user devices');
271 | }
272 | await this.requestPermission();
273 | const devices = await navigator.mediaDevices.enumerateDevices();
274 | const audioDevices = devices.filter(
275 | (device) => device.kind === 'audioinput',
276 | );
277 | const defaultDeviceIndex = audioDevices.findIndex(
278 | (device) => device.deviceId === 'default',
279 | );
280 | const deviceList = [];
281 | if (defaultDeviceIndex !== -1) {
282 | let defaultDevice = audioDevices.splice(defaultDeviceIndex, 1)[0];
283 | let existingIndex = audioDevices.findIndex(
284 | (device) => device.groupId === defaultDevice.groupId,
285 | );
286 | if (existingIndex !== -1) {
287 | defaultDevice = audioDevices.splice(existingIndex, 1)[0];
288 | }
289 | defaultDevice.default = true;
290 | deviceList.push(defaultDevice);
291 | }
292 | return deviceList.concat(audioDevices);
293 | }
294 |
295 | /**
296 | * Begins a recording session and requests microphone permissions if not already granted
297 | * The browser's microphone indicator will appear, but status remains "paused" until .record() is called
298 | * @param {string} [deviceId] if no device provided, default device will be used
299 | * @returns {Promise<true>}
300 | */
301 | async begin(deviceId) {
302 | if (this.processor) {
303 | throw new Error(
304 | `Already connected: please call .end() to start a new session`,
305 | );
306 | }
307 |
308 | if (
309 | !navigator.mediaDevices ||
310 | !('getUserMedia' in navigator.mediaDevices)
311 | ) {
312 | throw new Error('Could not request user media');
313 | }
314 | try {
315 | const config = { audio: true };
316 | if (deviceId) {
317 | config.audio = { deviceId: { exact: deviceId } };
318 | }
319 | this.stream = await navigator.mediaDevices.getUserMedia(config);
320 | } catch (err) {
321 | throw new Error('Could not start media stream');
322 | }
323 |
324 | const context = new AudioContext({ sampleRate: this.sampleRate });
325 | const source = context.createMediaStreamSource(this.stream);
326 | // Load and execute the module script.
327 | try {
328 | await context.audioWorklet.addModule(this.scriptSrc);
329 | } catch (e) {
330 | console.error(e);
331 | throw new Error(`Could not add audioWorklet module: ${this.scriptSrc}`);
332 | }
333 | const processor = new AudioWorkletNode(context, 'audio_processor');
334 | processor.port.onmessage = (e) => {
335 | const { event, id, data } = e.data;
336 | if (event === 'receipt') {
337 | this.eventReceipts[id] = data;
338 | } else if (event === 'chunk') {
339 | if (this._chunkProcessorSize) {
340 | const buffer = this._chunkProcessorBuffer;
341 | this._chunkProcessorBuffer = {
342 | raw: WavPacker.mergeBuffers(buffer.raw, data.raw),
343 | mono: WavPacker.mergeBuffers(buffer.mono, data.mono),
344 | };
345 | if (
346 | this._chunkProcessorBuffer.mono.byteLength >=
347 | this._chunkProcessorSize
348 | ) {
349 | this._chunkProcessor(this._chunkProcessorBuffer);
350 | this._chunkProcessorBuffer = {
351 | raw: new ArrayBuffer(0),
352 | mono: new ArrayBuffer(0),
353 | };
354 | }
355 | } else {
356 | this._chunkProcessor(data);
357 | }
358 | }
359 | };
360 |
361 | const node = source.connect(processor);
362 | const analyser = context.createAnalyser();
363 | analyser.fftSize = 8192;
364 | analyser.smoothingTimeConstant = 0.1;
365 | node.connect(analyser);
366 | if (this.outputToSpeakers) {
367 | // eslint-disable-next-line no-console
368 | console.warn(
369 | 'Warning: Outputting to speakers may degrade sound quality,\n' +
370 | 'especially due to system audio-feedback prevention.\n' +
371 | 'Use only for debugging.',
372 | );
373 | analyser.connect(context.destination);
374 | }
375 |
376 | this.source = source;
377 | this.node = node;
378 | this.analyser = analyser;
379 | this.processor = processor;
380 | return true;
381 | }
382 |
383 | /**
384 | * Gets the current frequency domain data from the recording track
385 | * @param {"frequency"|"music"|"voice"} [analysisType]
386 | * @param {number} [minDecibels] default -100
387 | * @param {number} [maxDecibels] default -30
388 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
389 | */
390 | getFrequencies(
391 | analysisType = 'frequency',
392 | minDecibels = -100,
393 | maxDecibels = -30,
394 | ) {
395 | if (!this.processor) {
396 | throw new Error('Session ended: please call .begin() first');
397 | }
398 | return AudioAnalysis.getFrequencies(
399 | this.analyser,
400 | this.sampleRate,
401 | null,
402 | analysisType,
403 | minDecibels,
404 | maxDecibels,
405 | );
406 | }
407 |
408 | /**
409 | * Pauses the recording
410 | * Keeps microphone stream open but halts storage of audio
411 | * @returns {Promise<true>}
412 | */
413 | async pause() {
414 | if (!this.processor) {
415 | throw new Error('Session ended: please call .begin() first');
416 | } else if (!this.recording) {
417 | throw new Error('Already paused: please call .record() first');
418 | }
419 | if (this._chunkProcessorBuffer.raw.byteLength) {
420 | this._chunkProcessor(this._chunkProcessorBuffer);
421 | }
422 | this.log('Pausing ...');
423 | await this._event('stop');
424 | this.recording = false;
425 | return true;
426 | }
427 |
428 | /**
429 | * Start recording stream and storing to memory from the connected audio source
430 | * @param {(data: { mono: Int16Array; raw: Int16Array }) => any} [chunkProcessor]
431 | * @param {number} [chunkSize] chunkProcessor is not triggered until this many bytes of mono audio have accumulated
432 | * @returns {Promise<true>}
433 | */
434 | async record(chunkProcessor = () => {}, chunkSize = 8192) {
435 | if (!this.processor) {
436 | throw new Error('Session ended: please call .begin() first');
437 | } else if (this.recording) {
438 | throw new Error('Already recording: please call .pause() first');
439 | } else if (typeof chunkProcessor !== 'function') {
440 | throw new Error(`chunkProcessor must be a function`);
441 | }
442 | this._chunkProcessor = chunkProcessor;
443 | this._chunkProcessorSize = chunkSize;
444 | this._chunkProcessorBuffer = {
445 | raw: new ArrayBuffer(0),
446 | mono: new ArrayBuffer(0),
447 | };
448 | this.log('Recording ...');
449 | await this._event('start');
450 | this.recording = true;
451 | return true;
452 | }
453 |
454 | /**
455 | * Clears the audio buffer, empties stored recording
456 | * @returns {Promise<true>}
457 | */
458 | async clear() {
459 | if (!this.processor) {
460 | throw new Error('Session ended: please call .begin() first');
461 | }
462 | await this._event('clear');
463 | return true;
464 | }
465 |
466 | /**
467 | * Reads the current audio stream data
468 | * @returns {Promise<{meanValues: Float32Array, channels: Array<Float32Array>}>}
469 | */
470 | async read() {
471 | if (!this.processor) {
472 | throw new Error('Session ended: please call .begin() first');
473 | }
474 | this.log('Reading ...');
475 | const result = await this._event('read');
476 | return result;
477 | }
478 |
479 | /**
480 | * Saves the current audio stream to a file
481 | * @param {boolean} [force] Force saving while still recording
482 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
483 | */
484 | async save(force = false) {
485 | if (!this.processor) {
486 | throw new Error('Session ended: please call .begin() first');
487 | }
488 | if (!force && this.recording) {
489 | throw new Error(
490 | 'Currently recording: please call .pause() first, or call .save(true) to force',
491 | );
492 | }
493 | this.log('Exporting ...');
494 | const exportData = await this._event('export');
495 | const packer = new WavPacker();
496 | const result = packer.pack(this.sampleRate, exportData.audio);
497 | return result;
498 | }
499 |
500 | /**
501 | * Ends the current recording session and saves the result
502 | * @returns {Promise<import('./wav_packer.js').WavPackerAudioType>}
503 | */
504 | async end() {
505 | if (!this.processor) {
506 | throw new Error('Session ended: please call .begin() first');
507 | }
508 |
509 | const _processor = this.processor;
510 |
511 | this.log('Stopping ...');
512 | await this._event('stop');
513 | this.recording = false;
514 | const tracks = this.stream.getTracks();
515 | tracks.forEach((track) => track.stop());
516 |
517 | this.log('Exporting ...');
518 | const exportData = await this._event('export', {}, _processor);
519 |
520 | this.processor.disconnect();
521 | this.source.disconnect();
522 | this.node.disconnect();
523 | this.analyser.disconnect();
524 | this.stream = null;
525 | this.processor = null;
526 | this.source = null;
527 | this.node = null;
528 |
529 | const packer = new WavPacker();
530 | const result = packer.pack(this.sampleRate, exportData.audio);
531 | return result;
532 | }
533 |
534 | /**
535 | * Performs a full cleanup of WavRecorder instance
536 | * Stops actively listening via microphone and removes existing listeners
537 | * @returns {Promise<true>}
538 | */
539 | async quit() {
540 | this.listenForDeviceChange(null);
541 | if (this.processor) {
542 | await this.end();
543 | }
544 | return true;
545 | }
546 | }
547 |
548 | globalThis.WavRecorder = WavRecorder;
549 |
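550 | /*
551 |  * Usage sketch (browser-only; assumes the user grants microphone access):
552 |  *
553 |  *   const recorder = new WavRecorder({ sampleRate: 44100 });
554 |  *   await recorder.begin();            // opens the mic; status is "paused"
555 |  *   await recorder.record((chunk) => {
556 |  *     // chunk.mono / chunk.raw are PCM16 ArrayBuffers, flushed per chunkSize
557 |  *   });
558 |  *   const wav = await recorder.end();  // WavPackerAudioType: blob, url, duration
559 |  *
560 |  *   // The static decode helper also works without a session:
561 |  *   // await WavRecorder.decode(float32Data, 44100, 24000);
562 |  */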
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/wav_stream_player.js:
--------------------------------------------------------------------------------
1 | import { StreamProcessorSrc } from './worklets/stream_processor.js';
2 | import { AudioAnalysis } from './analysis/audio_analysis.js';
3 |
4 | /**
5 | * Plays audio streams received in raw PCM16 chunks from the browser
6 | * @class
7 | */
8 | export class WavStreamPlayer {
9 | /**
10 | * Creates a new WavStreamPlayer instance
11 | * @param {{sampleRate?: number}} [options]
12 | * @returns {WavStreamPlayer}
13 | */
14 | constructor({ sampleRate = 44100 } = {}) {
15 | this.scriptSrc = StreamProcessorSrc;
16 | this.sampleRate = sampleRate;
17 | this.context = null;
18 | this.stream = null;
19 | this.analyser = null;
20 | this.trackSampleOffsets = {};
21 | this.interruptedTrackIds = {};
22 | }
23 |
24 | /**
25 | * Connects the audio context and enables output to speakers
26 | * @returns {Promise<true>}
27 | */
28 | async connect() {
29 | this.context = new AudioContext({ sampleRate: this.sampleRate });
30 | if (this.context.state === 'suspended') {
31 | await this.context.resume();
32 | }
33 | try {
34 | await this.context.audioWorklet.addModule(this.scriptSrc);
35 | } catch (e) {
36 | console.error(e);
37 | throw new Error(`Could not add audioWorklet module: ${this.scriptSrc}`);
38 | }
39 | const analyser = this.context.createAnalyser();
40 | analyser.fftSize = 8192;
41 | analyser.smoothingTimeConstant = 0.1;
42 | this.analyser = analyser;
43 | return true;
44 | }
45 |
46 | /**
47 | * Gets the current frequency domain data from the playing track
48 | * @param {"frequency"|"music"|"voice"} [analysisType]
49 | * @param {number} [minDecibels] default -100
50 | * @param {number} [maxDecibels] default -30
51 | * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
52 | */
53 | getFrequencies(
54 | analysisType = 'frequency',
55 | minDecibels = -100,
56 | maxDecibels = -30
57 | ) {
58 | if (!this.analyser) {
59 | throw new Error('Not connected, please call .connect() first');
60 | }
61 | return AudioAnalysis.getFrequencies(
62 | this.analyser,
63 | this.sampleRate,
64 | null,
65 | analysisType,
66 | minDecibels,
67 | maxDecibels
68 | );
69 | }
70 |
71 | /**
72 | * Starts audio streaming
73 | * @private
74 | * @returns {true}
75 | */
76 | _start() {
77 | const streamNode = new AudioWorkletNode(this.context, 'stream_processor');
78 | streamNode.connect(this.context.destination);
79 | streamNode.port.onmessage = (e) => {
80 | const { event } = e.data;
81 | if (event === 'stop') {
82 | streamNode.disconnect();
83 | this.stream = null;
84 | } else if (event === 'offset') {
85 | const { requestId, trackId, offset } = e.data;
86 | const currentTime = offset / this.sampleRate;
87 | this.trackSampleOffsets[requestId] = { trackId, offset, currentTime };
88 | }
89 | };
90 | this.analyser.disconnect();
91 | streamNode.connect(this.analyser);
92 | this.stream = streamNode;
93 | return true;
94 | }
95 |
96 | /**
97 | * Adds 16BitPCM data to the currently playing audio stream
98 | * You can add chunks beyond the current play point and they will be queued for play
99 | * @param {ArrayBuffer|Int16Array} arrayBuffer
100 | * @param {string} [trackId]
101 | * @returns {Int16Array}
102 | */
103 | add16BitPCM(arrayBuffer, trackId = 'default') {
104 | if (typeof trackId !== 'string') {
105 | throw new Error(`trackId must be a string`);
106 | } else if (this.interruptedTrackIds[trackId]) {
107 | return;
108 | }
109 | if (!this.stream) {
110 | this._start();
111 | }
112 | let buffer;
113 | if (arrayBuffer instanceof Int16Array) {
114 | buffer = arrayBuffer;
115 | } else if (arrayBuffer instanceof ArrayBuffer) {
116 | buffer = new Int16Array(arrayBuffer);
117 | } else {
118 | throw new Error(`argument must be Int16Array or ArrayBuffer`);
119 | }
120 | this.stream.port.postMessage({ event: 'write', buffer, trackId });
121 | return buffer;
122 | }
123 |
124 | /**
125 | * Gets the offset (sample count) of the currently playing stream
126 | * @param {boolean} [interrupt]
127 | * @returns {Promise<{trackId: string|null, offset: number, currentTime: number}|null>}
128 | */
129 | async getTrackSampleOffset(interrupt = false) {
130 | if (!this.stream) {
131 | return null;
132 | }
133 | const requestId = crypto.randomUUID();
134 | this.stream.port.postMessage({
135 | event: interrupt ? 'interrupt' : 'offset',
136 | requestId,
137 | });
138 | let trackSampleOffset;
139 | while (!trackSampleOffset) {
140 | trackSampleOffset = this.trackSampleOffsets[requestId];
141 | await new Promise((r) => setTimeout(() => r(), 1));
142 | }
143 | const { trackId } = trackSampleOffset;
144 | if (interrupt && trackId) {
145 | this.interruptedTrackIds[trackId] = true;
146 | }
147 | return trackSampleOffset;
148 | }
149 |
150 | /**
151 | * Interrupts the current stream and returns the sample offset of the audio
152 | * (shorthand for getTrackSampleOffset(true))
153 | * @returns {Promise<{trackId: string|null, offset: number, currentTime: number}|null>}
154 | */
155 | async interrupt() {
156 | return this.getTrackSampleOffset(true);
157 | }
158 | }
159 |
160 | globalThis.WavStreamPlayer = WavStreamPlayer;
161 |
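162 | /*
163 |  * Usage sketch (names are illustrative, e.g. PCM16 chunks from a websocket):
164 |  *
165 |  *   const player = new WavStreamPlayer({ sampleRate: 24000 });
166 |  *   await player.connect();                      // must follow a user gesture
167 |  *   socket.onmessage = (msg) => player.add16BitPCM(msg.data, 'voice');
168 |  *   const state = await player.interrupt();      // stop and read sample offset
169 |  */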
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/worklets/audio_processor.js:
--------------------------------------------------------------------------------
1 | const AudioProcessorWorklet = `
2 | class AudioProcessor extends AudioWorkletProcessor {
3 |
4 | constructor() {
5 | super();
6 | this.port.onmessage = this.receive.bind(this);
7 | this.initialize();
8 | }
9 |
10 | initialize() {
11 | this.foundAudio = false;
12 | this.recording = false;
13 | this.chunks = [];
14 | }
15 |
16 | /**
17 | * Concatenates sampled chunks into channels
18 | * Format is chunk[Left[], Right[]]
19 | */
20 | readChannelData(chunks, channel = -1, maxChannels = 9) {
21 | let channelLimit;
22 | if (channel !== -1) {
23 | if (chunks[0] && chunks[0].length - 1 < channel) {
24 | throw new Error(
25 | \`Channel \${channel} out of range: max \${chunks[0].length - 1}\`
26 | );
27 | }
28 | channelLimit = channel + 1;
29 | } else {
30 | channel = 0;
31 | channelLimit = Math.min(chunks[0] ? chunks[0].length : 1, maxChannels);
32 | }
33 | const channels = [];
34 | for (let n = channel; n < channelLimit; n++) {
35 | const length = chunks.reduce((sum, chunk) => {
36 | return sum + chunk[n].length;
37 | }, 0);
38 | const buffers = chunks.map((chunk) => chunk[n]);
39 | const result = new Float32Array(length);
40 | let offset = 0;
41 | for (let i = 0; i < buffers.length; i++) {
42 | result.set(buffers[i], offset);
43 | offset += buffers[i].length;
44 | }
45 | channels[n] = result;
46 | }
47 | return channels;
48 | }
49 |
50 | /**
51 | * Combines parallel audio data into correct format,
52 | * channels[Left[], Right[]] to float32Array[LRLRLRLR...]
53 | */
54 | formatAudioData(channels) {
55 | if (channels.length === 1) {
56 | // Simple case is only one channel
57 | const float32Array = channels[0].slice();
58 | const meanValues = channels[0].slice();
59 | return { float32Array, meanValues };
60 | } else {
61 | const float32Array = new Float32Array(
62 | channels[0].length * channels.length
63 | );
64 | const meanValues = new Float32Array(channels[0].length);
65 | for (let i = 0; i < channels[0].length; i++) {
66 | const offset = i * channels.length;
67 | let meanValue = 0;
68 | for (let n = 0; n < channels.length; n++) {
69 | float32Array[offset + n] = channels[n][i];
70 | meanValue += channels[n][i];
71 | }
72 | meanValues[i] = meanValue / channels.length;
73 | }
74 | return { float32Array, meanValues };
75 | }
76 | }
77 |
78 | /**
79 | * Converts 32-bit float data to 16-bit integers
80 | */
81 | floatTo16BitPCM(float32Array) {
82 | const buffer = new ArrayBuffer(float32Array.length * 2);
83 | const view = new DataView(buffer);
84 | let offset = 0;
85 | for (let i = 0; i < float32Array.length; i++, offset += 2) {
86 | let s = Math.max(-1, Math.min(1, float32Array[i]));
87 | view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
88 | }
89 | return buffer;
90 | }
91 |
92 | /**
93 | * Retrieves the most recent amplitude values from the audio stream
94 | * @param {number} channel
95 | */
96 | getValues(channel = -1) {
97 | const channels = this.readChannelData(this.chunks, channel);
98 | const { meanValues } = this.formatAudioData(channels);
99 | return { meanValues, channels };
100 | }
101 |
102 | /**
103 | * Exports chunks as an audio/wav file
104 | */
105 | export() {
106 | const channels = this.readChannelData(this.chunks);
107 | const { float32Array, meanValues } = this.formatAudioData(channels);
108 | const audioData = this.floatTo16BitPCM(float32Array);
109 | return {
110 | meanValues: meanValues,
111 | audio: {
112 | bitsPerSample: 16,
113 | channels: channels,
114 | data: audioData,
115 | },
116 | };
117 | }
118 |
119 | receive(e) {
120 | const { event, id } = e.data;
121 | let receiptData = {};
122 | switch (event) {
123 | case 'start':
124 | this.recording = true;
125 | break;
126 | case 'stop':
127 | this.recording = false;
128 | break;
129 | case 'clear':
130 | this.initialize();
131 | break;
132 | case 'export':
133 | receiptData = this.export();
134 | break;
135 | case 'read':
136 | receiptData = this.getValues();
137 | break;
138 | default:
139 | break;
140 | }
141 | // Always send back receipt
142 | this.port.postMessage({ event: 'receipt', id, data: receiptData });
143 | }
144 |
145 | sendChunk(chunk) {
146 | const channels = this.readChannelData([chunk]);
147 | const { float32Array, meanValues } = this.formatAudioData(channels);
148 | const rawAudioData = this.floatTo16BitPCM(float32Array);
149 | const monoAudioData = this.floatTo16BitPCM(meanValues);
150 | this.port.postMessage({
151 | event: 'chunk',
152 | data: {
153 | mono: monoAudioData,
154 | raw: rawAudioData,
155 | },
156 | });
157 | }
158 |
159 | process(inputList, outputList, parameters) {
160 | // Copy input to output (e.g. speakers)
161 | // Note that this creates choppy sounds with Mac products
162 | const sourceLimit = Math.min(inputList.length, outputList.length);
163 | for (let inputNum = 0; inputNum < sourceLimit; inputNum++) {
164 | const input = inputList[inputNum];
165 | const output = outputList[inputNum];
166 | const channelCount = Math.min(input.length, output.length);
167 | for (let channelNum = 0; channelNum < channelCount; channelNum++) {
168 | input[channelNum].forEach((sample, i) => {
169 | output[channelNum][i] = sample;
170 | });
171 | }
172 | }
173 | const inputs = inputList[0];
174 | // There's latency at the beginning of a stream before recording starts
175 | // Make sure we actually receive audio data before we start storing chunks
176 | let sliceIndex = 0;
177 | if (!this.foundAudio) {
178 | for (const channel of inputs) {
179 | sliceIndex = 0; // reset for each channel
180 | if (this.foundAudio) {
181 | break;
182 | }
183 | if (channel) {
184 | for (const value of channel) {
185 | if (value !== 0) {
186 | // find only one non-zero entry in any channel
187 | this.foundAudio = true;
188 | break;
189 | } else {
190 | sliceIndex++;
191 | }
192 | }
193 | }
194 | }
195 | }
196 | if (inputs && inputs[0] && this.foundAudio && this.recording) {
197 | // We need to copy the TypedArray, because the \`process\`
198 | // internals will reuse the same buffer to hold each input
199 | const chunk = inputs.map((input) => input.slice(sliceIndex));
200 | this.chunks.push(chunk);
201 | this.sendChunk(chunk);
202 | }
203 | return true;
204 | }
205 | }
206 |
207 | registerProcessor('audio_processor', AudioProcessor);
208 | `;
209 |
210 | const script = new Blob([AudioProcessorWorklet], {
211 | type: 'application/javascript',
212 | });
213 | const src = URL.createObjectURL(script);
214 | export const AudioProcessorSrc = src;
215 |
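216 | // The worklet source above is shipped as a template string and converted to a
217 | // Blob URL so no separate file has to be served. WavRecorder consumes it
218 | // roughly like this (sketch):
219 | //
220 | //   const context = new AudioContext({ sampleRate: 44100 });
221 | //   await context.audioWorklet.addModule(AudioProcessorSrc);
222 | //   const node = new AudioWorkletNode(context, 'audio_processor');
223 | //   node.port.postMessage({ event: 'start', id: 0, data: {} });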
--------------------------------------------------------------------------------
/src/lib/visualizations/wavtools/lib/worklets/stream_processor.js:
--------------------------------------------------------------------------------
1 | export const StreamProcessorWorklet = `
2 | class StreamProcessor extends AudioWorkletProcessor {
3 | constructor() {
4 | super();
5 | this.hasStarted = false;
6 | this.hasInterrupted = false;
7 | this.outputBuffers = [];
8 | this.bufferLength = 128;
9 | this.write = { buffer: new Float32Array(this.bufferLength), trackId: null };
10 | this.writeOffset = 0;
11 | this.trackSampleOffsets = {};
12 | this.port.onmessage = (event) => {
13 | if (event.data) {
14 | const payload = event.data;
15 | if (payload.event === 'write') {
16 | const int16Array = payload.buffer;
17 | const float32Array = new Float32Array(int16Array.length);
18 | for (let i = 0; i < int16Array.length; i++) {
19 | float32Array[i] = int16Array[i] / 0x8000; // Convert Int16 to Float32
20 | }
21 | this.writeData(float32Array, payload.trackId);
22 | } else if (
23 | payload.event === 'offset' ||
24 | payload.event === 'interrupt'
25 | ) {
26 | const requestId = payload.requestId;
27 | const trackId = this.write.trackId;
28 | const offset = this.trackSampleOffsets[trackId] || 0;
29 | this.port.postMessage({
30 | event: 'offset',
31 | requestId,
32 | trackId,
33 | offset,
34 | });
35 | if (payload.event === 'interrupt') {
36 | this.hasInterrupted = true;
37 | }
38 | } else {
39 | throw new Error(\`Unhandled event "\${payload.event}"\`);
40 | }
41 | }
42 | };
43 | }
44 |
45 | writeData(float32Array, trackId = null) {
46 | let { buffer } = this.write;
47 | let offset = this.writeOffset;
48 | for (let i = 0; i < float32Array.length; i++) {
49 | buffer[offset++] = float32Array[i];
50 | if (offset >= buffer.length) {
51 | this.outputBuffers.push(this.write);
52 | this.write = { buffer: new Float32Array(this.bufferLength), trackId };
53 | buffer = this.write.buffer;
54 | offset = 0;
55 | }
56 | }
57 | this.writeOffset = offset;
58 | return true;
59 | }
60 |
61 | process(inputs, outputs, parameters) {
62 | const output = outputs[0];
63 | const outputChannelData = output[0];
64 | const outputBuffers = this.outputBuffers;
65 | if (this.hasInterrupted) {
66 | this.port.postMessage({ event: 'stop' });
67 | return false;
68 | } else if (outputBuffers.length) {
69 | this.hasStarted = true;
70 | const { buffer, trackId } = outputBuffers.shift();
71 | for (let i = 0; i < outputChannelData.length; i++) {
72 | outputChannelData[i] = buffer[i] || 0;
73 | }
74 | if (trackId) {
75 | this.trackSampleOffsets[trackId] =
76 | this.trackSampleOffsets[trackId] || 0;
77 | this.trackSampleOffsets[trackId] += buffer.length;
78 | }
79 | return true;
80 | } else if (this.hasStarted) {
81 | this.port.postMessage({ event: 'stop' });
82 | return false;
83 | } else {
84 | return true;
85 | }
86 | }
87 | }
88 |
89 | registerProcessor('stream_processor', StreamProcessor);
90 | `;
91 |
92 | const script = new Blob([StreamProcessorWorklet], {
93 | type: 'application/javascript',
94 | });
95 | const src = URL.createObjectURL(script);
96 | export const StreamProcessorSrc = src;
97 |
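98 | // Message protocol sketch, as handled in the constructor above: the main
99 | // thread posts "write" with an Int16Array and optional trackId, or
100 | // "offset"/"interrupt" with a requestId; the worklet answers with an "offset"
101 | // receipt and posts "stop" once playback drains or is interrupted, e.g.:
102 | //
103 | //   node.port.postMessage({ event: 'write', buffer: int16Chunk, trackId: 'a' });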
--------------------------------------------------------------------------------
/src/routes/+layout.svelte:
--------------------------------------------------------------------------------
[Svelte markup not recoverable: the component's tags were stripped during extraction]
--------------------------------------------------------------------------------
/src/routes/+layout.ts:
--------------------------------------------------------------------------------
1 | export const prerender = true;
2 |
--------------------------------------------------------------------------------
/src/routes/+page.svelte:
--------------------------------------------------------------------------------
[Svelte markup not recoverable: the page's script (lines 1-59) and template were stripped during extraction; the surviving text shows a "made by flo-bit" credit link]
--------------------------------------------------------------------------------
/static/music.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flo-bit/svelte-audio-visualizations/cfcd93b40495f48460d8de2ea83e8bf43d53088d/static/music.mp3
--------------------------------------------------------------------------------
/svelte.config.js:
--------------------------------------------------------------------------------
1 | import adapter from '@sveltejs/adapter-static';
2 | import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
3 |
4 | /** @type {import('@sveltejs/kit').Config} */
5 | const config = {
6 | // Consult https://kit.svelte.dev/docs/integrations#preprocessors
7 | // for more information about preprocessors
8 | preprocess: vitePreprocess(),
9 |
10 | kit: {
11 | // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
12 | // If your environment is not supported, or you settled on a specific environment, switch out the adapter.
13 | // See https://kit.svelte.dev/docs/adapters for more information about adapters.
14 | adapter: adapter(),
15 | paths: {
16 | base: '/svelte-audio-visualizations'
17 | }
18 | }
19 | };
20 |
21 | export default config;
22 |
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | export default {
3 | content: ['./src/**/*.{html,js,svelte,ts}'],
4 | theme: {
5 | extend: {}
6 | },
7 | plugins: []
8 | };
9 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./.svelte-kit/tsconfig.json",
3 | "compilerOptions": {
4 | "allowJs": true,
5 | "checkJs": true,
6 | "esModuleInterop": true,
7 | "forceConsistentCasingInFileNames": true,
8 | "resolveJsonModule": true,
9 | "skipLibCheck": true,
10 | "sourceMap": true,
11 | "strict": true,
12 | "moduleResolution": "bundler"
13 | }
14 | // Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias
15 | // except $lib which is handled by https://kit.svelte.dev/docs/configuration#files
16 | //
17 | // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes
18 | // from the referenced tsconfig.json - TypeScript does not merge them in
19 | }
20 |
--------------------------------------------------------------------------------
/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { sveltekit } from '@sveltejs/kit/vite';
2 | import { defineConfig } from 'vite';
3 |
4 | export default defineConfig({
5 | plugins: [sveltekit()]
6 | });
7 |
--------------------------------------------------------------------------------