├── .editorconfig
├── .github
│   └── workflows
│       ├── matrix-build.yaml
│       └── verify-build.yaml
├── .gitignore
├── .gitmodules
├── .npmignore
├── .npmrc
├── .scripts
│   ├── check-changelog.mjs
│   ├── move-artifact.mjs
│   ├── retrieve-artifacts.mjs
│   ├── run-all-examples.mjs
│   ├── utils
│   │   └── dev-artifacts-helpers.mjs
│   ├── wpt-harness.mjs
│   └── wpt-mock
│       ├── XMLHttpRequest.js
│       ├── fetch.js
│       ├── requestAnimationFrame.js
│       └── wpt-buffer-loader.js
├── CHANGELOG.md
├── Cargo.toml
├── Cross.toml
├── LICENSE
├── README.md
├── build.rs
├── eslint.config.mjs
├── examples
│   ├── all-nodes.js
│   ├── amplitude-modulation.js
│   ├── analyser.js
│   ├── audio-buffer.js
│   ├── audio-worklet-online.js
│   ├── audio-worklet-shared-array-buffer.js
│   ├── audio-worklet-webassembly.js
│   ├── audio-worklet.js
│   ├── benchmarks.js
│   ├── biquad.js
│   ├── change-state.js
│   ├── composite-audio-node.js
│   ├── compressor.js
│   ├── constant-source.js
│   ├── convolution.js
│   ├── decoding-legacy.js
│   ├── decoding.js
│   ├── disconnect.js
│   ├── doppler.js
│   ├── ended-event.js
│   ├── feedback-delay.js
│   ├── granular-scrub.js
│   ├── iir-filter.js
│   ├── latency-attributes.js
│   ├── many-oscillators-with-env.js
│   ├── microphone.js
│   ├── multichannel.js
│   ├── multichannel.maxpat
│   ├── multiple-contexts.js
│   ├── offline.js
│   ├── oscillators.js
│   ├── package.json
│   ├── panner.js
│   ├── resampling.js
│   ├── samples
│   │   ├── corrupt.wav
│   │   ├── empty_2c.wav
│   │   ├── major-scale.ogg
│   │   ├── parking-garage-response.wav
│   │   ├── sample-38000.wav
│   │   ├── sample-44100.wav
│   │   ├── sample-48000.wav
│   │   ├── sample-faulty.wav
│   │   ├── sample.aiff
│   │   ├── sample.flac
│   │   ├── sample.mp3
│   │   ├── sample.ogg
│   │   ├── sample.wav
│   │   ├── sample.webm
│   │   ├── siren.mp3
│   │   ├── small-room-response.wav
│   │   ├── think-mono-38000.wav
│   │   ├── think-mono-44100.wav
│   │   ├── think-mono-48000.wav
│   │   ├── think-stereo-38000.wav
│   │   ├── think-stereo-44100.wav
│   │   ├── think-stereo-48000.wav
│   │   ├── vocals-dry.wav
│   │   └── white.ogg
│   ├── script-processor.js
│   ├── sink-id.js
│   ├── stereo-panner.js
│   ├── trigger-soundfiles.js
│   ├── waveshaper.js
│   └── worklets
│       ├── Makefile
│       ├── SimpleKernel.cc
│       ├── array-source.js
│       ├── bitcrusher.js
│       ├── free-queue.mjs
│       ├── simple-kernel.wasmmodule.mjs
│       ├── wasm-worklet-processor.mjs
│       └── white-noise.js
├── generator
│   ├── index.mjs
│   ├── js
│   │   ├── AudioNode.tmpl.js
│   │   ├── AudioNodes.tmpl.js
│   │   ├── AudioParam.tmpl.js
│   │   ├── BaseAudioContext.tmpl.js
│   │   ├── index.tmpl.cjs
│   │   └── index.tmpl.mjs
│   ├── rs
│   │   ├── audio_node.tmpl.rs
│   │   ├── audio_nodes.tmpl.rs
│   │   └── lib.tmpl.rs
│   └── web-audio.idl
├── index.cjs
├── index.d.ts
├── index.mjs
├── js
│   ├── AnalyserNode.js
│   ├── AudioBuffer.js
│   ├── AudioBufferSourceNode.js
│   ├── AudioContext.js
│   ├── AudioDestinationNode.js
│   ├── AudioListener.js
│   ├── AudioNode.js
│   ├── AudioParam.js
│   ├── AudioParamMap.js
│   ├── AudioRenderCapacity.js
│   ├── AudioScheduledSourceNode.js
│   ├── AudioWorklet.js
│   ├── AudioWorkletGlobalScope.js
│   ├── AudioWorkletNode.js
│   ├── BaseAudioContext.js
│   ├── BiquadFilterNode.js
│   ├── ChannelMergerNode.js
│   ├── ChannelSplitterNode.js
│   ├── ConstantSourceNode.js
│   ├── ConvolverNode.js
│   ├── DelayNode.js
│   ├── DynamicsCompressorNode.js
│   ├── Events.js
│   ├── GainNode.js
│   ├── IIRFilterNode.js
│   ├── MediaStreamAudioSourceNode.js
│   ├── OfflineAudioContext.js
│   ├── OscillatorNode.js
│   ├── PannerNode.js
│   ├── PeriodicWave.js
│   ├── ScriptProcessorNode.js
│   ├── StereoPannerNode.js
│   ├── WaveShaperNode.js
│   └── lib
│       ├── cast.js
│       ├── errors.js
│       ├── events.js
│       ├── symbols.js
│       └── utils.js
├── load-native.cjs
├── package.json
├── src
│   ├── analyser_node.rs
│   ├── audio_buffer.rs
│   ├── audio_buffer_source_node.rs
│   ├── audio_context.rs
│   ├── audio_destination_node.rs
│   ├── audio_listener.rs
│   ├── audio_node.rs
│   ├── audio_param.rs
│   ├── audio_render_capacity.rs
│   ├── audio_worklet_node.rs
│   ├── base_audio_context.rs
│   ├── biquad_filter_node.rs
│   ├── channel_merger_node.rs
│   ├── channel_splitter_node.rs
│   ├── constant_source_node.rs
│   ├── convolver_node.rs
│   ├── delay_node.rs
│   ├── dynamics_compressor_node.rs
│   ├── gain_node.rs
│   ├── iir_filter_node.rs
│   ├── lib.rs
│   ├── media_devices
│   │   ├── enumerate_devices.rs
│   │   ├── get_user_media.rs
│   │   └── mod.rs
│   ├── media_stream_audio_source_node.rs
│   ├── media_streams
│   │   ├── media_stream.rs
│   │   └── mod.rs
│   ├── offline_audio_context.rs
│   ├── oscillator_node.rs
│   ├── panner_node.rs
│   ├── periodic_wave.rs
│   ├── script_processor_node.rs
│   ├── stereo_panner_node.rs
│   ├── utils
│   │   ├── mod.rs
│   │   └── thread_safe_function.rs
│   └── wave_shaper_node.rs
└── tests
    ├── AudioBuffer.spec.mjs
    ├── AudioParam.spec.mjs
    ├── AudioWorklet.spec.mjs
    ├── OfflineAudioContext.spec.mjs
    ├── PeriodicWave.spec.mjs
    ├── WaveShaper.spec.mjs
    ├── cast.spec.mjs
    ├── ctor.errors.mjs
    ├── getUserMedia.spec.mjs
    ├── test-offline-context-gc.mjs
    └── worklets
        ├── invalid-ctor.worklet.mjs
        ├── invalid.worklet.js
        ├── invalid.worklet.mjs
        └── noise-generator.worklet.mjs

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig helps developers define and maintain consistent
2 | # coding styles between different editors or IDEs
3 | # http://editorconfig.org
4 | root = true
5 | 
6 | [*]
7 | indent_style = space
8 | indent_size = 2
9 | end_of_line = lf
10 | charset = utf-8
11 | trim_trailing_whitespace = true
12 | insert_final_newline = true
13 | 
14 | [*.rs]
15 | indent_size = 4
16 | 
17 | [*.md]
18 | trim_trailing_whitespace = false
19 | 
--------------------------------------------------------------------------------
/.github/workflows/matrix-build.yaml:
--------------------------------------------------------------------------------
1 | name: matrix-build
2 | env:
3 |   DEBUG: napi:*
4 |   PROJECT_NAME: node-web-audio-api
5 |   CARGO_BUILD_NAME: node_web_audio_api
6 |   MACOSX_DEPLOYMENT_TARGET: '10.13'
7 |   CARGO_TERM_COLOR: always
8 | 
9 | on:
10 |   push:
11 |     tags:
12 |       - '*'
13 |   workflow_dispatch:
14 | 
15 | jobs:
16 |   verify-build:
17 |     uses: ./.github/workflows/verify-build.yaml
18 | 
19 |   matrix-build:
20 |     needs: verify-build
21 |     runs-on: ${{ matrix.runner }}
22 |     strategy:
23 |       fail-fast: false
24 |       matrix:
25 |         include:
26 |           - name: darwin-arm64
27 |             runner: macos-latest
28 |             target: aarch64-apple-darwin
29 |             command: cargo
30 | 
31 |           - name: darwin-x64
32 |             runner: macos-latest
33 |             target: x86_64-apple-darwin
34 |             command: cargo
35 | 
36 |           - name: win32-arm64-msvc
37 |             runner: windows-latest
38 |             target: aarch64-pc-windows-msvc
39 |             command: cargo
40 | 
41 |           - name: win32-x64-msvc
42 |             runner: windows-latest
43 |             target: x86_64-pc-windows-msvc
44 |             command: cargo
45 | 
46 |           - name: linux-arm64-gnu
47 |             runner: ubuntu-latest
48 |             target: aarch64-unknown-linux-gnu
49 |             command: cross
50 | 
51 |           - name: linux-x64-gnu
52 |             runner: ubuntu-latest
53 |             target: x86_64-unknown-linux-gnu
54 |             command: cross
55 | 
56 |           - name: linux-arm-gnueabihf
57 |             runner: ubuntu-latest
58 |             target: armv7-unknown-linux-gnueabihf
59 |             command: cross
60 | 
61 |     name: build - ${{ matrix.name }}
62 |     steps:
63 |       - name: Check out repository
64 |         uses: actions/checkout@v4
65 | 
66 |       # node is only used to generate the files, can use host architecture
67 |       - name: Setup node
68 |         uses: actions/setup-node@v4
69 |         with:
70 |           node-version: 22
71 | 
72 |       - name: Setup Rust toolchain
73 |         uses: dtolnay/rust-toolchain@stable
74 |         with:
75 |           components: rustfmt
76 | 
77 |       # only install cross (via cargo-binstall) if we need it
78 |       - name: Install Cross
79 |         if: matrix.command == 'cross'
80 |         shell: bash
81 |         run: |
82 |           curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
83 |           cargo binstall --no-confirm cross
84 | 
85 |       - name: Generate Cargo.lock
86 |         run: cargo generate-lockfile
87 | 
88 |       - name: Install Deps
89 |         run: npm install
90 | 
91 |       - name: Re-generate files from IDL
92 |         run: npm run generate
93 | 
94 |       - name: Build Binary
95 |         shell: bash
96 |         run: |
97 |           rustup target add ${{ matrix.target }}
98 | 
99 |           if [[ "${{ matrix.runner }}" == "ubuntu-latest" ]]; then
100 |             echo "Build with Jack feature"
101 |             ${{ matrix.command }} build --locked --features jack --release --target ${{ matrix.target }}
102 |           else
103 |             ${{ matrix.command }} build --locked --release --target ${{ matrix.target }}
104 |           fi
105 | 
106 |       - name: Rename Binary
107 |         shell: bash
108 |         run: |
109 |           BIN_PREFIX=""
110 |           BIN_SUFFIX=""
111 | 
112 |           if [[ "${{ matrix.runner }}" == "windows-latest" ]]; then
113 |             BIN_PREFIX=""
114 |             BIN_SUFFIX=".dll"
115 |           elif [[ "${{ matrix.runner }}" == "macos-latest" ]]; then
116 |             BIN_PREFIX="lib"
117 |             BIN_SUFFIX=".dylib"
118 |           elif [[ "${{ matrix.runner }}" == "ubuntu-latest" ]]; then
119 |             BIN_PREFIX="lib"
120 |             BIN_SUFFIX=".so"
121 |           fi
122 | 
123 |           ls -al target/${{ matrix.target }}/release/
124 | 
125 |           # The built binary output location
126 |           BIN_OUTPUT="target/${{ matrix.target }}/release/${BIN_PREFIX}${CARGO_BUILD_NAME}${BIN_SUFFIX}"
127 | 
128 |           # Define a better name for the final binary
129 |           BIN_RELEASE="${PROJECT_NAME}.${{ matrix.name }}.node"
130 | 
131 |           # Move the built binary where you want it
132 |           mv "${BIN_OUTPUT}" "./${BIN_RELEASE}"
133 | 
134 |       - name: Upload artifact
135 |         uses: actions/upload-artifact@v4
136 |         with:
137 |           name: bindings-${{ matrix.name }}
138 |           path: ${{ env.PROJECT_NAME }}.${{ matrix.name }}.node
139 |           if-no-files-found: error
140 | 
141 | 
--------------------------------------------------------------------------------
/.github/workflows/verify-build.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow
2 | 
3 | name: verify-build
4 | env:
5 |   MACOSX_DEPLOYMENT_TARGET: '10.13'
6 |   CARGO_TERM_COLOR: always
7 | 
8 | # read-only repo token, no access to secrets
9 | permissions:
10 |   contents: read
11 | 
12 | # no access to secrets
13 | on:
14 |   push:
15 |     branches: [main]
16 |   pull_request:
17 |   workflow_dispatch:
18 |   workflow_call: # make the job callable by matrix-build
19 | 
20 | jobs:
21 |   verify-build:
22 |     # run on macos-latest which seems to have a soundcard available
23 |     runs-on: macos-latest
24 | 
25 |     steps:
26 |       - name: Setup Rust toolchain
27 |         uses: dtolnay/rust-toolchain@stable
28 |         with:
29 |           components: clippy, rustfmt
30 | 
31 |       - name: Setup node
32 |         uses: actions/setup-node@v4
33 |         with:
34 |           node-version: 22
35 | 
36 |       - name: Check out repository
37 |         uses: actions/checkout@v4
38 | 
39 |       - name: Generate Cargo.lock
40 |         run: cargo generate-lockfile
41 | 
42 |       # restore cargo cache from previous runs
43 |       - name: Rust Cache
44 |         uses: Swatinem/rust-cache@v2
45 |         with:
46 |           # The cache should not be shared between different workflows and jobs.
47 |           shared-key: ${{ github.workflow }}-${{ github.job }}
48 | 
49 |       - name: Install Deps
50 |         run: npm install
51 | 
52 |       # check it builds
53 |       - name: Build
54 |         run: npm run build
55 | 
56 |       # run checks and tests
57 |       - name: Clippy
58 |         # run: cargo clippy --all-features -- -D warnings
59 |         run: cargo clippy --all-targets -- -D warnings
60 |       - name: Fmt
61 |         run: cargo fmt -- --check --color always
62 |       - name: Lint
63 |         run: npm run lint
64 |       - name: Test
65 |         run: npm run test:ci
66 | 
67 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | NOTES.md
2 | /generator/src
3 | .DS_Store
4 | 
5 | _*
6 | 
7 | # Working files
8 | TODOS.md
9 | NOTES.md
10 | run-wpt.*
11 | all-checks.sh
12 | tests/junk.mjs
13 | tests/junk-*.mjs
14 | yarn.lock
15 | issues
16 | 
17 | # Created by https://www.toptal.com/developers/gitignore/api/node
18 | # Edit at https://www.toptal.com/developers/gitignore?templates=node
19 | 
20 | ### Node ###
21 | # Logs
22 | logs
23 | *.log
24 | npm-debug.log*
25 | yarn-debug.log*
26 | yarn-error.log*
27 | lerna-debug.log*
28 | 
29 | # Diagnostic reports (https://nodejs.org/api/report.html)
30 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
31 | 
32 | # Runtime data
33 | pids
34 | *.pid
35 | *.seed
36 | *.pid.lock
37 | 
38 | # Directory for instrumented libs generated by jscoverage/JSCover
39 | lib-cov
40 | 
41 | # Coverage directory used by tools like istanbul
42 | coverage
43 | *.lcov
44 | 
45 | # nyc test coverage
46 | .nyc_output
47 | 
48 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
49 | .grunt
50 | 
51 | # Bower dependency directory (https://bower.io/)
52 | bower_components
53 | 
54 | # node-waf configuration
55 | .lock-wscript
56 | 
57 | # Compiled binary addons (https://nodejs.org/api/addons.html)
58 | build/Release
59 | 
60 | # Dependency directories
61 | node_modules/
62 | jspm_packages/
63 | 
64 | # TypeScript v1 declaration files
65 | typings/
66 | 
67 | # TypeScript cache
68 | *.tsbuildinfo
69 | 
70 | # Optional npm cache directory
71 | .npm
72 | 
73 | # Optional eslint cache
74 | .eslintcache
75 | 
76 | # Microbundle cache
77 | .rpt2_cache/
78 | .rts2_cache_cjs/
79 | .rts2_cache_es/
80 | .rts2_cache_umd/
81 | 
82 | # Optional REPL history
83 | .node_repl_history
84 | 
85 | # Output of 'npm pack'
86 | *.tgz
87 | 
88 | # Yarn Integrity file
89 | .yarn-integrity
90 | 
91 | # dotenv environment variables file
92 | .env
93 | .env.test
94 | 
95 | # parcel-bundler cache (https://parceljs.org/)
96 | .cache
97 | 
98 | # Next.js build output
99 | .next
100 | 
101 | # Nuxt.js build / generate output
102 | .nuxt
103 | dist
104 | 
105 | # Gatsby files
106 | .cache/
107 | # Comment in the public line in if your project uses Gatsby and not Next.js
108 | # https://nextjs.org/blog/next-9-1#public-directory-support
109 | # public
110 | 
111 | # vuepress build output
112 | .vuepress/dist
113 | 
114 | # Serverless directories
115 | .serverless/
116 | 
117 | # FuseBox cache
118 | .fusebox/
119 | 
120 | # DynamoDB Local files
121 | .dynamodb/
122 | 
123 | # TernJS port file
124 | .tern-port
125 | 
126 | # Stores VSCode versions used for testing VSCode extensions
127 | .vscode-test
128 | 
129 | # End of https://www.toptal.com/developers/gitignore/api/node
130 | 
131 | 
132 | #Added by cargo
133 | 
134 | /target
135 | Cargo.lock
136 | 
137 | *.bak
138 | 
139 | *.zip
140 | *.node
141 | 
142 | *.test.yml
143 | 
144 | *.env
145 | 
146 | 
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "wpt"]
2 |   path = wpt
3 |   url = git@github.com:web-platform-tests/wpt.git
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | .cargo
2 | .github
3 | .editorconfig
4 | .env
5 | .gitmodules
6 | 
7 | eslint.config.mjs
8 | build.rs
9 | Cargo.*
10 | Cross.toml
11 | NOTES.md
12 | dummy.mjs
13 | all-checks.sh
14 | run-wpt.sh
15 | 
16 | .scripts
17 | examples
18 | generator
19 | src
20 | src-manual
21 | target
22 | wpt
23 | tests
24 | issues
25 | 
26 | *.tgz
27 | *.bak
28 | 
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | package-lock=false
2 | 
--------------------------------------------------------------------------------
/.scripts/check-changelog.mjs:
--------------------------------------------------------------------------------
1 | import fs from 'node:fs';
2 | 
3 | import chalk from 'chalk';
4 | 
5 | // just check that the changelog has been updated in postversion script
6 | // and display a warning message if no version entry is found
7 | 
8 | // const version = '1.0.0';
9 | const version = JSON.parse(fs.readFileSync('package.json')).version;
10 | const changelog = fs.readFileSync('CHANGELOG.md').toString();
11 | 
12 | const versionsInChangelog = changelog.split('\n')
13 |   .filter(line => line.startsWith('##'))
14 |   .map(line => line.split(' ')[1])
15 |   .map(versions => versions.replace(/^v/, ''));
16 | 
17 | if (!versionsInChangelog.includes(version)) {
18 |   const msg = `> [CHECK CHANGELOG.md] WARNING - no entry found for version v${version}`;
19 |   console.log(chalk.yellow(msg));
20 | } else {
21 |   console.log(chalk.green('> [CHECK CHANGELOG.md] ok'));
22 | }
23 | 
24 | 
25 | 
--------------------------------------------------------------------------------
/.scripts/move-artifact.mjs:
--------------------------------------------------------------------------------
1 | import fs from 'node:fs';
2 | import path from 'node:path';
3 | 
4 | import {
5 |   destReleaseFile,
6 |   destDebugFile,
7 |   deleteDevArtifacts,
8 | } from './utils/dev-artifacts-helpers.mjs';
9 | 
10 | const { platform } = process;
11 | const profile = process.argv.includes('--release') ? 'release' : 'debug';
12 | 
13 | const pkg = fs.readFileSync('package.json');
14 | const PROJECT_NAME = JSON.parse(pkg).name;
15 | const CARGO_BUILD_NAME = PROJECT_NAME.replace(/-/g, '_');
16 | 
17 | let buildPrefix = '';
18 | let buildSuffix = '';
19 | 
20 | switch (platform) {
21 |   case 'win32':
22 |     buildPrefix = '';
23 |     buildSuffix = '.dll';
24 |     break;
25 |   case 'darwin':
26 |     buildPrefix = 'lib';
27 |     buildSuffix = '.dylib';
28 |     break;
29 |   default: // assume all Linux-like platforms share the same prefix and suffix...
30 |     buildPrefix = 'lib';
31 |     buildSuffix = '.so';
32 |     break;
33 | }
34 | 
35 | deleteDevArtifacts();
36 | 
37 | let srcFile = path.join('target', profile, `${buildPrefix}${CARGO_BUILD_NAME}${buildSuffix}`);
38 | let destFile = profile === 'release' ? destReleaseFile : destDebugFile;
39 | 
40 | console.log(`> move artifact "${srcFile}" to "${destFile}"`);
41 | 
42 | fs.copyFileSync(srcFile, destFile);
--------------------------------------------------------------------------------
/.scripts/retrieve-artifacts.mjs:
--------------------------------------------------------------------------------
1 | import * as dotenv from 'dotenv';
2 | import { Octokit } from 'octokit';
3 | import fs from 'node:fs';
4 | import { execSync } from 'node:child_process';
5 | 
6 | import { deleteDevArtifacts } from './utils/dev-artifacts-helpers.mjs';
7 | 
8 | // --------------------------------------------------------------
9 | console.log('');
10 | console.log(`> clean development workspace`);
11 | // --------------------------------------------------------------
12 | deleteDevArtifacts();
13 | 
14 | 
15 | const workflowName = 'matrix-build';
16 | // --------------------------------------------------------------
17 | console.log('');
18 | console.log(`> get ${workflowName} workflow id`);
19 | // --------------------------------------------------------------
20 | dotenv.config({ debug: false });
21 | 
22 | const owner = process.env.REPO_OWNER;
23 | const repo = process.env.REPO_NAME;
24 | const ghToken = process.env.GITHUB_TOKEN;
25 | 
26 | const numArtifacts = 7; // 2 Mac, 2 windows, 3 linux
27 | // need a key for downloading job artifacts
28 | const octokit = new Octokit({ auth: ghToken });
29 | 
30 | let res;
31 | 
32 | function checkResponse(res) {
33 |   if (res.status !== 200) {
34 |     console.log('request error:');
35 |     console.log(res);
36 | 
37 |     console.log('');
38 |     console.log('exiting...');
39 |     process.exit(1);
40 |   }
41 | }
42 | 
43 | res = await octokit.request(`GET /repos/${owner}/${repo}/actions/workflows`);
44 | checkResponse(res);
45 | 
46 | const workflowId = res.data.workflows.find(w => w.name === workflowName).id;
47 | console.log('workflow id is: ', workflowId);
48 | 
49 | 
50 | // --------------------------------------------------------------
51 | console.log('');
52 | console.log(`> get ${workflowName} workflow run id`);
53 | // --------------------------------------------------------------
54 | 
55 | const runId = await new Promise(async (resolve, reject) => {
56 | 
57 |   async function checkRunCompleted(resolve) {
58 |     res = await octokit.request(`GET /repos/${owner}/${repo}/actions/workflows/${workflowId}/runs`);
59 |     checkResponse(res);
60 |     // the runs seem to be ordered
61 |     const latestWorkflow = res.data.workflow_runs[0];
62 | 
63 |     // status: 'completed',
64 |     // conclusion: 'success',
65 |     if (latestWorkflow.status === 'completed') {
66 |       if (latestWorkflow.conclusion === 'success') {
67 |         console.log('workflow successfully completed');
68 | 
69 |         resolve(latestWorkflow.id);
70 |         return;
71 |       } else {
72 |         console.log('workflow failed, exiting...');
73 |         process.exit(1);
74 |       }
75 |     } else {
76 |       console.log('...workflow not completed, check again in 30 seconds');
77 |       setTimeout(() => checkRunCompleted(resolve), 30 * 1000);
78 |     }
79 |   }
80 | 
81 |   await checkRunCompleted(resolve);
82 | });
83 | 
84 | console.log('workflow run id is: ', runId);
85 | 
86 | // --------------------------------------------------------------
87 | console.log('');
88 | console.log('get artifact list');
89 | // --------------------------------------------------------------
90 | 
91 | res = await octokit.request(`GET /repos/${owner}/${repo}/actions/runs/${runId}/artifacts`);
92 | checkResponse(res);
93 | 
94 | if (res.data.total_count < numArtifacts) {
95 |   console.log(`> too few artifacts found: ${res.data.total_count}, exiting...`);
96 |   process.exit(1);
97 | } else {
98 |   console.log('download artifacts');
99 |   const artifacts = res.data.artifacts;
100 | 
101 |   for (let i = 0; i < artifacts.length; i++) {
102 |     const artifact = artifacts[i];
103 |     const res = await octokit.request(`GET /repos/${owner}/${repo}/actions/artifacts/${artifact.id}/zip`);
104 |     console.log('-------------------------------------');
105 |     console.log(artifact.name);
106 |     console.log('-------------------------------------');
107 | 
108 |     console.log('> write archive file');
109 |     fs.writeFileSync(`${artifact.name}.zip`, Buffer.from(res.data));
110 | 
111 |     console.log('> unzip archive file (-o for override):', `unzip -o ${artifact.name}.zip`);
112 |     execSync(`unzip -o ${artifact.name}.zip`, { stdio: 'inherit' });
113 | 
114 |     console.log('> delete archive file');
115 |     fs.unlinkSync(`${artifact.name}.zip`);
116 |   }
117 | 
118 |   console.log('');
119 |   console.log('> Success, # archives downloaded and inflated:', artifacts.length);
120 | }
--------------------------------------------------------------------------------
/.scripts/run-all-examples.mjs:
--------------------------------------------------------------------------------
1 | import { fork } from 'node:child_process';
2 | import fs from 'node:fs';
3 | import path from 'node:path';
4 | import { sleep } from '@ircam/sc-utils';
5 | 
6 | // run all examples for 2 seconds
7 | const list = fs.readdirSync('examples').filter(filename => filename.endsWith('.js'));
8 | const testDuration = 2;
9 | 
10 | for (let i = 0; i < list.length; i++) {
11 |   const example = list[i];
12 |   console.log(`
13 | -----------------------------------------------------------------
14 | - ${example}
15 | -----------------------------------------------------------------
16 | `);
17 | 
18 |   const proc = fork(path.join('examples', example));
19 |   await sleep(testDuration);
20 |   proc.kill('SIGKILL');
21 | }
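A note on the smoke-test loop in `.scripts/run-all-examples.mjs` above: it only verifies that each example can be spawned and survive for two seconds; an example that crashes before the timeout still lets the loop continue and the script exits 0. A minimal sketch of a stricter variant — the `exitCode` check and the non-zero process exit are additions for illustration, not part of the repository script:

import { fork } from 'node:child_process';
import fs from 'node:fs';
import path from 'node:path';
import { sleep } from '@ircam/sc-utils';

const list = fs.readdirSync('examples').filter(filename => filename.endsWith('.js'));
let failed = 0;

for (const example of list) {
  const proc = fork(path.join('examples', example));
  await sleep(2);

  // `exitCode` is non-null once the child has already terminated on its own,
  // i.e. it crashed (or finished) before the two-second timeout elapsed
  if (proc.exitCode !== null && proc.exitCode !== 0) {
    console.error(`FAILED: ${example} exited with code ${proc.exitCode}`);
    failed += 1;
  }

  proc.kill('SIGKILL');
}

process.exit(failed > 0 ? 1 : 0);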
--------------------------------------------------------------------------------
/.scripts/utils/dev-artifacts-helpers.mjs:
--------------------------------------------------------------------------------
1 | import fs from 'node:fs';
2 | 
3 | const pkg = fs.readFileSync('package.json');
4 | const PROJECT_NAME = JSON.parse(pkg).name;
5 | 
6 | export const destReleaseFile = `${PROJECT_NAME}.build-release.node`;
7 | export const destDebugFile = `${PROJECT_NAME}.build-debug.node`;
8 | 
9 | export function deleteDevArtifacts() {
10 |   if (fs.existsSync(destReleaseFile)) {
11 |     fs.rmSync(destReleaseFile, { force: true });
12 |   }
13 | 
14 |   if (fs.existsSync(destDebugFile)) {
15 |     fs.rmSync(destDebugFile, { force: true });
16 |   }
17 | }
--------------------------------------------------------------------------------
/.scripts/wpt-mock/XMLHttpRequest.js:
--------------------------------------------------------------------------------
1 | const fs = require('node:fs');
2 | const path = require('node:path');
3 | 
4 | // @note - once all of them are listed, make a pull request to wpt to harmonize all file loading calls
5 | const relativePathPatches = {
6 |   'resources/audiobuffersource-multi-channels-expected.wav': 'the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav',
7 | };
8 | 
9 | // to be passed to wpt-runner step
10 | // window.XMLHttpRequest = XMLHttpRequest;
11 | module.exports = function createXMLHttpRequest(basepath) {
12 |   return class XMLHttpRequest {
13 |     constructor() {
14 |       this._pathname;
15 |       this.onload;
16 |       this.onerror;
17 |       this.response;
18 |       this.status = null;
19 |     }
20 | 
21 |     open(_protocol, url) {
22 |       // apply patch when the url is given as relative
23 |       if (url in relativePathPatches) {
24 |         url = relativePathPatches[url];
25 |       }
26 | 
27 |       this._pathname = url;
28 |     }
29 | 
30 |     send() {
31 |       const pathname = path.join(basepath, this._pathname);
32 |       let buffer;
33 | 
34 |       try {
35 |         buffer = fs.readFileSync(pathname).buffer;
36 |       } catch (err) {
37 |         console.log('[XMLHttpRequest mock] could not find file:', pathname);
38 |         this.status = 404;
39 |         this.onerror(err);
40 |         return;
41 |       }
42 | 
43 |       this.status = 200;
44 |       this.response = buffer;
45 |       this.onload();
46 |     }
47 |   }
48 | }
49 | 
--------------------------------------------------------------------------------
/.scripts/wpt-mock/fetch.js:
--------------------------------------------------------------------------------
1 | // required in node_modules/wpt_runner/testharness/idlharness.js, cf `fetch_spec`
2 | const fs = require('node:fs');
3 | const path = require('node:path');
4 | 
5 | module.exports = function createFetch(basePath) {
6 |   return function fetch(pathname) {
7 |     pathname = path.join(basePath, pathname);
8 | 
9 |     return new Promise(resolve => {
10 |       if (!fs.existsSync(pathname)) {
11 |         resolve({
12 |           ok: false,
13 |           msg: `file ${pathname} not found`,
14 |         });
15 |       } else {
16 |         const buffer = fs.readFileSync(pathname);
17 | 
18 |         resolve({
19 |           ok: true,
20 |           text: () => buffer.toString(),
21 |           json: () => JSON.parse(buffer.toString()),
22 |         });
23 |       }
24 |     });
25 |   }
26 | };
27 | 
--------------------------------------------------------------------------------
/.scripts/wpt-mock/requestAnimationFrame.js:
--------------------------------------------------------------------------------
1 | const fps = 60;
2 | const funcs = [];
3 | 
4 | const skip = Symbol('skip');
5 | const start = Date.now();
6 | let time = start;
7 | let launched = false;
8 | 
9 | const animFrame = () => {
10 |   const fns = funcs.slice();
11 |   funcs.length = 0;
12 | 
13 |   const t = Date.now();
14 |   const dt = t - start;
15 |   const t1 = 1e3 / fps;
16 | 
17 |   for(const f of fns)
18 |     if(f !== skip) f(dt);
19 | 
20 |   while(time <= t + t1 / 4) time += t1;
21 |   setTimeout(animFrame, time - t);
22 | };
23 | 
24 | module.exports.requestAnimationFrame = requestAnimationFrame = func => {
25 |   // lazily start timer
26 |   if (!launched) {
27 |     launched = true;
28 |     animFrame();
29 |   }
30 | 
31 |   funcs.push(func);
32 |   return funcs.length - 1;
33 | };
34 | 
35 | module.exports.cancelAnimationFrame = cancelAnimationFrame = id => {
36 |   funcs[id] = skip;
37 | };
38 | 
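A quick sketch of how the `requestAnimationFrame` mock above is meant to be consumed — not part of the test suite, just an illustration: callbacks receive the elapsed time since module load in milliseconds, fire on the next ~16 ms tick, and a cancelled id is replaced by the `skip` sentinel so it is never invoked.

const { requestAnimationFrame, cancelAnimationFrame } = require('./requestAnimationFrame.js');

// schedule two callbacks for the next frame, then cancel the second one
const id1 = requestAnimationFrame(dt => console.log('frame after', dt, 'ms'));
const id2 = requestAnimationFrame(() => console.log('never called'));
cancelAnimationFrame(id2);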
--------------------------------------------------------------------------------
/.scripts/wpt-mock/wpt-buffer-loader.js:
--------------------------------------------------------------------------------
1 | const path = require('node:path');
2 | 
3 | const createXMLHttpRequest = require('./XMLHttpRequest.js');
4 | const { OfflineAudioContext } = require('../../index.cjs');
5 | 
6 | // create an XMLHttpRequest to be passed to the runner
7 | // can be configured to handle the difference between process.cwd() and given path
8 | // window.XMLHttpRequest = createXMLHttpRequest(rootURL (?))
9 | const XMLHttpRequest = createXMLHttpRequest(path.join('examples', 'samples'));
10 | // maybe should be passed to wpt-runner setup too
11 | // window.alert = console.log.bind(console);
12 | const alert = console.log.bind(console);
13 | 
14 | // this is the BufferLoader from the wpt suite
15 | function BufferLoader(context, urlList, callback) {
16 |   this.context = context;
17 |   this.urlList = urlList;
18 |   this.onload = callback;
19 |   this.bufferList = new Array();
20 |   this.loadCount = 0;
21 | }
22 | 
23 | BufferLoader.prototype.loadBuffer = function(url, index) {
24 |   // Load buffer asynchronously
25 |   var request = new XMLHttpRequest();
26 |   request.open("GET", url, true);
27 |   request.responseType = "arraybuffer";
28 | 
29 |   var loader = this;
30 | 
31 |   request.onload = function() {
32 |     loader.context.decodeAudioData(request.response, decodeSuccessCallback, decodeErrorCallback);
33 |   };
34 | 
35 |   request.onerror = function() {
36 |     alert('BufferLoader: XHR error');
37 |   };
38 | 
39 |   var decodeSuccessCallback = function(buffer) {
40 |     loader.bufferList[index] = buffer;
41 |     if (++loader.loadCount == loader.urlList.length)
42 |       loader.onload(loader.bufferList);
43 |   };
44 | 
45 |   var decodeErrorCallback = function() {
46 |     alert('decodeErrorCallback: decode error');
47 |   };
48 | 
49 |   request.send();
50 | }
51 | 
52 | BufferLoader.prototype.load = function() {
53 |   for (var i = 0; i < this.urlList.length; ++i)
54 |     this.loadBuffer(this.urlList[i], i);
55 | }
56 | 
57 | // ----------------------------------------------
58 | // testing
59 | // ----------------------------------------------
60 | 
61 | const offlineContext = new OfflineAudioContext({
62 |   numberOfChannels: 1,
63 |   length: 1,
64 |   sampleRate: 48000,
65 | });
66 | 
67 | const okFiles = [path.join('sample.wav')];
68 | const err1Files = [path.join('corrupt.wav')];
69 | const err2Files = [path.join('donotexists.wav')];
70 | 
71 | {
72 |   // should work
73 |   const loader = new BufferLoader(offlineContext, okFiles, audioBuffer => console.log(audioBuffer));
74 |   loader.load();
75 | }
76 | 
77 | {
78 |   // should fail - decode error
79 |   const loader = new BufferLoader(offlineContext, err1Files, audioBuffer => console.log(audioBuffer));
80 |   loader.load();
81 | }
82 | 
83 | {
84 |   // should fail - file not found
85 |   const loader = new BufferLoader(offlineContext, err2Files, audioBuffer => console.log(audioBuffer));
86 |   loader.load();
87 | }
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 | 
3 | ## v1.0.4 - 2025-03-20
4 | 
5 | - Fix parsing of parameterData for AudioWorkletNode
6 | 
7 | ## v1.0.3 - 2025-03-06
8 | 
9 | - Improve typescript support
10 | 
11 | ## v1.0.2 - 2025-03-01
12 | 
13 | - Fix error handling when setting buffer in ABSN and Convolver
14 | 
15 | ## v1.0.1 - 2025-01-17
16 | 
17 | - Update upstream crate to [v1.2.0](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-120-2025-01-16)
18 | 
19 | ## v1.0.0 - 2025-01-11
20 | 
21 | - Align version with upstream crate
22 | - Refactor CI
23 | 
24 | ## v0.21.5 - 2024-12-23
25 | 
26 | - Fix: Use module import for `AudioWorklet#addModule`
27 | - Feat: Resolve `AudioWorkletNode` when installed in `node_modules`
28 | - Ensure support of `AudioWorkletNode`s that use WebAssembly
29 | 
30 | ## v0.21.4 - 2024-12-16
31 | 
32 | - Update upstream crate to [v1.1.0](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-110-2024-12-11)
33 | 
34 | ## v0.21.3 - 2024-10-06
35 | 
36 | - Fix typescript export
37 | 
38 | ## v0.21.2 - 2024-09-20
39 | 
40 | - Update upstream crate to [v1.0.1](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-101-2024-09-18)
41 | - Fix: Make sure `AudioBuffer` returned by `OfflineContext` is valid
42 | - Fix: Allow contexts to be properly garbage collected
43 | 
44 | ## v0.21.1 - 2024-06-10
45 | 
46 | - Feat: Buffer pool for AudioWorkletProcessor
47 | - Fix: Propagate `addModule` errors to main thread
48 | - Fix: Memory leak due to `onended` events
49 | 
50 | ## v0.21.0 - 2024-05-17
51 | 
52 | - Feat: Implement AudioWorkletNode
53 | 
54 | ## v0.20.0 - 2024-04-29
55 | 
56 | - Update upstream crate to [v0.44.0](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0440-2024-04-22)
57 | - Implement ScriptProcessorNode
58 | - Fix memory leak introduced in v0.19.0
59 | - Improve events compliance
60 | 
61 | ## v0.19.0 - 2024-04-17
62 | 
63 | - Update upstream crate to [1.0.0-rc.5](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0430--100-rc5-2024-04-15)
64 | - Provide JS facades with proper inheritance chain for all exposed interfaces
65 | - Implement all AudioNode connect / disconnect alternatives
66 | - Improve compliance and error handling
67 | 
68 | ## v0.18.0 - 2024-03-13
69 | 
70 | - Fix `MediaStreamAudioSourceNode`
71 | 
72 | ## v0.17.0 - 2024-03-08
73 | 
74 | - Update upstream crate to [1.0.0-rc.2](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-100-rc2-2024-03-07)
75 | - Improve compliance and error handling
76 | 
77 | ## v0.16.0 - 2024-02-09
78 | 
79 | - Update upstream crate to [v0.42.0](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0420-2024-02-05)
80 | - Improve error handling
81 | - Add channelCount to media constraints
82 | 
83 | ## v0.15.0 - 2024-01-16
84 | 
85 | - Update upstream crate to [v0.41.1](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0411-2024-01-11)
86 | - Better error handling
87 | - Implement online AudioContext and AudioScheduledSourceNode events
88 | - Test against wpt
89 | 
90 | ## v0.14.0 - 2023-12-06
91 | 
92 | - Update upstream crate to [v0.38.0](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0380-2023-12-03)
93 | - Implement AudioListener
94 | 
95 | ## v0.13.0 - 2023-11-08
96 | 
97 | - Update upstream crate to [v0.36.1](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0361-2023-11-08)
98 | - Ship build for linux arm64
99 | - Typescript support
100 | 
101 | ## v0.12.0 - 2023-09-04
102 | 
103 | - Update upstream crate to [v0.33](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0330-2023-07-27)
104 | 
105 | ## v0.11.0 - 2023-07-21
106 | 
107 | - Update upstream crate to [v0.32](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0320-2023-07-16)
108 | - Implement AudioDestination API
109 | - Make decodeAudioData(arrayBuffer) API compliant (drop `load` helper)
110 | 
111 | ## v0.10.0 - 2023-05-26
112 | 
113 | - Update upstream crate to [v0.31](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0310-2023-06-25)
114 | 
115 | ## v0.9.0 - 2023-06-08
116 | 
117 | - Update upstream crate to [v0.30](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0300-2023-06-07)
118 | 
119 | ## v0.8.0 - 2023-05-19
120 | 
121 | - Implement MediaDevices enumerateDevices and getUserMedia
122 | - Use jack as default output if it exists on Linux
123 | 
124 | ## v0.7.0 - 2023-02-23
125 | 
126 | - Improve readme & doc
127 | - Fix AudioParam method names
128 | 
129 | ## v0.6.0 - 2023-02-01
130 | 
131 | - Basic support for mediaDevices & MediaStreamAudioSourceNode
132 | - Add bindings to ConvolverNode, AnalyserNode & Panner nodes
133 | - Update upstream crate to [v0.26](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0250-2022-11-06)
134 | 
135 | ## v0.5.0 - 2022-12-19
136 | 
137 | - Implement AudioParam#setValueCurveAtTime
138 | - Offline context constructor
139 | 
140 | ## v0.4.0 - 2022-11-07
141 | 
142 | - Implement offline audio context
143 | - Update upstream crate to [v0.24](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0240-2022-09-10)
144 | - Implement AudioNode#disconnect
145 | - Properly support ESM
146 | - Limit number of online contexts to 1 on Linux
147 | - Force latencyHint to 'playback' if not manually set on RPi
148 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | authors = ["Benjamin Matuszewski "]
3 | edition = "2021"
4 | name = "node-web-audio-api"
5 | version = "1.0.4"
6 | 
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 | 
9 | [lib]
10 | crate-type = ["cdylib"]
11 | 
12 | [dependencies]
13 | crossbeam-channel = "0.5"
14 | napi = { version="2.16", features=["napi9", "tokio_rt"] }
15 | napi-derive = { version="2.16" }
16 | thread-priority = "1.2"
17 | web-audio-api = "1.2"
18 | # web-audio-api = { path = "../web-audio-api-rs" }
19 | 
20 | [target.'cfg(all(any(windows, unix), target_arch = "x86_64", not(target_env = "musl")))'.dependencies]
21 | mimalloc = { version = "0.1" }
22 | 
23 | [build-dependencies]
24 | napi-build = "2.1"
25 | 
26 | [profile.dev]
27 | opt-level = 3
28 | 
29 | [profile.release]
30 | lto = true
31 | strip = true
32 | 
33 | [features]
34 | jack = ["web-audio-api/cpal-jack"]
35 | 
--------------------------------------------------------------------------------
/Cross.toml:
--------------------------------------------------------------------------------
1 | [target.aarch64-unknown-linux-gnu]
2 | pre-build = [
3 |   "dpkg --add-architecture $CROSS_DEB_ARCH",
4 |   "apt-get update && apt-get --assume-yes install libasound2-dev:$CROSS_DEB_ARCH libjack-jackd2-dev:$CROSS_DEB_ARCH"
5 | ]
6 | 
7 | [target.armv7-unknown-linux-gnueabihf]
8 | pre-build = [
9 |   "dpkg --add-architecture $CROSS_DEB_ARCH",
10 |   "apt-get update && apt-get --assume-yes install libasound2-dev:$CROSS_DEB_ARCH libjack-jackd2-dev:$CROSS_DEB_ARCH"
11 | ]
12 | 
13 | [target.x86_64-unknown-linux-gnu]
14 | pre-build = [
15 |   "dpkg --add-architecture $CROSS_DEB_ARCH",
16 |   "apt-get update && apt-get --assume-yes install libasound2-dev:$CROSS_DEB_ARCH libjack-jackd2-dev:$CROSS_DEB_ARCH"
17 | ]
18 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2021-present IRCAM – Centre Pompidou (France, Paris)
2 | 
3 | All rights reserved.
4 | 
5 | Redistribution and use in source and binary forms, with or without modification,
6 | are permitted provided that the following conditions are met:
7 | 
8 | * Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 | 
11 | * Redistributions in binary form must reproduce the above copyright notice, this
12 | list of conditions and the following disclaimer in the documentation and/or
13 | other materials provided with the distribution.
14 | 
15 | * Neither the name of the IRCAM nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 | 
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
23 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
26 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 | 
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
1 | extern crate napi_build;
2 | 
3 | fn main() {
4 |     napi_build::setup();
5 | }
6 | 
--------------------------------------------------------------------------------
/eslint.config.mjs:
--------------------------------------------------------------------------------
1 | export { default } from '@ircam/eslint-config';
2 | 
--------------------------------------------------------------------------------
/examples/amplitude-modulation.js:
--------------------------------------------------------------------------------
1 | import { AudioContext } from '../index.mjs';
2 | 
3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
4 | const audioContext = new AudioContext({ latencyHint });
5 | 
6 | const modulated = audioContext.createGain(); // the gain that will be modulated [0, 1]
7 | modulated.connect(audioContext.destination);
8 | modulated.gain.value = 0.5;
9 | 
10 | const carrier = audioContext.createOscillator();
11 | carrier.connect(modulated);
12 | carrier.frequency.value = 300;
13 | 
14 | const depth = audioContext.createGain();
15 | depth.connect(modulated.gain);
16 | depth.gain.value = 0.5;
17 | 
18 | const modulator = audioContext.createOscillator();
19 | modulator.connect(depth);
20 | modulator.frequency.value = 1.;
21 | 
22 | modulator.start();
23 | carrier.start();
24 | 
25 | let flag = 1;
26 | 
27 | (function loop() {
28 |   const freq = flag * 300;
29 |   const when = audioContext.currentTime + 10;
30 |   modulator.frequency.linearRampToValueAtTime(freq, when);
31 | 
32 |   flag = 1 - flag;
33 | 
34 |   setTimeout(loop, 10 * 1000);
35 | }());
36 | 
--------------------------------------------------------------------------------
/examples/analyser.js:
--------------------------------------------------------------------------------
1 | import { AudioContext } from '../index.mjs';
2 | 
3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
4 | const audioContext = new AudioContext({ latencyHint });
5 | 
6 | const analyser = audioContext.createAnalyser();
7 | analyser.connect(audioContext.destination);
8 | 
9 | const osc = audioContext.createOscillator();
10 | osc.frequency.value = 200.;
11 | osc.connect(analyser);
12 | osc.start();
13 | 
14 | const bins = new Float32Array(analyser.frequencyBinCount);
15 | 
16 | setInterval(() => {
17 |   // 10th bin should be highest
18 |   analyser.getFloatFrequencyData(bins);
19 |   console.log(bins.subarray(0, 20)); // print the first 20 bins
20 | }, 1000);
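The "10th bin" expectation in the comment above follows from the analyser geometry: each bin spans sampleRate / fftSize Hz, and the AnalyserNode fftSize defaults to 2048. A sketch of the arithmetic — the 48000 Hz sample rate is an assumption, in practice it comes from `audioContext.sampleRate`:

const sampleRate = 48000;                    // assumed; read audioContext.sampleRate in practice
const fftSize = 2048;                        // AnalyserNode default
const binWidth = sampleRate / fftSize;       // 23.4375 Hz per bin
const binIndex = Math.round(200 / binWidth); // ≈ 9 for the 200 Hz oscillator
console.log({ binWidth, binIndex });         // index 9 is the 10th bin, counting from 1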
--------------------------------------------------------------------------------
/examples/audio-buffer.js:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert';
2 | import { AudioContext } from '../index.mjs';
3 | 
4 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
5 | const audioContext = new AudioContext({ latencyHint });
6 | 
7 | // create a 1 second buffer filled with a sine at 200Hz
8 | console.log('> Play sine at 200Hz created manually in an AudioBuffer');
9 | 
10 | const numberOfChannels = 1;
11 | const length = audioContext.sampleRate;
12 | const sampleRate = audioContext.sampleRate;
13 | const buffer = audioContext.createBuffer(numberOfChannels, length, sampleRate);
14 | 
15 | // // this works as expected but should be tested carefully, relies on unsafe code
16 | // const channel = buffer.getChannelData(0);
17 | // for (let i = 0; i < channel.length; i++) {
18 | //   channel[i] = i;
19 | // }
20 | // console.log(channel);
21 | // console.log(buffer.getChannelData(0));
22 | // process.exit(0);
23 | 
24 | const sine = new Float32Array(length);
25 | 
26 | for (let i = 0; i < length; i++) {
27 |   let phase = i / length * 2. * Math.PI * 200.;
28 |   sine[i] = Math.sin(phase);
29 | }
30 | 
31 | buffer.copyToChannel(sine, 0);
32 | 
33 | {
34 |   const test = new Float32Array(length);
35 |   buffer.copyFromChannel(test, 0);
36 |   assert.deepStrictEqual(sine, test);
37 | }
38 | 
39 | // play the buffer in a loop
40 | const src = audioContext.createBufferSource();
41 | src.buffer = buffer;
42 | src.loop = true;
43 | src.connect(audioContext.destination);
44 | src.start(audioContext.currentTime);
45 | src.stop(audioContext.currentTime + 3.);
46 | 
47 | await new Promise(resolve => setTimeout(resolve, 3.5 * 1000));
48 | 
49 | // play a sine at 200Hz
50 | console.log('> Play sine at 200Hz from an OscillatorNode');
51 | 
52 | let osc = audioContext.createOscillator();
53 | osc.frequency.value = 200.;
54 | osc.connect(audioContext.destination);
55 | osc.start(audioContext.currentTime);
56 | osc.stop(audioContext.currentTime + 3.);
57 | 
58 | await new Promise(resolve => setTimeout(resolve, 3.5 * 1000));
59 | 
60 | await audioContext.close();
61 | 
--------------------------------------------------------------------------------
/examples/audio-worklet-online.js:
--------------------------------------------------------------------------------
1 | import { AudioContext, OscillatorNode, GainNode, AudioWorkletNode } from '../index.mjs';
2 | 
3 | // load audio worklet from online source
4 | 
5 | const plugin = 'https://googlechromelabs.github.io/web-audio-samples/audio-worklet/basic/noise-generator/noise-generator.js';
6 | const audioContext = new AudioContext();
7 | await audioContext.audioWorklet.addModule(plugin);
8 | 
9 | const modulatorNode = new OscillatorNode(audioContext);
10 | const modGainNode = new GainNode(audioContext);
11 | const noiseGeneratorNode = new AudioWorkletNode(audioContext, 'noise-generator');
12 | noiseGeneratorNode.connect(audioContext.destination);
13 | 
14 | // Connect the oscillator to 'amplitude' AudioParam.
15 | const paramAmp = noiseGeneratorNode.parameters.get('amplitude');
16 | modulatorNode.connect(modGainNode).connect(paramAmp);
17 | 
18 | modulatorNode.frequency.value = 0.5;
19 | modGainNode.gain.value = 0.75;
20 | modulatorNode.start();
21 | 
--------------------------------------------------------------------------------
/examples/audio-worklet-shared-array-buffer.js:
--------------------------------------------------------------------------------
1 | import path from 'node:path';
2 | 
3 | import { AudioContext, AudioWorkletNode } from '../index.mjs';
4 | import { sleep } from '@ircam/sc-utils';
5 | 
6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
7 | const audioContext = new AudioContext({ latencyHint });
8 | 
9 | await audioContext.audioWorklet.addModule(path.join('examples', 'worklets', 'array-source.js'));
10 | 
11 | // Create a shared float array big enough for 128 floats
12 | let sharedArray = new SharedArrayBuffer(512);
13 | let sharedFloats = new Float32Array(sharedArray);
14 | 
15 | async function runSource() {
16 |   const src = new AudioWorkletNode(audioContext, 'array-source', {
17 |     processorOptions: { sharedFloats },
18 |   });
19 | 
20 |   src.connect(audioContext.destination);
21 | 
22 |   console.log('Sawtooth');
23 |   for (let i = 0; i < sharedFloats.length; i++) {
24 |     sharedFloats[i] = -1. + i / 64; // create saw
25 |   }
26 |   await sleep(1);
27 | 
28 |   console.log('Square');
29 |   for (let i = 0; i < sharedFloats.length; i++) {
30 |     sharedFloats[i] = i > 64 ? 1 : -1;
31 |   }
32 |   await sleep(1);
33 | 
34 |   src.disconnect();
35 |   // src goes out of scope and is disconnected, so it should be cleaned up
36 | }
37 | 
38 | await runSource();
39 | 
40 | console.log('closing');
41 | await audioContext.close();
42 | 
--------------------------------------------------------------------------------
/examples/audio-worklet-webassembly.js:
--------------------------------------------------------------------------------
1 | // https://github.com/GoogleChromeLabs/web-audio-samples/tree/main/src/audio-worklet/design-pattern/wasm
2 | //
3 | // Copyright (c) 2018 The Chromium Authors. All rights reserved.
4 | // Use of this source code is governed by a BSD-style license that can be
5 | // found in the LICENSE file.
6 | 
7 | import { AudioContext, OscillatorNode, AudioWorkletNode } from '../index.mjs';
8 | 
9 | const audioContext = new AudioContext();
10 | 
11 | await audioContext.audioWorklet.addModule('./worklets/wasm-worklet-processor.mjs');
12 | const oscillator = new OscillatorNode(audioContext);
13 | const bypasser = new AudioWorkletNode(audioContext, 'wasm-worklet-processor');
14 | oscillator.connect(bypasser).connect(audioContext.destination);
15 | oscillator.start();
16 | 
17 | 
--------------------------------------------------------------------------------
/examples/audio-worklet.js:
--------------------------------------------------------------------------------
1 | import path from 'node:path';
2 | 
3 | import { AudioContext, OfflineAudioContext, OscillatorNode, AudioWorkletNode } from '../index.mjs';
4 | import { sleep } from '@ircam/sc-utils';
5 | 
6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
7 | 
8 | const TEST_ONLINE = true;
9 | 
10 | const audioContext = TEST_ONLINE
11 |   ? new AudioContext({ latencyHint })
12 |   : new OfflineAudioContext(2, 8 * 48000, 48000);
13 | 
14 | await audioContext.audioWorklet.addModule(path.join('examples', 'worklets', 'bitcrusher.js')); // relative to cwd
15 | await audioContext.audioWorklet.addModule(path.join('worklets', 'white-noise.js')); // relative path to call site
16 | 
17 | const sine = new OscillatorNode(audioContext, { type: 'sawtooth', frequency: 5000 });
18 | const bitCrusher = new AudioWorkletNode(audioContext, 'bitcrusher', {
19 |   processorOptions: { msg: 'hello world' },
20 | });
21 | 
22 | bitCrusher.port.on('message', (event) => console.log('main recv', event));
23 | bitCrusher.port.postMessage({ hello: 'from main' });
24 | 
25 | sine
26 |   .connect(bitCrusher)
27 |   .connect(audioContext.destination);
28 | 
29 | const paramBitDepth = bitCrusher.parameters.get('bitDepth');
30 | const paramReduction = bitCrusher.parameters.get('frequencyReduction');
31 | 
32 | paramBitDepth.setValueAtTime(1, 0);
33 | 
34 | paramReduction.setValueAtTime(0.01, 0.);
35 | paramReduction.linearRampToValueAtTime(0.1, 4.);
36 | paramReduction.exponentialRampToValueAtTime(0.01, 8.);
37 | 
38 | sine.start();
39 | sine.stop(8);
40 | 
41 | const whiteNoise = new AudioWorkletNode(audioContext, 'white-noise');
42 | whiteNoise.connect(audioContext.destination);
43 | 
44 | if (TEST_ONLINE) {
45 |   audioContext.renderCapacity.addEventListener('update', e => {
46 |     const { timestamp, averageLoad, peakLoad, underrunRatio } = e;
47 |     console.log('AudioRenderCapacityEvent:', { timestamp, averageLoad, peakLoad, underrunRatio });
48 |   });
49 |   audioContext.renderCapacity.start({ updateInterval: 1. });
50 | 
51 |   await sleep(8);
52 |   await audioContext.close();
53 | } else {
54 |   const buffer = await audioContext.startRendering();
55 |   const online = new AudioContext();
56 |   const src = online.createBufferSource();
57 |   src.buffer = buffer;
58 |   src.connect(online.destination);
59 |   src.start();
60 | 
61 |   await sleep(8);
62 |   await online.close();
63 | }
64 | 
--------------------------------------------------------------------------------
/examples/biquad.js:
--------------------------------------------------------------------------------
1 | import fs from 'node:fs';
2 | import path from 'node:path';
3 | import { AudioContext } from '../index.mjs';
4 | 
5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
6 | const audioContext = new AudioContext({ latencyHint });
7 | 
8 | // setup background music:
9 | // read from local file
10 | const pathname = path.join('examples', 'samples', 'think-stereo-48000.wav');
11 | const arrayBuffer = fs.readFileSync(pathname).buffer;
12 | const buffer = await audioContext.decodeAudioData(arrayBuffer);
13 | 
14 | let now = audioContext.currentTime;
15 | 
16 | console.log('> smoothly open low-pass filter for 10 sec');
17 | // create a lowpass filter (default)
18 | const biquad = audioContext.createBiquadFilter();
19 | biquad.connect(audioContext.destination);
20 | biquad.frequency.value = 10.;
21 | biquad.frequency.exponentialRampToValueAtTime(10000., now + 10.);
22 | 
23 | // pipe the audio buffer source into the lowpass filter
24 | const src = audioContext.createBufferSource();
25 | src.connect(biquad);
26 | src.buffer = buffer;
27 | src.loop = true;
28 | src.start();
29 | 
30 | const frequencyHz = new Float32Array([250., 500.0, 750.0, 1000., 1500.0, 2000.0, 4000.0]);
31 | const magResponse = new Float32Array(frequencyHz.length);
32 | const phaseResponse = new Float32Array(frequencyHz.length);
33 | 
34 | biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
35 | console.log('=================================');
36 | console.log('Biquad filter frequency response:');
37 | console.log('=================================');
38 | console.log('Cutoff freq -- %f Hz', biquad.frequency.value);
39 | console.log('Gain -- %f', biquad.gain.value);
40 | console.log('Q factor -- %f', biquad.Q.value);
41 | console.log('---------------------------------');
42 | frequencyHz.forEach((freq, index) => {
43 |   console.log(
44 |     '%f Hz --> %f dB',
45 |     freq,
46 |     20.0 * Math.log10(magResponse[index]),
47 |   );
48 | });
49 | console.log('---------------------------------');
50 | 
51 | await new Promise(resolve => setTimeout(resolve, 5 * 1000));
52 | 
53 | biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
54 | console.log('=================================');
55 | console.log('Biquad filter frequency response:');
56 | console.log('=================================');
57 | console.log('Cutoff freq -- %f Hz', biquad.frequency.value);
58 | console.log('Gain -- %f', biquad.gain.value);
59 | console.log('Q factor -- %f', biquad.Q.value);
60 | console.log('---------------------------------');
61 | frequencyHz.forEach((freq, index) => {
62 |   console.log(
63 |     '%f Hz --> %f dB',
64 |     freq,
65 |     20.0 * Math.log10(magResponse[index]),
66 |   );
67 | });
68 | console.log('---------------------------------');
69 | 
70 | await new Promise(resolve => setTimeout(resolve, 5 * 1000));
71 | 
72 | biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
73 | console.log('=================================');
74 | console.log('Biquad filter frequency response:');
75 | console.log('=================================');
76 | console.log('Cutoff freq -- %f Hz', biquad.frequency.value);
77 | console.log('Gain -- %f', biquad.gain.value);
78 | console.log('Q factor -- %f', biquad.Q.value);
79 | console.log('---------------------------------');
80 | frequencyHz.forEach((freq, index) => {
81 |   console.log(
82 |     '%f Hz --> %f dB',
83 |     freq,
84 |     20.0 * Math.log10(magResponse[index]),
85 |   );
86 | });
87 | console.log('---------------------------------');
88 | 
89 | now = audioContext.currentTime;
90 | biquad.frequency.exponentialRampToValueAtTime(10., now + 10.);
91 | 
92 | await new Promise(resolve => setTimeout(resolve, 5 * 1000));
93 | 
94 | biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
95 | console.log('=================================');
96 | console.log('Biquad filter frequency response:');
97 | console.log('=================================');
98 | console.log('Cutoff freq -- %f Hz', biquad.frequency.value);
99 | console.log('Gain -- %f', biquad.gain.value);
100 | console.log('Q factor -- %f', biquad.Q.value);
101 | console.log('---------------------------------');
102 | frequencyHz.forEach((freq, index) => {
103 |   console.log(
104 |     '%f Hz --> %f dB',
105 |     freq,
106 |     20.0 * Math.log10(magResponse[index]),
107 |   );
108 | });
109 | console.log('---------------------------------');
110 | 
111 | await new Promise(resolve => setTimeout(resolve, 5 * 1000));
112 | 
113 | biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
114 | console.log('=================================');
115 | console.log('Biquad filter frequency response:');
116 | console.log('=================================');
117 | console.log('Cutoff freq -- %f Hz', biquad.frequency.value);
118 | console.log('Gain -- %f', biquad.gain.value);
119 | console.log('Q factor -- %f', biquad.Q.value);
120 | console.log('---------------------------------');
121 | frequencyHz.forEach((freq, index) => {
122 |   console.log(
123 |     '%f Hz --> %f dB',
124 |     freq,
125 |     20.0 * Math.log10(magResponse[index]),
126 |   );
127 | });
128 | console.log('---------------------------------');
129 | 
130 | await audioContext.close();
131 | 
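The response dump in examples/biquad.js above is repeated verbatim five times; factoring it into a helper makes the intent clearer. A sketch only — same computation as the inline blocks, the function name is mine:

function logFrequencyResponse(biquad, frequencyHz) {
  const magResponse = new Float32Array(frequencyHz.length);
  const phaseResponse = new Float32Array(frequencyHz.length);
  biquad.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);

  console.log('Cutoff freq -- %f Hz | Gain -- %f | Q factor -- %f',
    biquad.frequency.value, biquad.gain.value, biquad.Q.value);

  frequencyHz.forEach((freq, index) => {
    // convert linear magnitude to dB
    console.log('%f Hz --> %f dB', freq, 20.0 * Math.log10(magResponse[index]));
  });
}

// logFrequencyResponse(biquad, frequencyHz); // replaces each inline dump above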
'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | console.log('Context state - %s', audioContext.state); 9 | 10 | audioContext.addEventListener('statechange', event => { 11 | console.log('addEventListener (called second):', event); 12 | }); 13 | 14 | audioContext.onstatechange = event => { 15 | console.log('onstatechange (called first):', event); 16 | }; 17 | 18 | const sine = audioContext.createOscillator(); 19 | sine.connect(audioContext.destination); 20 | sine.frequency.value = 200; 21 | sine.start(); 22 | 23 | console.log('> Playback for 1 seconds'); 24 | await new Promise(resolve => setTimeout(resolve, 1000)); 25 | 26 | console.log('> Pause audioContext for 1 seconds'); 27 | console.log('Context state before suspend - %s', audioContext.state); 28 | await audioContext.suspend(); 29 | console.log('Context state after suspend - %s', audioContext.state); 30 | 31 | await new Promise(resolve => setTimeout(resolve, 1000)); 32 | 33 | console.log('> Resume audioContext for 1 seconds'); 34 | console.log('Context state before resume - %s', audioContext.state); 35 | await audioContext.resume(); 36 | console.log('Context state after resume - %s', audioContext.state); 37 | 38 | await new Promise(resolve => setTimeout(resolve, 1000)); 39 | 40 | // Closing the audioContext should halt the media stream source 41 | console.log('> Close audioContext'); 42 | console.log('Context state before close - %s', audioContext.state); 43 | await audioContext.close(); 44 | console.log('Context state after close - %s', audioContext.state); 45 | 46 | console.log('Process will exit now...'); 47 | 48 | -------------------------------------------------------------------------------- /examples/composite-audio-node.js: -------------------------------------------------------------------------------- 1 | import { sleep } from '@ircam/sc-utils'; 2 | import { AudioContext, AudioNode } from '../index.mjs'; 3 | 4 | // Monkeypatching AudioNode.connect to allow for composite nodes. 5 | // https://github.com/GoogleChromeLabs/web-audio-samples/wiki/CompositeAudioNode 6 | 7 | class CompositeAudioNode { 8 | get _isCompositeAudioNode() { 9 | return true; 10 | } 11 | 12 | constructor(context) { 13 | this.context = context; 14 | this._input = this.context.createGain(); 15 | this._output = this.context.createGain(); 16 | } 17 | 18 | connect() { 19 | this._output.connect.apply(this._output, arguments); 20 | } 21 | 22 | disconnect() { 23 | this._output.disconnect.apply(this._output, arguments); 24 | } 25 | } 26 | 27 | // The AudioNode prototype has to be monkey-patched because 28 | // the native AudioNode wants to connect only to other 29 | // native AudioNodes 30 | AudioNode.prototype._connect = AudioNode.prototype.connect; 31 | AudioNode.prototype.connect = function() { 32 | var args = Array.prototype.slice.call(arguments); 33 | if (args[0]._isCompositeAudioNode) { 34 | args[0] = args[0]._input; 35 | } 36 | 37 | this._connect.apply(this, args); 38 | }; 39 | 40 | class MyCompositeNode extends CompositeAudioNode { 41 | 42 | get gain() { 43 | return this._amp.gain; 44 | } 45 | 46 | constructor(context, options) { 47 | super(context, options); 48 | 49 | // Do stuffs below. 
50 | this._amp = this.context.createGain(); 51 | this._input.connect(this._amp); 52 | this._amp.connect(this._output); 53 | } 54 | } 55 | 56 | var context = new AudioContext(); 57 | var myCompNode = new MyCompositeNode(context); 58 | var oscNode = context.createOscillator(); 59 | var gainNode = context.createGain(); 60 | 61 | myCompNode.gain.value = 0.25; 62 | 63 | oscNode.connect(myCompNode); 64 | myCompNode.connect(gainNode); 65 | gainNode.connect(context.destination); 66 | 67 | oscNode.start(); 68 | oscNode.stop(1.0); 69 | 70 | await sleep(1.5); 71 | await context.close(); 72 | -------------------------------------------------------------------------------- /examples/compressor.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext } from '../index.mjs'; 4 | 5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | const pathname = path.join(process.cwd(), 'examples', 'samples', 'think-stereo-48000.wav'); 9 | const arrayBuffer = fs.readFileSync(pathname).buffer; 10 | const buffer = await audioContext.decodeAudioData(arrayBuffer); 11 | 12 | console.log('> no compression'); 13 | const src = audioContext.createBufferSource(); 14 | src.connect(audioContext.destination); 15 | src.buffer = buffer; 16 | src.start(); 17 | 18 | await new Promise(resolve => setTimeout(resolve, 3 * 1000)); 19 | 20 | console.log('> compression (hard knee)'); 21 | console.log(`+ attack: 30ms`); 22 | console.log('+ release: 100ms'); 23 | console.log('+ ratio: 12'); 24 | console.log('>'); 25 | 26 | for (let i = 0; i < 6; i++) { 27 | console.log(`+ threshold at ${-10. * i}`); 28 | 29 | const compressor = audioContext.createDynamicsCompressor(); 30 | compressor.connect(audioContext.destination); 31 | compressor.threshold.value = -10. * i; 32 | compressor.knee.value = 0.; // hard knee 33 | compressor.attack.value = 0.03; 34 | compressor.release.value = 0.1; 35 | 36 | const src = audioContext.createBufferSource(); 37 | src.connect(compressor); 38 | src.buffer = buffer; 39 | src.start(); 40 | 41 | await new Promise(resolve => setTimeout(resolve, 3 * 1000)); 42 | } 43 | 44 | await audioContext.close(); 45 | 46 | -------------------------------------------------------------------------------- /examples/constant-source.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ?
'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | // use merger to pipe oscillators to right and left channels 7 | const merger = audioContext.createChannelMerger(2); 8 | merger.connect(audioContext.destination); 9 | 10 | // left branch 11 | const gainLeft = audioContext.createGain(); 12 | gainLeft.gain.value = 0.; 13 | gainLeft.connect(merger, 0, 0); 14 | 15 | const srcLeft = audioContext.createOscillator(); 16 | srcLeft.frequency.value = 200.; 17 | srcLeft.connect(gainLeft); 18 | srcLeft.start(); 19 | 20 | // right branch 21 | const gainRight = audioContext.createGain(); 22 | gainRight.gain.value = 0.; 23 | gainRight.connect(merger, 0, 1); 24 | 25 | const srcRight = audioContext.createOscillator(); 26 | srcRight.frequency.value = 300.; 27 | srcRight.connect(gainRight); 28 | srcRight.start(); 29 | 30 | // control both left and right gains with constant source 31 | const constantSource = audioContext.createConstantSource(); 32 | constantSource.offset.value = 0.; 33 | constantSource.connect(gainLeft.gain); 34 | constantSource.connect(gainRight.gain); 35 | constantSource.start(); 36 | 37 | let target = 0.; 38 | 39 | (function loop() { 40 | const now = audioContext.currentTime; 41 | constantSource.offset.setTargetAtTime(target, now, 0.1); 42 | 43 | target = 1 - target; 44 | 45 | setTimeout(loop, 1000); 46 | }()); 47 | 48 | -------------------------------------------------------------------------------- /examples/convolution.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext, ConvolverNode } from '../index.mjs'; 4 | 5 | // create an `AudioContext` and load a sound file 6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 
'playback' : 'interactive'; 7 | const audioContext = new AudioContext({ latencyHint }); 8 | 9 | audioContext.renderCapacity.addEventListener('update', e => { 10 | const { timestamp, averageLoad, peakLoad, underrunRatio } = e; 11 | console.log('AudioRenderCapacityEvent:', { timestamp, averageLoad, peakLoad, underrunRatio }); 12 | }); 13 | audioContext.renderCapacity.start({ updateInterval: 1.5 }); 14 | 15 | const arrayBuffer = fs.readFileSync(path.join('examples', 'samples', 'vocals-dry.wav')).buffer; 16 | const audioBuffer = await audioContext.decodeAudioData(arrayBuffer); 17 | 18 | const impulseFile1 = fs.readFileSync(path.join('examples', 'samples', 'small-room-response.wav')).buffer; 19 | const impulseBuffer1 = await audioContext.decodeAudioData(impulseFile1); 20 | 21 | const impulseFile2 = fs.readFileSync(path.join('examples', 'samples', 'parking-garage-response.wav')).buffer; 22 | const impulseBuffer2 = await audioContext.decodeAudioData(impulseFile2); 23 | 24 | const src = audioContext.createBufferSource(); 25 | src.buffer = audioBuffer; 26 | 27 | const convolve = new ConvolverNode(audioContext); 28 | 29 | src.connect(convolve); 30 | convolve.connect(audioContext.destination); 31 | 32 | src.start(); 33 | 34 | console.log('Dry'); 35 | await new Promise(resolve => setTimeout(resolve, 4000)); 36 | 37 | console.log('Small room'); 38 | convolve.buffer = impulseBuffer1; 39 | await new Promise(resolve => setTimeout(resolve, 4000)); 40 | 41 | console.log('Parking garage'); 42 | convolve.buffer = impulseBuffer2; 43 | await new Promise(resolve => setTimeout(resolve, 5000)); 44 | 45 | console.log('Stop input - flush out remaining impulse response'); 46 | src.stop(); 47 | await new Promise(resolve => setTimeout(resolve, 2000)); 48 | 49 | await audioContext.close(); 50 | 51 | -------------------------------------------------------------------------------- /examples/decoding-legacy.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext, OfflineAudioContext } from '../index.mjs'; 4 | 5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 
'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | const offlineContext = new OfflineAudioContext({ 9 | numberOfChannels: 1, 10 | length: 1, 11 | sampleRate: audioContext.sampleRate, 12 | }); 13 | 14 | const okFile = path.join('examples', 'samples', 'sample.wav'); 15 | const errFile = path.join('examples', 'samples', 'corrupt.wav'); 16 | 17 | function decodeSuccess(buffer) { 18 | console.log(`decodeSuccess`); 19 | const src = audioContext.createBufferSource(); 20 | src.buffer = buffer; 21 | src.connect(audioContext.destination); 22 | src.start(); 23 | } 24 | 25 | function decodeError(err) { 26 | console.log(`decodeError callback: ${err.message}`); 27 | } 28 | 29 | { 30 | // audioContext decode success 31 | const okArrayBuffer = fs.readFileSync(okFile).buffer; 32 | audioContext.decodeAudioData(okArrayBuffer, decodeSuccess, decodeError); 33 | // audioContext decode error 34 | const errArrayBuffer = fs.readFileSync(errFile).buffer; 35 | audioContext.decodeAudioData(errArrayBuffer, decodeSuccess, decodeError); 36 | } 37 | 38 | await new Promise(resolve => setTimeout(resolve, 3000)); 39 | 40 | { 41 | // offlineContext decode success 42 | const okArrayBuffer = fs.readFileSync(okFile).buffer; 43 | offlineContext.decodeAudioData(okArrayBuffer, decodeSuccess, decodeError); 44 | // offlineContext decode error 45 | const errArrayBuffer = fs.readFileSync(errFile).buffer; 46 | offlineContext.decodeAudioData(errArrayBuffer, decodeSuccess, decodeError); 47 | } 48 | 49 | await new Promise(resolve => setTimeout(resolve, 3000)); 50 | await audioContext.close(); 51 | -------------------------------------------------------------------------------- /examples/decoding.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext } from '../index.mjs'; 4 | 5 | const files = [ 6 | path.join('examples', 'samples', 'sample-faulty.wav'), 7 | path.join('examples', 'samples', 'sample.wav'), 8 | path.join('examples', 'samples', 'sample.flac'), 9 | path.join('examples', 'samples', 'sample.ogg'), 10 | path.join('examples', 'samples', 'sample.mp3'), 11 | // cannot decode, format not supported or file corrupted 12 | path.join('examples', 'samples', 'empty_2c.wav'), 13 | path.join('examples', 'samples', 'corrupt.wav'), 14 | path.join('examples', 'samples', 'sample.aiff'), 15 | path.join('examples', 'samples', 'sample.webm'), // 48kHz, 16 | ]; 17 | 18 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 
'playback' : 'interactive'; 19 | const audioContext = new AudioContext({ latencyHint }); 20 | 21 | for (let filepath of files) { 22 | console.log('> --------------------------------'); 23 | 24 | try { 25 | const arrayBuffer = fs.readFileSync(filepath).buffer; 26 | const buffer = await audioContext.decodeAudioData(arrayBuffer); 27 | 28 | console.log('> playing file: %s', filepath); 29 | console.log('> duration: %s', buffer.duration); 30 | console.log('> length: %s', buffer.length); 31 | console.log('> channels: %s', buffer.numberOfChannels); 32 | console.log('> sample rate: %s', buffer.sampleRate); 33 | console.log('> --------------------------------'); 34 | 35 | const src = audioContext.createBufferSource(); 36 | src.connect(audioContext.destination); 37 | src.buffer = buffer; 38 | src.start(); 39 | 40 | await new Promise(resolve => setTimeout(resolve, 4 * 1000)); 41 | } catch (err) { 42 | console.log('> Error decoding audio file: %s', filepath); 43 | console.log(err); 44 | console.log('> --------------------------------'); 45 | 46 | await new Promise(resolve => setTimeout(resolve, 1 * 1000)); 47 | } 48 | } 49 | 50 | await audioContext.close(); 51 | -------------------------------------------------------------------------------- /examples/disconnect.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | const gain = audioContext.createGain(); 7 | gain.connect(audioContext.destination); 8 | 9 | const osc = audioContext.createOscillator(); 10 | osc.connect(gain); 11 | osc.start(); 12 | 13 | await new Promise(resolve => setTimeout(resolve, 1000)); 14 | 15 | console.log('osc.disconnect()'); 16 | osc.disconnect(); 17 | 18 | await new Promise(resolve => setTimeout(resolve, 1000)); 19 | 20 | console.log('osc reconnect to gain'); 21 | osc.connect(gain); 22 | 23 | await new Promise(resolve => setTimeout(resolve, 1000)); 24 | 25 | console.log('osc.disconnect(gain)'); 26 | osc.disconnect(gain); 27 | 28 | await audioContext.close(); 29 | -------------------------------------------------------------------------------- /examples/doppler.js: -------------------------------------------------------------------------------- 1 | import path from 'node:path'; 2 | import fs from 'node:fs'; 3 | import { AudioContext, PannerNode } from '../index.mjs'; 4 | 5 | /* 6 | * This example features a 'true physics' Doppler effect. 7 | * 8 | * The basics are very simple: we just add a DelayNode that represents the finite speed of sound. 9 | * Speed of sound = 343 m/s 10 | * So a siren 100 meters away from you will have a delay of about 0.3 seconds. A siren near you 11 | * obviously has no delay. 12 | * 13 | * We combine a delay node with a panner node that represents the moving siren. When the panner 14 | * node moves closer to the listener, we decrease the delay time linearly. This gives the Doppler 15 | * effect. 16 | */
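// Quick sanity check of the numbers used below (illustrative helpers only,
// the example itself hardcodes `dopplerMax`): with the listener 1 m off the
// road and the siren at y = +/-100 m, the straight-line distance is ~100 m,
// hence a propagation delay of ~100 / 343 ≈ 0.29 s.
const SPEED_OF_SOUND = 343.; // m/s
const delayForPosition = (x, y, z) => Math.hypot(x, y, z) / SPEED_OF_SOUND;
console.assert(Math.abs(delayForPosition(0., 100., 1.) - 100. / 343.) < 1e-3);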
17 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 18 | const audioContext = new AudioContext({ latencyHint }); 19 | 20 | const arrayBuffer = fs.readFileSync(path.join('examples', 'samples', 'siren.mp3')).buffer; 21 | const buffer = await audioContext.decodeAudioData(arrayBuffer); 22 | 23 | const pannerOptions = { 24 | panningModel: 'equalpower', 25 | distanceModel: 'inverse', 26 | positionX: 0., 27 | positionY: 100., // siren starts 100 meters away 28 | positionZ: 1., // we stand 1 meter away from the road 29 | orientationX: 1., 30 | orientationY: 0., 31 | orientationZ: 0., 32 | refDistance: 1., 33 | maxDistance: 10000., 34 | rolloffFactor: 1., 35 | // no cone effect: 36 | coneInnerAngle: 360., 37 | coneOuterAngle: 0., 38 | coneOuterGain: 0., 39 | }; 40 | 41 | const now = audioContext.currentTime; 42 | 43 | const panner = new PannerNode(audioContext, pannerOptions); 44 | panner.connect(audioContext.destination); 45 | // move the siren in 10 seconds from y = 100 to y = -100 46 | panner.positionY.linearRampToValueAtTime(-100., now + 10.); 47 | 48 | // The delay starts with value 0.3, reaches 0 when the siren crosses us, then goes back to 0.3 49 | const delay = audioContext.createDelay(1.); 50 | delay.connect(panner); 51 | const dopplerMax = 100. / 343.; 52 | delay.delayTime.setValueAtTime(dopplerMax, now) 53 | .linearRampToValueAtTime(0., now + 5.) 54 | .linearRampToValueAtTime(dopplerMax, now + 10.); 55 | 56 | const src = audioContext.createBufferSource(); 57 | src.connect(delay); 58 | src.buffer = buffer; 59 | src.loop = true; 60 | src.start(now); 61 | 62 | await new Promise(resolve => setTimeout(resolve, 10 * 1000)); 63 | 64 | await audioContext.close(); 65 | -------------------------------------------------------------------------------- /examples/ended-event.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext, AudioBufferSourceNode, OscillatorNode, ConstantSourceNode } from '../index.mjs'; 4 | 5 | 6 | // test that if the context is closed before the ended event is triggered, 7 | // the underlying tsfn is properly aborted 8 | const TEST_ABORT_EARLY = false; 9 | // test factory methods or node constructors 10 | const USE_FACTORY_METHODS = true; 11 | 12 | 13 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 14 | const audioContext = new AudioContext({ latencyHint }); 15 | 16 | const buffer = fs.readFileSync(path.join('examples', 'samples', 'sample.wav')).buffer; 17 | const audioBuffer = await audioContext.decodeAudioData(buffer); 18 | 19 | { 20 | const src = USE_FACTORY_METHODS 21 | ? audioContext.createBufferSource() 22 | : new AudioBufferSourceNode(audioContext); 23 | 24 | src.buffer = audioBuffer; 25 | src.connect(audioContext.destination); 26 | // src.buffer = audioBuffer; 27 | src.addEventListener('ended', (e) => { 28 | console.log('> AudioBufferSourceNode::onended', e); 29 | }); 30 | 31 | src.start(); 32 | } 33 | 34 | if (TEST_ABORT_EARLY) { 35 | await new Promise(resolve => setTimeout(resolve, 1000)); 36 | console.log('> closing context'); 37 | await audioContext.close(); 38 | } else { 39 | await new Promise(resolve => setTimeout(resolve, 5 * 1000)); 40 | 41 | { 42 | const src = USE_FACTORY_METHODS 43 | ?
audioContext.createOscillator() 44 | : new OscillatorNode(audioContext); 45 | 46 | src.frequency.value = 200; 47 | src.connect(audioContext.destination); 48 | // src.buffer = audioBuffer; 49 | src.addEventListener('ended', (e) => { 50 | console.log('> OscillatorNode::onended', e); 51 | }); 52 | 53 | const now = audioContext.currentTime; 54 | src.start(now); 55 | src.stop(now + 1); 56 | } 57 | 58 | await new Promise(resolve => setTimeout(resolve, 1500)); 59 | 60 | { 61 | const src = USE_FACTORY_METHODS 62 | ? audioContext.createConstantSource() 63 | : new ConstantSourceNode(audioContext); 64 | 65 | src.offset.value = 0.1; 66 | src.connect(audioContext.destination); 67 | // src.buffer = audioBuffer; 68 | src.addEventListener('ended', (e) => { 69 | console.log('> ConstantSourceNode::onended', e); 70 | }); 71 | 72 | const now = audioContext.currentTime; 73 | src.start(now); 74 | src.stop(now + 1); 75 | } 76 | 77 | await new Promise(resolve => setTimeout(resolve, 1500)); 78 | 79 | console.log('> closing context'); 80 | await audioContext.close(); 81 | } 82 | 83 | -------------------------------------------------------------------------------- /examples/feedback-delay.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | function triggerSine(audioContext, delayInput) { 4 | const now = audioContext.currentTime; 5 | const baseFreq = 100.; 6 | const numPartial = 1 + Math.floor(Math.random() * 20); 7 | 8 | const env = audioContext.createGain(); 9 | env.connect(delayInput); 10 | env.gain.setValueAtTime(0., now); 11 | env.gain.linearRampToValueAtTime(1. / numPartial, now + 0.02); 12 | env.gain.exponentialRampToValueAtTime(0.0001, now + 1.); 13 | 14 | const osc = audioContext.createOscillator(); 15 | osc.connect(env); 16 | osc.frequency.value = baseFreq * numPartial; 17 | osc.start(now); 18 | osc.stop(now + 1.); 19 | } 20 | 21 | 22 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 23 | const audioContext = new AudioContext({ latencyHint }); 24 | 25 | // create feedback delay graph layout 26 | // |<- feedback <-| 27 | // |-> pre-gain -----> delay ------>| 28 | // src ---> input ----------------------------------> output 29 | 30 | const output = audioContext.createGain(); 31 | output.connect(audioContext.destination); 32 | 33 | const delay = audioContext.createDelay(1.); 34 | delay.delayTime.value = 0.3; 35 | delay.connect(output); 36 | 37 | const feedback = audioContext.createGain(); 38 | feedback.gain.value = 0.85; 39 | feedback.connect(delay); 40 | delay.connect(feedback); 41 | 42 | const preGain = audioContext.createGain(); 43 | preGain.gain.value = 0.5; 44 | preGain.connect(feedback); 45 | 46 | const input = audioContext.createGain(); 47 | input.connect(preGain); 48 | input.connect(audioContext.destination); // direct sound 49 | 50 | (function loop() { 51 | triggerSine(audioContext, input); 52 | 53 | const period = Math.floor(Math.random() * 930) + 170; 54 | setTimeout(loop, period); 55 | }()); 56 | 57 | -------------------------------------------------------------------------------- /examples/granular-scrub.js: -------------------------------------------------------------------------------- 1 | import path from 'node:path'; 2 | import fs from 'node:fs'; 3 | import { Scheduler } from '@ircam/sc-scheduling'; 4 | import { AudioContext } from '../index.mjs'; 5 | 6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 
'playback' : 'interactive'; 7 | const audioContext = new AudioContext({ latencyHint }); 8 | 9 | const scheduler = new Scheduler(() => audioContext.currentTime); 10 | 11 | const arrayBuffer = fs.readFileSync(path.join('examples', 'samples', 'sample.wav')).buffer; 12 | const buffer = await audioContext.decodeAudioData(arrayBuffer); 13 | 14 | const period = 0.05; 15 | const grainDuration = 0.2; 16 | let incr = period / 2; 17 | let position = 0; 18 | 19 | const engine = (currentTime) => { 20 | currentTime = Math.max(currentTime, audioContext.currentTime); 21 | 22 | if ( 23 | position + incr > buffer.duration - 2 * grainDuration 24 | || position + incr < 0 25 | ) { 26 | incr *= -1; 27 | } 28 | 29 | const now = currentTime + Math.random() * 0.005; 30 | 31 | const env = audioContext.createGain(); 32 | env.connect(audioContext.destination); 33 | env.gain.value = 0; 34 | 35 | const src = audioContext.createBufferSource(); 36 | src.buffer = buffer; 37 | src.connect(env); 38 | // add a bit of random detune 39 | const detune = 4; 40 | src.detune.value = Math.random() * 2 * detune - detune; 41 | 42 | env.gain.setValueAtTime(0, now); 43 | env.gain.linearRampToValueAtTime(1, now + grainDuration / 2); 44 | env.gain.linearRampToValueAtTime(0, now + grainDuration); 45 | 46 | src.start(now, position); 47 | src.stop(now + grainDuration); 48 | 49 | position += incr; 50 | 51 | return currentTime + period; 52 | }; 53 | 54 | scheduler.add(engine); 55 | -------------------------------------------------------------------------------- /examples/iir-filter.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext } from '../index.mjs'; 4 | 5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | const pathname = path.join('examples', 'samples', 'think-stereo-48000.wav'); 9 | const arrayBuffer = fs.readFileSync(pathname).buffer; 10 | const buffer = await audioContext.decodeAudioData(arrayBuffer); 11 | 12 | // these values correspond to a lowpass filter at 200Hz (calculated from biquad) 13 | const feedforward = new Float64Array([0.0002029799640409502, 0.0004059599280819004, 0.0002029799640409502]); 14 | const feedback = new Float64Array([1.0126964557853775, -1.9991880801438362, 0.9873035442146225]); 15 | // @todo - for now the API doesn't support raw Arrays 16 | // const feedforward = [0.0002029799640409502, 0.0004059599280819004, 0.0002029799640409502]; 17 | // const feedback = [1.0126964557853775, -1.9991880801438362, 0.9873035442146225]; 18 | 19 | // Create an IIR filter node 20 | const iir = audioContext.createIIRFilter(feedforward, feedback); 21 | iir.connect(audioContext.destination); 22 | 23 | // Play buffer and pipe to filter 24 | const src = audioContext.createBufferSource(); 25 | src.connect(iir); 26 | src.buffer = buffer; 27 | src.loop = true; 28 | src.start(); 29 | -------------------------------------------------------------------------------- /examples/latency-attributes.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | console.log('AudioContextLatencyCategory::Interactive'); 4 | 5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ?
'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | const sine = audioContext.createOscillator(); 9 | sine.frequency.value = 200.; 10 | sine.connect(audioContext.destination); 11 | sine.start(); 12 | 13 | console.log('- BaseLatency:', audioContext.baseLatency); 14 | 15 | (function loop() { 16 | console.log('-------------------------------------------------'); 17 | console.log('+ currentTime:', audioContext.currentTime); 18 | console.log('+ OutputLatency:', audioContext.outputLatency); 19 | 20 | setTimeout(loop, 1000); 21 | }()); 22 | -------------------------------------------------------------------------------- /examples/many-oscillators-with-env.js: -------------------------------------------------------------------------------- 1 | import { AudioContext, OscillatorNode, GainNode } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | setInterval(() => { 7 | const now = audioContext.currentTime; 8 | const frequency = 200 + Math.random() * 2800; 9 | 10 | const env = new GainNode(audioContext, { gain: 0 }); 11 | env.connect(audioContext.destination); 12 | env.gain 13 | .setValueAtTime(0, now) 14 | .linearRampToValueAtTime(0.2, now + 0.02) 15 | .exponentialRampToValueAtTime(0.0001, now + 1); 16 | 17 | const osc = new OscillatorNode(audioContext, { frequency }); 18 | osc.connect(env); 19 | osc.start(now); 20 | osc.stop(now + 1); 21 | }, 80); 22 | -------------------------------------------------------------------------------- /examples/microphone.js: -------------------------------------------------------------------------------- 1 | import { 2 | mediaDevices, 3 | AudioContext, 4 | // eslint-disable-next-line no-unused-vars 5 | MediaStreamAudioSourceNode, 6 | } from '../index.mjs'; 7 | 8 | console.log('MediaDevices::getUserMedia - mic feedback, be careful with volume...'); 9 | 10 | const mediaStream = await mediaDevices.getUserMedia({ audio: true }); 11 | 12 | const audioContext = new AudioContext(); 13 | await audioContext.resume(); 14 | 15 | // const source = new MediaStreamAudioSourceNode(audioContext, { mediaStream }); 16 | const source = audioContext.createMediaStreamSource(mediaStream); // factory API 17 | source.connect(audioContext.destination); 18 | 19 | -------------------------------------------------------------------------------- /examples/multichannel.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | // Example of multichannel routing; for now the library can only handle up to 4 | // 32 channels.
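// Routing approach: each 1-second oscillator below is connected to a
// different input of a ChannelMerger, and both the merger and the
// destination use `discrete` channelInterpretation so channels map 1:1
// to the soundcard outputs instead of being up/down-mixed.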
5 | // 6 | // The example can be tested with a virtual soundcard such as Blackhole 7 | // https://github.com/ExistentialAudio/BlackHole 8 | // - make it as the default system output 9 | // - then use blackhole as input in another program to check the program output 10 | // (see `multichannel.maxpat` if you have Max installed, @todo make a Pd version) 11 | 12 | const audioContext = new AudioContext(); 13 | 14 | console.log('> Max channel count:', audioContext.destination.maxChannelCount); 15 | 16 | const numChannels = audioContext.destination.maxChannelCount; 17 | 18 | audioContext.destination.channelCount = numChannels; 19 | audioContext.destination.channelInterpretation = 'discrete'; 20 | 21 | await audioContext.resume(); 22 | 23 | const merger = audioContext.createChannelMerger(numChannels); 24 | merger.channelInterpretation = 'discrete'; 25 | merger.connect(audioContext.destination); 26 | 27 | let outputChannel = 0; 28 | 29 | setInterval(() => { 30 | console.log('- output sine in channel', outputChannel); 31 | 32 | const osc = audioContext.createOscillator(); 33 | osc.connect(merger, 0, outputChannel); 34 | osc.frequency.value = 200; 35 | osc.start(); 36 | osc.stop(audioContext.currentTime + 1); 37 | 38 | outputChannel = (outputChannel + 1) % numChannels; 39 | }, 1000); 40 | -------------------------------------------------------------------------------- /examples/multichannel.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 8, 6 | "minor" : 5, 7 | "revision" : 3, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : "box", 13 | "rect" : [ 309.0, 671.0, 1188.0, 539.0 ], 14 | "bglocked" : 0, 15 | "openinpresentation" : 0, 16 | "default_fontsize" : 12.0, 17 | "default_fontface" : 0, 18 | "default_fontname" : "Arial", 19 | "gridonopen" : 1, 20 | "gridsize" : [ 15.0, 15.0 ], 21 | "gridsnaponopen" : 1, 22 | "objectsnaponopen" : 1, 23 | "statusbarvisible" : 2, 24 | "toolbarvisible" : 1, 25 | "lefttoolbarpinned" : 0, 26 | "toptoolbarpinned" : 0, 27 | "righttoolbarpinned" : 0, 28 | "bottomtoolbarpinned" : 0, 29 | "toolbars_unpinned_last_save" : 0, 30 | "tallnewobj" : 0, 31 | "boxanimatetime" : 200, 32 | "enablehscroll" : 1, 33 | "enablevscroll" : 1, 34 | "devicewidth" : 0.0, 35 | "description" : "", 36 | "digest" : "", 37 | "tags" : "", 38 | "style" : "", 39 | "subpatcher_template" : "", 40 | "assistshowspatchername" : 0, 41 | "boxes" : [ { 42 | "box" : { 43 | "id" : "obj-5", 44 | "maxclass" : "meter~", 45 | "numinlets" : 1, 46 | "numoutlets" : 1, 47 | "outlettype" : [ "float" ], 48 | "patching_rect" : [ 606.428571428571445, 189.0, 19.0, 65.0 ] 49 | } 50 | 51 | } 52 | , { 53 | "box" : { 54 | "id" : "obj-6", 55 | "maxclass" : "meter~", 56 | "numinlets" : 1, 57 | "numoutlets" : 1, 58 | "outlettype" : [ "float" ], 59 | "patching_rect" : [ 575.428571428571445, 189.0, 19.0, 65.0 ] 60 | } 61 | 62 | } 63 | , { 64 | "box" : { 65 | "id" : "obj-7", 66 | "maxclass" : "meter~", 67 | "numinlets" : 1, 68 | "numoutlets" : 1, 69 | "outlettype" : [ "float" ], 70 | "patching_rect" : [ 548.0, 189.0, 19.0, 65.0 ] 71 | } 72 | 73 | } 74 | , { 75 | "box" : { 76 | "id" : "obj-8", 77 | "maxclass" : "meter~", 78 | "numinlets" : 1, 79 | "numoutlets" : 1, 80 | "outlettype" : [ "float" ], 81 | "patching_rect" : [ 519.0, 189.0, 19.0, 65.0 ] 82 | } 83 | 84 | } 85 | , { 86 | "box" : { 87 | "id" : "obj-4", 88 | "maxclass" : "meter~", 89 | "numinlets" : 1, 90 | "numoutlets" : 
1, 91 | "outlettype" : [ "float" ], 92 | "patching_rect" : [ 488.428571428571445, 189.0, 19.0, 65.0 ] 93 | } 94 | 95 | } 96 | , { 97 | "box" : { 98 | "id" : "obj-27", 99 | "maxclass" : "meter~", 100 | "numinlets" : 1, 101 | "numoutlets" : 1, 102 | "outlettype" : [ "float" ], 103 | "patching_rect" : [ 457.428571428571445, 189.0, 19.0, 65.0 ] 104 | } 105 | 106 | } 107 | , { 108 | "box" : { 109 | "id" : "obj-26", 110 | "maxclass" : "meter~", 111 | "numinlets" : 1, 112 | "numoutlets" : 1, 113 | "outlettype" : [ "float" ], 114 | "patching_rect" : [ 430.0, 189.0, 19.0, 65.0 ] 115 | } 116 | 117 | } 118 | , { 119 | "box" : { 120 | "id" : "obj-25", 121 | "maxclass" : "meter~", 122 | "numinlets" : 1, 123 | "numoutlets" : 1, 124 | "outlettype" : [ "float" ], 125 | "patching_rect" : [ 401.0, 189.0, 19.0, 65.0 ] 126 | } 127 | 128 | } 129 | , { 130 | "box" : { 131 | "id" : "obj-3", 132 | "maxclass" : "toggle", 133 | "numinlets" : 1, 134 | "numoutlets" : 1, 135 | "outlettype" : [ "int" ], 136 | "parameter_enable" : 0, 137 | "patching_rect" : [ 430.0, 75.0, 24.0, 24.0 ] 138 | } 139 | 140 | } 141 | , { 142 | "box" : { 143 | "id" : "obj-1", 144 | "maxclass" : "newobj", 145 | "numinlets" : 1, 146 | "numoutlets" : 8, 147 | "outlettype" : [ "signal", "signal", "signal", "signal", "signal", "signal", "signal", "signal" ], 148 | "patching_rect" : [ 430.0, 124.0, 115.0, 22.0 ], 149 | "text" : "adc~ 1 2 3 4 5 6 7 8" 150 | } 151 | 152 | } 153 | ], 154 | "lines" : [ { 155 | "patchline" : { 156 | "destination" : [ "obj-25", 0 ], 157 | "source" : [ "obj-1", 0 ] 158 | } 159 | 160 | } 161 | , { 162 | "patchline" : { 163 | "destination" : [ "obj-26", 0 ], 164 | "source" : [ "obj-1", 1 ] 165 | } 166 | 167 | } 168 | , { 169 | "patchline" : { 170 | "destination" : [ "obj-27", 0 ], 171 | "source" : [ "obj-1", 2 ] 172 | } 173 | 174 | } 175 | , { 176 | "patchline" : { 177 | "destination" : [ "obj-4", 0 ], 178 | "source" : [ "obj-1", 3 ] 179 | } 180 | 181 | } 182 | , { 183 | "patchline" : { 184 | "destination" : [ "obj-5", 0 ], 185 | "source" : [ "obj-1", 7 ] 186 | } 187 | 188 | } 189 | , { 190 | "patchline" : { 191 | "destination" : [ "obj-6", 0 ], 192 | "source" : [ "obj-1", 6 ] 193 | } 194 | 195 | } 196 | , { 197 | "patchline" : { 198 | "destination" : [ "obj-7", 0 ], 199 | "source" : [ "obj-1", 5 ] 200 | } 201 | 202 | } 203 | , { 204 | "patchline" : { 205 | "destination" : [ "obj-8", 0 ], 206 | "source" : [ "obj-1", 4 ] 207 | } 208 | 209 | } 210 | , { 211 | "patchline" : { 212 | "destination" : [ "obj-1", 0 ], 213 | "source" : [ "obj-3", 0 ] 214 | } 215 | 216 | } 217 | ], 218 | "dependency_cache" : [ ], 219 | "autosave" : 0 220 | } 221 | 222 | } 223 | -------------------------------------------------------------------------------- /examples/multiple-contexts.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | { 4 | console.log('> Creating AudioContext #1 - playing sine at 200Hz'); 5 | const audioContext = new AudioContext(); 6 | const src = audioContext.createOscillator(); 7 | src.frequency.value = 200; 8 | src.connect(audioContext.destination); 9 | src.start(); 10 | } 11 | 12 | { 13 | console.log('> Creating AudioContext #2 - playing sine at 300Hz'); 14 | const audioContext = new AudioContext(); 15 | const src = audioContext.createOscillator(); 16 | src.frequency.value = 300; 17 | src.connect(audioContext.destination); 18 | src.start(); 19 | } 20 | -------------------------------------------------------------------------------- 
/examples/offline.js: -------------------------------------------------------------------------------- 1 | import { AudioContext, OfflineAudioContext } from '../index.mjs'; 2 | 3 | const offline = new OfflineAudioContext(1, 48000, 48000); 4 | 5 | offline.addEventListener('complete', (e) => { 6 | console.log('+ complete event:', e.renderedBuffer.toString()); 7 | }); 8 | 9 | offline.suspend(128 / 48000).then(async () => { 10 | const osc = offline.createOscillator(); 11 | osc.connect(offline.destination); 12 | osc.frequency.value = 220; 13 | osc.start(0.); 14 | osc.stop(1.); 15 | 16 | await offline.resume(); 17 | }); 18 | 19 | const buffer = await offline.startRendering(); 20 | console.log('+ buffer duration:', buffer.duration); 21 | 22 | console.log(''); 23 | console.log('> Playback computed buffer in loop, should hear a small silent gap in the middle'); 24 | 25 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 26 | const online = new AudioContext({ latencyHint }); 27 | 28 | const src = online.createBufferSource(); 29 | src.buffer = buffer; 30 | src.loop = true; 31 | src.connect(online.destination); 32 | src.start(); 33 | 34 | await new Promise(resolve => setTimeout(resolve, 2000)); 35 | 36 | console.log('+ close context'); 37 | await online.close(); 38 | -------------------------------------------------------------------------------- /examples/oscillators.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | const osc = audioContext.createOscillator(); 7 | osc.connect(audioContext.destination); 8 | osc.start(); 9 | 10 | const intervalTime = 2.; 11 | 12 | console.log('Sine'); 13 | 14 | osc.frequency.linearRampToValueAtTime(880., audioContext.currentTime + intervalTime); 15 | 16 | await new Promise(resolve => setTimeout(resolve, intervalTime * 1000)); 17 | console.log('Square'); 18 | 19 | osc.type = 'square'; 20 | osc.frequency.linearRampToValueAtTime(440., audioContext.currentTime + intervalTime); 21 | 22 | await new Promise(resolve => setTimeout(resolve, intervalTime * 1000)); 23 | console.log('Triangle'); 24 | 25 | osc.type = 'triangle'; 26 | osc.frequency.linearRampToValueAtTime(880., audioContext.currentTime + intervalTime); 27 | 28 | await new Promise(resolve => setTimeout(resolve, intervalTime * 1000)); 29 | console.log('Sawtooth'); 30 | 31 | osc.type = 'sawtooth'; 32 | osc.frequency.linearRampToValueAtTime(440., audioContext.currentTime + intervalTime); 33 | 34 | await new Promise(resolve => setTimeout(resolve, intervalTime * 1000)); 35 | console.log('PeriodicWave'); 36 | 37 | const real = new Float32Array([0., 0.5, 0.5]); 38 | const imag = new Float32Array([0., 0., 0.]); 39 | const constraints = { disableNormalization: false }; 40 | 41 | const periodicWave = audioContext.createPeriodicWave(real, imag, constraints); 42 | // const periodicWave = audioContext.createPeriodicWave(real, imag); 43 | 44 | osc.setPeriodicWave(periodicWave); 45 | osc.frequency.linearRampToValueAtTime(880., audioContext.currentTime + intervalTime); 46 | 47 | await new Promise(resolve => setTimeout(resolve, intervalTime * 1000)); 48 | 49 | await audioContext.close(); 50 | 51 | -------------------------------------------------------------------------------- /examples/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "type": "module" 3 | } 4 | -------------------------------------------------------------------------------- /examples/panner.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | audioContext.listener.positionZ.value = 1; 7 | audioContext.listener.positionX.value = -10; 8 | audioContext.listener.positionX.linearRampToValueAtTime(10, 4); 9 | 10 | const osc = audioContext.createOscillator(); 11 | const panner = audioContext.createPanner(); 12 | osc.connect(panner); 13 | panner.connect(audioContext.destination); 14 | osc.start(); 15 | 16 | let direction = 1; 17 | setInterval(function loop() { 18 | console.log(audioContext.listener.positionX.value); 19 | 20 | if (Math.abs(audioContext.listener.positionX.value) >= 10.) { 21 | direction *= -1; 22 | const now = audioContext.currentTime; 23 | audioContext.listener.positionX.linearRampToValueAtTime(10 * direction, now + 4); 24 | } 25 | }, 500); 26 | 27 | -------------------------------------------------------------------------------- /examples/resampling.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext } from '../index.mjs'; 4 | 5 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 6 | const audioContext = new AudioContext({ latencyHint }); 7 | 8 | console.log('> AudioContext sampleRate: %f', audioContext.sampleRate); 9 | 10 | { 11 | console.log('--------------------------------------------------------------'); 12 | console.log('> Case 1: buffers are decoded at right sample rate by context'); 13 | console.log('--------------------------------------------------------------'); 14 | 15 | const file38000 = fs.readFileSync(path.join('examples', 'samples', 'sample-38000.wav')).buffer; 16 | const buffer38000 = await audioContext.decodeAudioData(file38000); 17 | 18 | const file44100 = fs.readFileSync(path.join('examples', 'samples', 'sample-44100.wav')).buffer; 19 | const buffer44100 = await audioContext.decodeAudioData(file44100); 20 | 21 | const file48000 = fs.readFileSync(path.join('examples', 'samples', 'sample-48000.wav')).buffer; 22 | const buffer48000 = await audioContext.decodeAudioData(file48000); 23 | 24 | // audio context at default system sample rate 25 | { 26 | console.log('+ playing sample-38000.wav - decoded sample rate: %f', buffer38000.sampleRate); 27 | 28 | const src = audioContext.createBufferSource(); 29 | src.connect(audioContext.destination); 30 | src.buffer = buffer38000; 31 | src.start(); 32 | } 33 | 34 | await new Promise(resolve => setTimeout(resolve, 3500)); 35 | 36 | { 37 | console.log('+ playing sample-44100.wav - decoded sample rate: %f', buffer44100.sampleRate); 38 | 39 | const src = audioContext.createBufferSource(); 40 | src.connect(audioContext.destination); 41 | src.buffer = buffer44100; 42 | src.start(); 43 | } 44 | 45 | await new Promise(resolve => setTimeout(resolve, 3500)); 46 | 47 | { 48 | console.log('+ playing sample-48000.wav - decoded sample rate: %f', buffer48000.sampleRate); 49 | 50 | const src = audioContext.createBufferSource(); 51 | src.connect(audioContext.destination); 52 | src.buffer = buffer48000; 53 
| src.start(); 54 | } 55 | 56 | await new Promise(resolve => setTimeout(resolve, 3500)); 57 | } 58 | 59 | { 60 | console.log('--------------------------------------------------------------'); 61 | console.log('> Case 2: buffers are decoded with another sample rate, then resampled by the AudioBufferSourceNode'); 62 | console.log('--------------------------------------------------------------'); 63 | 64 | const audioContext38000 = new AudioContext({ 65 | sampleRate: 38000., 66 | latencyHint: 'interactive', 67 | }); 68 | const file38000 = fs.readFileSync(path.join('examples', 'samples', 'sample-38000.wav')).buffer; 69 | const buffer38000 = await audioContext38000.decodeAudioData(file38000); 70 | await audioContext38000.close(); 71 | 72 | const audioContext44100 = new AudioContext({ 73 | sampleRate: 44100., 74 | latencyHint: 'interactive', 75 | }); 76 | const file44100 = fs.readFileSync(path.join('examples', 'samples', 'sample-44100.wav')).buffer; 77 | const buffer44100 = await audioContext44100.decodeAudioData(file44100); 78 | await audioContext44100.close(); 79 | 80 | const audioContext48000 = new AudioContext({ 81 | sampleRate: 48000., 82 | latencyHint: 'interactive', 83 | }); 84 | const file48000 = fs.readFileSync(path.join('examples', 'samples', 'sample-48000.wav')).buffer; 85 | const buffer48000 = await audioContext48000.decodeAudioData(file48000); 86 | await audioContext48000.close(); 87 | 88 | { 89 | // audio context at default system sample rate 90 | console.log('+ playing sample-38000.wav - decoded sample rate: %f', buffer38000.sampleRate); 91 | 92 | const src = audioContext.createBufferSource(); 93 | src.connect(audioContext.destination); 94 | src.buffer = buffer38000; 95 | src.start(); 96 | } 97 | 98 | await new Promise(resolve => setTimeout(resolve, 3500)); 99 | 100 | { 101 | console.log('+ playing sample-44100.wav - decoded sample rate: %f', buffer44100.sampleRate); 102 | 103 | const src = audioContext.createBufferSource(); 104 | src.connect(audioContext.destination); 105 | src.buffer = buffer44100; 106 | src.start(); 107 | } 108 | 109 | await new Promise(resolve => setTimeout(resolve, 3500)); 110 | 111 | { 112 | console.log('+ playing sample-48000.wav - decoded sample rate: %f', buffer48000.sampleRate); 113 | 114 | const src = audioContext.createBufferSource(); 115 | src.connect(audioContext.destination); 116 | src.buffer = buffer48000; 117 | src.start(); 118 | } 119 | 120 | await new Promise(resolve => setTimeout(resolve, 3500)); 121 | } 122 | 123 | await audioContext.close(); 124 | -------------------------------------------------------------------------------- /examples/samples/corrupt.wav: -------------------------------------------------------------------------------- 1 | RIFF%WAVEfmt  2 | -------------------------------------------------------------------------------- /examples/samples/empty_2c.wav: -------------------------------------------------------------------------------- 1 | RIFF%WAVEfmt  2 | -------------------------------------------------------------------------------- /examples/samples/major-scale.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/major-scale.ogg -------------------------------------------------------------------------------- /examples/samples/parking-garage-response.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/parking-garage-response.wav -------------------------------------------------------------------------------- /examples/samples/sample-38000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample-38000.wav -------------------------------------------------------------------------------- /examples/samples/sample-44100.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample-44100.wav -------------------------------------------------------------------------------- /examples/samples/sample-48000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample-48000.wav -------------------------------------------------------------------------------- /examples/samples/sample-faulty.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample-faulty.wav -------------------------------------------------------------------------------- /examples/samples/sample.aiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.aiff -------------------------------------------------------------------------------- /examples/samples/sample.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.flac -------------------------------------------------------------------------------- /examples/samples/sample.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.mp3 -------------------------------------------------------------------------------- /examples/samples/sample.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.ogg -------------------------------------------------------------------------------- /examples/samples/sample.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.wav -------------------------------------------------------------------------------- /examples/samples/sample.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/sample.webm -------------------------------------------------------------------------------- 
/examples/samples/siren.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/siren.mp3 -------------------------------------------------------------------------------- /examples/samples/small-room-response.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/small-room-response.wav -------------------------------------------------------------------------------- /examples/samples/think-mono-38000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-mono-38000.wav -------------------------------------------------------------------------------- /examples/samples/think-mono-44100.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-mono-44100.wav -------------------------------------------------------------------------------- /examples/samples/think-mono-48000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-mono-48000.wav -------------------------------------------------------------------------------- /examples/samples/think-stereo-38000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-stereo-38000.wav -------------------------------------------------------------------------------- /examples/samples/think-stereo-44100.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-stereo-44100.wav -------------------------------------------------------------------------------- /examples/samples/think-stereo-48000.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/think-stereo-48000.wav -------------------------------------------------------------------------------- /examples/samples/vocals-dry.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/vocals-dry.wav -------------------------------------------------------------------------------- /examples/samples/white.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ircam-ismm/node-web-audio-api/63526ce3df3dfae914a8e37aa4181a3614330d1c/examples/samples/white.ogg -------------------------------------------------------------------------------- /examples/script-processor.js: -------------------------------------------------------------------------------- 1 | import { AudioContext, 
OscillatorNode } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | const sine = new OscillatorNode(audioContext); 7 | sine.frequency.value = 200; 8 | 9 | const scriptProcessor = audioContext.createScriptProcessor(); 10 | 11 | scriptProcessor.addEventListener('audioprocess', e => { 12 | const input = e.inputBuffer.getChannelData(0); 13 | const output = e.outputBuffer.getChannelData(0); 14 | 15 | // should hear noise only on the left channel 16 | for (let i = 0; i < output.length; i++) { 17 | output[i] = input[i] + Math.random() * 2 - 1; 18 | } 19 | }); 20 | 21 | sine 22 | .connect(scriptProcessor) 23 | .connect(audioContext.destination); 24 | 25 | sine.start(); 26 | -------------------------------------------------------------------------------- /examples/sink-id.js: -------------------------------------------------------------------------------- 1 | import path from 'node:path'; 2 | import fs from 'node:fs'; 3 | import readline from 'readline'; 4 | import { AudioContext, mediaDevices } from '../index.mjs'; 5 | 6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 7 | const audioContext = new AudioContext({ latencyHint }); 8 | 9 | audioContext.addEventListener('sinkchange', (e) => { 10 | console.log('> sinkchange listener:', e); 11 | }); 12 | 13 | const file = fs.readFileSync(path.join('examples', 'samples', 'sample.wav')).buffer; 14 | const buffer = await audioContext.decodeAudioData(file); 15 | 16 | const src = audioContext.createBufferSource(); 17 | src.buffer = buffer; 18 | src.loop = true; 19 | src.connect(audioContext.destination); 20 | src.start(); 21 | 22 | const deviceList = await mediaDevices.enumerateDevices(); 23 | const audioOutput = deviceList.filter(d => d.kind === 'audiooutput'); 24 | 25 | console.log(''); 26 | audioOutput.map(d => `- id: ${d.deviceId} - label: ${d.label}`).map(l => console.log(l)); 27 | console.log(''); 28 | 29 | const prompt = readline.createInterface({ 30 | input: process.stdin, 31 | output: process.stdout, 32 | }); 33 | 34 | (function selectSinkId() { 35 | prompt.question(`+ select output deviceId: 36 | > `, deviceId => { 37 | audioContext.setSinkId(deviceId); 38 | 39 | console.log('+ switched to sinkId:', audioContext.sinkId); 40 | 41 | selectSinkId(); 42 | }); 43 | }()); 44 | -------------------------------------------------------------------------------- /examples/stereo-panner.js: -------------------------------------------------------------------------------- 1 | import { AudioContext } from '../index.mjs'; 2 | 3 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ?
'playback' : 'interactive'; 4 | const audioContext = new AudioContext({ latencyHint }); 5 | 6 | // pipe 2 oscillators into two panners, one on each side of the stereo image 7 | // invert the direction of the panning every 4 seconds 8 | 9 | // create a stereo panner 10 | const panner1 = audioContext.createStereoPanner(); 11 | let pan1 = -1.; 12 | panner1.channelCount = 1; 13 | panner1.connect(audioContext.destination); 14 | panner1.pan.value = pan1; 15 | // create an oscillator 16 | const osc1 = audioContext.createOscillator(); 17 | osc1.connect(panner1); 18 | osc1.frequency.value = 200.; 19 | osc1.start(); 20 | 21 | // create a stereo panner for mono input 22 | const panner2 = audioContext.createStereoPanner(); 23 | let pan2 = 1.; 24 | panner2.channelCount = 1; 25 | panner2.connect(audioContext.destination); 26 | panner2.pan.value = pan2; 27 | // create an oscillator 28 | const osc2 = audioContext.createOscillator(); 29 | osc2.connect(panner2); 30 | osc2.frequency.value = 300.; 31 | osc2.start(); 32 | 33 | setInterval(function loop() { 34 | // reverse the stereo image 35 | const now = audioContext.currentTime; 36 | 37 | panner1.pan.setValueAtTime(pan1, now); 38 | pan1 *= -1; 39 | panner1.pan.linearRampToValueAtTime(pan1, now + 1.); 40 | 41 | panner2.pan.setValueAtTime(pan2, now); 42 | pan2 *= -1; 43 | panner2.pan.linearRampToValueAtTime(pan2, now + 1.); 44 | }, 4 * 1000); 45 | -------------------------------------------------------------------------------- /examples/trigger-soundfiles.js: -------------------------------------------------------------------------------- 1 | import path from 'node:path'; 2 | import fs from 'node:fs'; 3 | 4 | import { AudioContext } from '../index.mjs'; 5 | 6 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ?
'playback' : 'interactive'; 7 | const context = new AudioContext({ latencyHint }); 8 | 9 | // Showcase different methods of the AudioBufferSourceNode 10 | 11 | const file = fs.readFileSync(path.join('examples', 'samples', 'sample.wav')).buffer; 12 | const audioBuffer = await context.decodeAudioData(file); 13 | 14 | { 15 | console.log('++ play until end'); 16 | const src = context.createBufferSource(); 17 | src.buffer = audioBuffer; 18 | src.connect(context.destination); 19 | src.start(context.currentTime); 20 | } 21 | 22 | await new Promise(resolve => setTimeout(resolve, 3500)); 23 | 24 | { 25 | console.log('++ play / stop 1sec'); 26 | const src = context.createBufferSource(); 27 | src.buffer = audioBuffer; 28 | src.connect(context.destination); 29 | src.start(context.currentTime); 30 | src.stop(context.currentTime + 1.); 31 | } 32 | 33 | await new Promise(resolve => setTimeout(resolve, 1500)); 34 | 35 | { 36 | console.log('++ play / stop 1sec with offset'); 37 | const src = context.createBufferSource(); 38 | src.buffer = audioBuffer; 39 | src.connect(context.destination); 40 | src.start(context.currentTime, 1.); 41 | src.stop(context.currentTime + 1.); 42 | } 43 | 44 | await new Promise(resolve => setTimeout(resolve, 1500)); 45 | 46 | { 47 | console.log('++ play 1sec with offset and duration'); 48 | const src = context.createBufferSource(); 49 | src.buffer = audioBuffer; 50 | src.connect(context.destination); 51 | src.start(context.currentTime, 1., 1.); 52 | } 53 | 54 | await new Promise(resolve => setTimeout(resolve, 1500)); 55 | 56 | { 57 | console.log('++ play backward from offset 1.'); 58 | const src = context.createBufferSource(); 59 | src.buffer = audioBuffer; 60 | src.connect(context.destination); 61 | src.playbackRate.value = -1.; 62 | src.start(context.currentTime, 1.); 63 | } 64 | 65 | await new Promise(resolve => setTimeout(resolve, 1500)); 66 | 67 | { 68 | console.log('++ play backward full buffer'); 69 | const src = context.createBufferSource(); 70 | src.buffer = audioBuffer; 71 | src.connect(context.destination); 72 | src.playbackRate.value = -1.; 73 | src.start(context.currentTime, audioBuffer.duration); 74 | } 75 | 76 | await new Promise(resolve => setTimeout(resolve, 3500)); 77 | 78 | { 79 | console.log('++ simple loop (x2)'); 80 | const src = context.createBufferSource(); 81 | src.buffer = audioBuffer; 82 | src.connect(context.destination); 83 | src.loop = true; 84 | src.start(context.currentTime); 85 | src.stop(context.currentTime + audioBuffer.duration * 2.); 86 | } 87 | 88 | await new Promise(resolve => setTimeout(resolve, 7000)); 89 | 90 | { 91 | console.log('++ loop between 1 and 2 starting from 0'); 92 | const src = context.createBufferSource(); 93 | src.buffer = audioBuffer; 94 | src.connect(context.destination); 95 | src.loop = true; 96 | src.loopStart = 1.; 97 | src.loopEnd = 2.; 98 | src.start(context.currentTime); 99 | 100 | await new Promise(resolve => setTimeout(resolve, 4500)); 101 | src.loop = false; 102 | } 103 | 104 | await new Promise(resolve => setTimeout(resolve, 2500)); 105 | 106 | { 107 | console.log('++ loop backward between 1 and 2 starting from end'); 108 | const src = context.createBufferSource(); 109 | src.buffer = audioBuffer; 110 | src.connect(context.destination); 111 | src.playbackRate.value = -1.; 112 | src.loop = true; 113 | src.loopStart = 1.; 114 | src.loopEnd = 2.; 115 | src.start(context.currentTime, audioBuffer.duration); 116 | 117 | await new Promise(resolve => setTimeout(resolve, 4500)); 118 | src.loop = false; 119 | } 120 | 121 | 
await new Promise(resolve => setTimeout(resolve, 2500)); 122 | 123 | console.log('++ play several overlapping sources, accenting every 4th one'); 124 | 125 | for (let i = 0; i < 9; i++) { 126 | let offset = i / 2.; 127 | 128 | let gain = i % 4 === 0 ? 1. : 0.2; 129 | let env = context.createGain(); 130 | env.gain.value = gain; 131 | env.connect(context.destination); 132 | 133 | const src = context.createBufferSource(); 134 | src.buffer = audioBuffer; 135 | src.connect(env); 136 | src.start(context.currentTime + offset); 137 | } 138 | 139 | await new Promise(resolve => setTimeout(resolve, 8000)); 140 | console.log('++ end of examples'); 141 | await context.close(); 142 | -------------------------------------------------------------------------------- /examples/waveshaper.js: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs'; 2 | import path from 'node:path'; 3 | import { AudioContext } from '../index.mjs'; 4 | 5 | // use part of a cosine, between [π, 2π], as the shaping curve 6 | function makeDistortionCurve(size) { 7 | const curve = new Float32Array(size); 8 | let phase = 0.; 9 | const phaseIncr = Math.PI / (size - 1); 10 | 11 | for (let i = 0; i < size; i++) { 12 | curve[i] = Math.cos(Math.PI + phase); 13 | phase += phaseIncr; 14 | } 15 | 16 | return curve; 17 | } 18 | 19 | 20 | console.log('> gradually increase the amount of distortion applied on the sample'); 21 | 22 | const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; 23 | const audioContext = new AudioContext({ latencyHint }); 24 | 25 | let file = fs.readFileSync(path.join('examples', 'samples', 'sample.wav')).buffer; 26 | let buffer = await audioContext.decodeAudioData(file); 27 | let curve = makeDistortionCurve(2048); 28 | 29 | let postGain = audioContext.createGain(); 30 | postGain.connect(audioContext.destination); 31 | postGain.gain.value = 0.; 32 | 33 | let shaper = audioContext.createWaveShaper(); 34 | shaper.curve = curve; 35 | shaper.oversample = 'none'; 36 | // shaper.oversample = "2x"; 37 | // shaper.oversample = "4x"; 38 | shaper.connect(postGain); 39 | 40 | let preGain = audioContext.createGain(); 41 | preGain.connect(shaper); 42 | preGain.gain.value = 0.; 43 | 44 | for (let i = 1; i < 7; i++) { 45 | const gain = i * 2.; 46 | console.log('+ pre gain:', gain); 47 | 48 | preGain.gain.value = gain; 49 | postGain.gain.value = 1.
/ gain; 50 | 51 | let src = audioContext.createBufferSource(); 52 | src.connect(preGain); 53 | src.buffer = buffer; 54 | src.start(); 55 | 56 | await new Promise(resolve => setTimeout(resolve, 4000)); 57 | } 58 | 59 | await audioContext.close(); 60 | -------------------------------------------------------------------------------- /examples/worklets/Makefile: -------------------------------------------------------------------------------- 1 | DEPS = SimpleKernel.cc 2 | 3 | build: $(DEPS) 4 | @emcc --bind -O1 \ 5 | -s WASM=1 \ 6 | -s BINARYEN_ASYNC_COMPILATION=0 \ 7 | -s SINGLE_FILE=1 \ 8 | -s ENVIRONMENT=node \ 9 | -s EXPORT_ES6=1 \ 10 | -s EXPORTED_FUNCTIONS="['_malloc']" \ 11 | SimpleKernel.cc \ 12 | -o simple-kernel.wasmmodule.mjs 13 | 14 | clean: 15 | @rm -f simple-kernel.wasmmodule.mjs 16 | -------------------------------------------------------------------------------- /examples/worklets/SimpleKernel.cc: -------------------------------------------------------------------------------- 1 | // adapted from https://github.com/GoogleChromeLabs/web-audio-samples/tree/main/src/audio-worklet/design-pattern/wasm 2 | 3 | /** 4 | * Copyright 2018 Google LLC 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not 7 | * use this file except in compliance with the License. You may obtain a copy of 8 | * the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 15 | * License for the specific language governing permissions and limitations under 16 | * the License. 17 | */ 18 | 19 | #include "emscripten/bind.h" 20 | 21 | using namespace emscripten; 22 | 23 | const unsigned kRenderQuantumFrames = 128; 24 | const unsigned kBytesPerChannel = kRenderQuantumFrames * sizeof(float); 25 | 26 | // The "kernel" is an object that processes an audio stream, which contains 27 | // one or more channels. It is supposed to obtain the frame data from an 28 | // |input|, process and fill an |output| of the AudioWorkletProcessor. 29 | // 30 | // AudioWorkletProcessor Input(multi-channel, 128-frames) 31 | // | 32 | // V 33 | // Kernel 34 | // | 35 | // V 36 | // AudioWorkletProcessor Output(multi-channel, 128-frames) 37 | // 38 | // In this implementation, the kernel operates on 128-frame blocks, which is 39 | // the render quantum size of the Web Audio API. 40 | class SimpleKernel { 41 | public: 42 | SimpleKernel() {} 43 | 44 | void Process(uintptr_t input_ptr, uintptr_t output_ptr, 45 | unsigned channel_count) { 46 | float* input_buffer = reinterpret_cast<float*>(input_ptr); 47 | float* output_buffer = reinterpret_cast<float*>(output_ptr); 48 | 49 | // Bypasses the data. By design, the channel count will always be the same 50 | // for |input_buffer| and |output_buffer|.
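// Note: the heap buffers are planar (channel-major), so sample |i| of
// channel |c| lives at buffer[c * kRenderQuantumFrames + i], e.g.
//
//   float s = input_buffer[channel * kRenderQuantumFrames + i];
//
// which is why the loop below can copy one whole 128-frame channel per
// memcpy (illustrative index math only).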
51 | for (unsigned channel = 0; channel < channel_count; ++channel) { 52 | float* destination = output_buffer + channel * kRenderQuantumFrames; 53 | float* source = input_buffer + channel * kRenderQuantumFrames; 54 | memcpy(destination, source, kBytesPerChannel); 55 | } 56 | } 57 | }; 58 | 59 | EMSCRIPTEN_BINDINGS(CLASS_SimpleKernel) { 60 | class_<SimpleKernel>("SimpleKernel") 61 | .constructor() 62 | .function("process", 63 | &SimpleKernel::Process, 64 | allow_raw_pointers()); 65 | } 66 | -------------------------------------------------------------------------------- /examples/worklets/array-source.js: -------------------------------------------------------------------------------- 1 | class ArraySourceProcessor extends AudioWorkletProcessor { 2 | constructor(options) { 3 | super(); 4 | this.sharedFloats = options.processorOptions.sharedFloats; 5 | } 6 | 7 | process(inputs, outputs, parameters) { 8 | const output = outputs[0]; 9 | 10 | output.forEach((channel) => { 11 | for (let i = 0; i < channel.length; i++) { 12 | channel[i] = this.sharedFloats[i]; 13 | } 14 | }); 15 | 16 | return true; 17 | } 18 | } 19 | 20 | registerProcessor('array-source', ArraySourceProcessor); 21 | -------------------------------------------------------------------------------- /examples/worklets/bitcrusher.js: -------------------------------------------------------------------------------- 1 | class Bitcrusher extends AudioWorkletProcessor { 2 | static get parameterDescriptors() { 3 | return [{ 4 | name: 'bitDepth', 5 | defaultValue: 12, 6 | minValue: 1, 7 | maxValue: 16 8 | }, { 9 | name: 'frequencyReduction', 10 | defaultValue: 0.5, 11 | minValue: 0, 12 | maxValue: 1 13 | }]; 14 | } 15 | 16 | constructor(options) { 17 | console.log(`++ in constructor: ${JSON.stringify(options, null, 2)}\n`); 18 | // The initial parameter value can be set by passing |options| 19 | // to the processor's constructor. 20 | super(); 21 | 22 | this._phase = 0; 23 | this._lastSampleValue = 0; 24 | this._msg = options.processorOptions.msg; 25 | 26 | this.port.on('message', event => { 27 | console.log(`++ on message: ${JSON.stringify(event, null, 2)}\n`); 28 | }); 29 | } 30 | 31 | process(inputs, outputs, parameters) { 32 | const input = inputs[0]; 33 | const output = outputs[0]; 34 | const bitDepth = parameters.bitDepth; 35 | const frequencyReduction = parameters.frequencyReduction; 36 | 37 | if (bitDepth.length > 1) { 38 | for (let channel = 0; channel < output.length; ++channel) { 39 | for (let i = 0; i < output[channel].length; ++i) { 40 | let step = Math.pow(0.5, bitDepth[i]); 41 | // Use modulo for indexing to handle the case where 42 | // the length of the frequencyReduction array is 1. 43 | this._phase += frequencyReduction[i % frequencyReduction.length]; 44 | if (this._phase >= 1.0) { 45 | this._phase -= 1.0; 46 | this._lastSampleValue = step * Math.floor(input[channel][i] / step + 0.5); 47 | } 48 | output[channel][i] = this._lastSampleValue; 49 | } 50 | } 51 | } else { 52 | // Because we know bitDepth is constant for this call, 53 | // we can lift the computation of step outside the loop, 54 | // saving many operations.
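// Note: per the AudioWorklet spec, a parameter array has length 1 when its
// value is constant over the whole 128-frame render quantum, and 128
// otherwise -- hence the `bitDepth.length > 1` test above. A generic way to
// read it would be (illustrative sketch only):
//
//   const depth = bitDepth.length > 1 ? bitDepth[i] : bitDepth[0];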
55 | const step = Math.pow(0.5, bitDepth[0]); 56 | for (let channel = 0; channel < output.length; ++channel) { 57 | for (let i = 0; i < output[channel].length; ++i) { 58 | this._phase += frequencyReduction[i % frequencyReduction.length]; 59 | if (this._phase >= 1.0) { 60 | this._phase -= 1.0; 61 | this._lastSampleValue = step * Math.floor(input[channel][i] / step + 0.5); 62 | } 63 | output[channel][i] = this._lastSampleValue; 64 | } 65 | } 66 | } 67 | 68 | if (Math.random() < 0.005) { 69 | this.port.postMessage({ hello: 'from render', msg: this._msg }); 70 | } 71 | 72 | // No need to return a value; this node's lifetime is dependent only on its 73 | // input connections. 74 | } 75 | } 76 | 77 | registerProcessor('bitcrusher', Bitcrusher); 78 | -------------------------------------------------------------------------------- /examples/worklets/wasm-worklet-processor.mjs: -------------------------------------------------------------------------------- 1 | // adapted from https://github.com/GoogleChromeLabs/web-audio-samples/tree/main/src/audio-worklet/design-pattern/wasm 2 | // 3 | // Copyright (c) 2018 The Chromium Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style license that can be 5 | // found in the LICENSE file. 6 | 7 | import Module from './simple-kernel.wasmmodule.mjs'; 8 | import { RENDER_QUANTUM_FRAMES, MAX_CHANNEL_COUNT, FreeQueue } from './free-queue.mjs'; 9 | 10 | /** 11 | * A simple demonstration of WASM-powered AudioWorkletProcessor. 12 | * 13 | * @class WASMWorkletProcessor 14 | * @extends AudioWorkletProcessor 15 | */ 16 | class WASMWorkletProcessor extends AudioWorkletProcessor { 17 | /** 18 | * @constructor 19 | */ 20 | constructor() { 21 | super(); 22 | 23 | const mod = Module(); 24 | // Allocate the buffer for the heap access. Start with stereo, but it can 25 | // be expanded up to 32 channels. 26 | try { 27 | this._heapInputBuffer = new FreeQueue( 28 | mod, RENDER_QUANTUM_FRAMES, 2, MAX_CHANNEL_COUNT); 29 | this._heapOutputBuffer = new FreeQueue( 30 | mod, RENDER_QUANTUM_FRAMES, 2, MAX_CHANNEL_COUNT); 31 | this._kernel = new mod.SimpleKernel(); 32 | } catch (err) { 33 | console.log(err.message); 34 | } 35 | } 36 | 37 | /** 38 | * System-invoked process callback function. 39 | * @param {Array} inputs Incoming audio stream. 40 | * @param {Array} outputs Outgoing audio stream. 41 | * @param {Object} parameters AudioParam data. 42 | * @return {Boolean} Active source flag. 43 | */ 44 | process(inputs, outputs, parameters) { 45 | // Use the 1st input and output only to make the example simpler. |input| 46 | // and |output| here have a similar structure to the AudioBuffer 47 | // interface (i.e. an array of Float32Arrays). 48 | const input = inputs[0]; 49 | const output = outputs[0]; 50 | 51 | // For this given render quantum, the channel count of the node is fixed 52 | // and identical for the input and the output. 53 | const channelCount = input.length; 54 | 55 | // Prepare HeapAudioBuffer for the channel count change in the current 56 | // render quantum. 57 | this._heapInputBuffer.adaptChannel(channelCount); 58 | this._heapOutputBuffer.adaptChannel(channelCount); 59 | 60 | // Copy-in, process and copy-out.
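// JS typed arrays cannot be handed to WASM directly, so every render
// quantum is staged through Float32Array views over the WASM heap:
//
//   JS input --set()--> heap input --kernel.process()--> heap output --set()--> JS output
//
// (illustrative data-flow sketch; the set() calls below are plain
// typed-array copies into and out of those heap views)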
61 | for (let channel = 0; channel < channelCount; ++channel) { 62 | this._heapInputBuffer.getChannelData(channel).set(input[channel]); 63 | } 64 | this._kernel.process( 65 | this._heapInputBuffer.getHeapAddress(), 66 | this._heapOutputBuffer.getHeapAddress(), 67 | channelCount); 68 | for (let channel = 0; channel < channelCount; ++channel) { 69 | output[channel].set(this._heapOutputBuffer.getChannelData(channel)); 70 | } 71 | 72 | return true; 73 | } 74 | } 75 | 76 | registerProcessor('wasm-worklet-processor', WASMWorkletProcessor); 77 | -------------------------------------------------------------------------------- /examples/worklets/white-noise.js: -------------------------------------------------------------------------------- 1 | class WhiteNoiseProcessor extends AudioWorkletProcessor { 2 | process(inputs, outputs, parameters) { 3 | const output = outputs[0]; 4 | 5 | output.forEach((channel) => { 6 | for (let i = 0; i < channel.length; i++) { 7 | channel[i] = Math.random() * 2 - 1; 8 | } 9 | }); 10 | 11 | return true; 12 | } 13 | } 14 | 15 | registerProcessor('white-noise', WhiteNoiseProcessor); 16 | -------------------------------------------------------------------------------- /generator/js/index.tmpl.cjs: -------------------------------------------------------------------------------- 1 | const nativeBinding = require('./load-native.cjs'); 2 | const jsExport = {}; 3 | 4 | // -------------------------------------------------------------------------- 5 | // Events 6 | // -------------------------------------------------------------------------- 7 | jsExport.OfflineAudioCompletionEvent = require('./js/Events').OfflineAudioCompletionEvent; 8 | jsExport.AudioProcessingEvent = require('./js/Events').AudioProcessingEvent; 9 | jsExport.AudioRenderCapacityEvent = require('./js/Events').AudioRenderCapacityEvent; 10 | // -------------------------------------------------------------------------- 11 | // Create Web Audio API facade 12 | // -------------------------------------------------------------------------- 13 | jsExport.BaseAudioContext = require('./js/BaseAudioContext.js')(jsExport, nativeBinding); 14 | jsExport.AudioContext = require('./js/AudioContext.js')(jsExport, nativeBinding); 15 | jsExport.OfflineAudioContext = require('./js/OfflineAudioContext.js')(jsExport, nativeBinding); 16 | 17 | ${d.nodes.map((node) => { 18 | return ` 19 | jsExport.${d.name(node)} = require('./js/${d.name(node)}.js')(jsExport, nativeBinding);` 20 | }).join('')} 21 | 22 | jsExport.AudioNode = require('./js/AudioNode.js'); 23 | jsExport.AudioScheduledSourceNode = require('./js/AudioScheduledSourceNode.js'); 24 | jsExport.AudioParam = require('./js/AudioParam.js'); 25 | jsExport.AudioDestinationNode = require('./js/AudioDestinationNode.js'); 26 | jsExport.AudioListener = require('./js/AudioListener.js'); 27 | jsExport.AudioWorklet = require('./js/AudioWorklet.js'); 28 | jsExport.AudioParamMap = require('./js/AudioParamMap.js'); 29 | jsExport.AudioRenderCapacity = require('./js/AudioRenderCapacity.js'); 30 | 31 | jsExport.PeriodicWave = require('./js/PeriodicWave.js')(jsExport, nativeBinding); 32 | jsExport.AudioBuffer = require('./js/AudioBuffer.js')(jsExport, nativeBinding); 33 | 34 | // -------------------------------------------------------------------------- 35 | // Promisify MediaDevices API 36 | // -------------------------------------------------------------------------- 37 | jsExport.mediaDevices = {}; 38 | 39 | const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices; 40 | 
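// The native bindings are synchronous; wrapping them in async functions
// mirrors the browser's promise-based MediaDevices interface. Hedged usage
// sketch (assuming the package's public exports):
//
//   const { mediaDevices } = require('node-web-audio-api');
//   const devices = await mediaDevices.enumerateDevices();
//   const stream = await mediaDevices.getUserMedia({ audio: true });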
jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() { 41 | const list = enumerateDevicesSync(); 42 | return Promise.resolve(list); 43 | }; 44 | 45 | const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia; 46 | jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) { 47 | if (options === undefined) { 48 | throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested'); 49 | } 50 | 51 | const stream = getUserMediaSync(options); 52 | return Promise.resolve(stream); 53 | }; 54 | 55 | module.exports = jsExport; 56 | 57 | -------------------------------------------------------------------------------- /generator/js/index.tmpl.mjs: -------------------------------------------------------------------------------- 1 | // re-export index.cjs to support esm import syntax 2 | // see https://github.com/nodejs/node/issues/40541#issuecomment-951609570 3 | 4 | import { createRequire } from 'module'; 5 | const require = createRequire(import.meta.url); 6 | 7 | const nativeModule = require('./index.cjs'); 8 | export const { 9 | // events 10 | OfflineAudioCompletionEvent, 11 | AudioProcessingEvent, 12 | AudioRenderCapacityEvent, 13 | 14 | // manually written nodes 15 | BaseAudioContext, 16 | AudioContext, 17 | OfflineAudioContext, 18 | 19 | AudioNode, 20 | AudioScheduledSourceNode, 21 | AudioParam, 22 | AudioDestinationNode, 23 | AudioListener, 24 | AudioWorklet, 25 | AudioParamMap, 26 | AudioRenderCapacity, 27 | 28 | PeriodicWave, 29 | AudioBuffer, 30 | // generated nodes 31 | ${d.nodes.map(n => ` ${d.name(n)},`).join('\n')} 32 | 33 | // helper methods 34 | mediaDevices, 35 | } = nativeModule; 36 | 37 | export default nativeModule; 38 | 39 | -------------------------------------------------------------------------------- /generator/rs/lib.tmpl.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | // @todo - properly fix this clippy issue 3 | #![allow(clippy::zero_repeat_side_effects)] 4 | 5 | use napi::{Env, JsObject, Result}; 6 | use napi_derive::module_exports; 7 | 8 | #[macro_use] 9 | mod base_audio_context; 10 | #[macro_use] 11 | mod audio_node; 12 | 13 | // helpers 14 | mod utils; 15 | // Web Audio API 16 | mod audio_context; 17 | use crate::audio_context::NapiAudioContext; 18 | mod audio_destination_node; 19 | use crate::audio_destination_node::NapiAudioDestinationNode; 20 | mod audio_param; 21 | use crate::audio_param::NapiAudioParam; 22 | mod audio_listener; 23 | use crate::audio_listener::NapiAudioListener; 24 | mod audio_render_capacity; 25 | use crate::audio_render_capacity::NapiAudioRenderCapacity; 26 | mod audio_buffer; 27 | use crate::audio_buffer::NapiAudioBuffer; 28 | mod periodic_wave; 29 | use crate::periodic_wave::NapiPeriodicWave; 30 | mod offline_audio_context; 31 | use crate::offline_audio_context::NapiOfflineAudioContext; 32 | // Generated audio nodes 33 | ${d.nodes.map(n => { return ` 34 | mod ${d.slug(n)}; 35 | use crate::${d.slug(n)}::${d.napiName(n)};`}).join('')} 36 | 37 | // AudioWorklet internals 38 | use crate::audio_worklet_node::{ 39 | exit_audio_worklet_global_scope, 40 | run_audio_worklet_global_scope, 41 | }; 42 | 43 | // MediaDevices & MediaStream API 44 | mod media_streams; 45 | use crate::media_streams::NapiMediaStream; 46 | mod media_devices; 47 | use crate::media_devices::napi_enumerate_devices; 48 | use crate::media_devices::napi_get_user_media; 49 | 50 | #[cfg(all( 51 | any(windows, unix), 52 | target_arch = "x86_64", 53 
| not(target_env = "musl"), 54 | not(debug_assertions) 55 | ))] 56 | #[global_allocator] 57 | static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; 58 | 59 | #[module_exports] 60 | fn init(mut exports: JsObject, env: Env) -> Result<()> { 61 | // Do not print panic messages, handle through JS errors 62 | std::panic::set_hook(Box::new(|_panic_info| {})); 63 | 64 | let napi_class = NapiAudioContext::create_js_class(&env)?; 65 | exports.set_named_property("AudioContext", napi_class)?; 66 | 67 | let napi_class = NapiOfflineAudioContext::create_js_class(&env)?; 68 | exports.set_named_property("OfflineAudioContext", napi_class)?; 69 | 70 | let napi_class = NapiAudioBuffer::create_js_class(&env)?; 71 | exports.set_named_property("AudioBuffer", napi_class)?; 72 | 73 | let napi_class = NapiPeriodicWave::create_js_class(&env)?; 74 | exports.set_named_property("PeriodicWave", napi_class)?; 75 | 76 | let napi_class = NapiMediaStreamAudioSourceNode::create_js_class(&env)?; 77 | exports.set_named_property("MediaStreamAudioSourceNode", napi_class)?; 78 | 79 | // ---------------------------------------------------------------- 80 | // Generated audio nodes 81 | // ---------------------------------------------------------------- 82 | ${d.nodes.map(n => { return ` 83 | let napi_class = ${d.napiName(n)}::create_js_class(&env)?; 84 | exports.set_named_property("${d.name(n)}", napi_class)?; 85 | `}).join('')} 86 | 87 | // ---------------------------------------------------------------- 88 | // AudioWorklet utils (internal) 89 | // ---------------------------------------------------------------- 90 | exports.create_named_method( 91 | "run_audio_worklet_global_scope", 92 | run_audio_worklet_global_scope, 93 | )?; 94 | exports.create_named_method( 95 | "exit_audio_worklet_global_scope", 96 | exit_audio_worklet_global_scope, 97 | )?; 98 | 99 | // ---------------------------------------------------------------- 100 | // MediaStream API & Media Devices API 101 | // ---------------------------------------------------------------- 102 | let mut media_devices = env.create_object()?; 103 | 104 | let napi_class = NapiMediaStream::create_js_class(&env)?; 105 | media_devices.set_named_property("MediaStream", napi_class)?; 106 | 107 | media_devices.create_named_method("enumerateDevices", napi_enumerate_devices)?; 108 | media_devices.create_named_method("getUserMedia", napi_get_user_media)?; 109 | // expose media devices 110 | exports.set_named_property("mediaDevices", media_devices)?; 111 | 112 | // ---------------------------------------------------------------- 113 | // Store constructors for classes that need to be created from within Rust code 114 | // ---------------------------------------------------------------- 115 | let mut store = env.create_object()?; 116 | 117 | let napi_class = NapiAudioDestinationNode::create_js_class(&env)?; 118 | store.set_named_property("AudioDestinationNode", napi_class)?; 119 | 120 | let napi_class = NapiAudioListener::create_js_class(&env)?; 121 | store.set_named_property("AudioListener", napi_class)?; 122 | 123 | let napi_class = NapiAudioRenderCapacity::create_js_class(&env)?; 124 | store.set_named_property("AudioRenderCapacity", napi_class)?; 125 | 126 | let napi_class = NapiAudioBuffer::create_js_class(&env)?; 127 | store.set_named_property("AudioBuffer", napi_class)?; 128 | 129 | let napi_class = NapiMediaStream::create_js_class(&env)?; 130 | store.set_named_property("MediaStream", napi_class)?; 131 | 132 | // push store into env instance data so that it can be globally accessed 
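// (Illustrative note: tying the reference to the env via `set_instance_data`
// keeps these constructors alive for this addon instance; Rust code elsewhere
// in the crate can then fetch them back with something along the lines of
// `env.get_instance_data::<Reference<JsObject>>()?` -- a sketch of the
// retrieval side, not necessarily the exact signature used here.)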
133 | let store_ref = env.create_reference(store)?; 134 | env.set_instance_data(store_ref, 0, |mut c| { 135 | c.value.unref(c.env).unwrap(); 136 | })?; 137 | 138 | Ok(()) 139 | } 140 | -------------------------------------------------------------------------------- /index.d.ts: -------------------------------------------------------------------------------- 1 | // Referencing the default lib web api typings 2 | /// <reference lib="dom" /> 3 | declare module "node-web-audio-api" { 4 | export import OfflineAudioCompletionEvent = globalThis.OfflineAudioCompletionEvent; 5 | export import AudioProcessingEvent = globalThis.AudioProcessingEvent; 6 | // not implemented in browsers yet 7 | // export import AudioRenderCapacityEvent = globalThis.AudioRenderCapacityEvent; 8 | 9 | export import BaseAudioContext = globalThis.BaseAudioContext; 10 | export import AudioContext = globalThis.AudioContext; 11 | export import OfflineAudioContext = globalThis.OfflineAudioContext; 12 | 13 | export import AudioNode = globalThis.AudioNode; 14 | export import AudioScheduledSourceNode = globalThis.AudioScheduledSourceNode; 15 | export import AudioParam = globalThis.AudioParam; 16 | export import AudioDestinationNode = globalThis.AudioDestinationNode; 17 | export import AudioListener = globalThis.AudioListener; 18 | export import AudioWorklet = globalThis.AudioWorklet; 19 | export import AudioParamMap = globalThis.AudioParamMap; 20 | // not implemented in browsers yet 21 | // export import AudioRenderCapacity = globalThis.AudioRenderCapacity; 22 | 23 | export import PeriodicWave = globalThis.PeriodicWave; 24 | export import AudioBuffer = globalThis.AudioBuffer; 25 | 26 | export import ScriptProcessorNode = globalThis.ScriptProcessorNode; 27 | export import AudioWorkletNode = globalThis.AudioWorkletNode; 28 | export import AnalyserNode = globalThis.AnalyserNode; 29 | export import AudioBufferSourceNode = globalThis.AudioBufferSourceNode; 30 | export import BiquadFilterNode = globalThis.BiquadFilterNode; 31 | export import ChannelMergerNode = globalThis.ChannelMergerNode; 32 | export import ChannelSplitterNode = globalThis.ChannelSplitterNode; 33 | export import ConstantSourceNode = globalThis.ConstantSourceNode; 34 | export import ConvolverNode = globalThis.ConvolverNode; 35 | export import DelayNode = globalThis.DelayNode; 36 | export import DynamicsCompressorNode = globalThis.DynamicsCompressorNode; 37 | export import GainNode = globalThis.GainNode; 38 | export import IIRFilterNode = globalThis.IIRFilterNode; 39 | export import MediaStreamAudioSourceNode = globalThis.MediaStreamAudioSourceNode; 40 | export import OscillatorNode = globalThis.OscillatorNode; 41 | export import PannerNode = globalThis.PannerNode; 42 | export import StereoPannerNode = globalThis.StereoPannerNode; 43 | export import WaveShaperNode = globalThis.WaveShaperNode; 44 | } 45 | -------------------------------------------------------------------------------- /index.mjs: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 
| // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This file has been generated --------------------------- // 15 | // // 16 | // // 17 | // -------------------------------------------------------------------------- // 18 | // -------------------------------------------------------------------------- // 19 | 20 | // re-export index.cjs to support esm import syntax 21 | // see https://github.com/nodejs/node/issues/40541#issuecomment-951609570 22 | 23 | import { 24 | createRequire, 25 | } from 'module'; 26 | const require = createRequire(import.meta.url); 27 | 28 | const nativeModule = require('./index.cjs'); 29 | export const { 30 | // events 31 | OfflineAudioCompletionEvent, 32 | AudioProcessingEvent, 33 | AudioRenderCapacityEvent, 34 | 35 | // manually written nodes 36 | BaseAudioContext, 37 | AudioContext, 38 | OfflineAudioContext, 39 | 40 | AudioNode, 41 | AudioScheduledSourceNode, 42 | AudioParam, 43 | AudioDestinationNode, 44 | AudioListener, 45 | AudioWorklet, 46 | AudioParamMap, 47 | AudioRenderCapacity, 48 | 49 | PeriodicWave, 50 | AudioBuffer, 51 | // generated nodes 52 | ScriptProcessorNode, 53 | AudioWorkletNode, 54 | AnalyserNode, 55 | AudioBufferSourceNode, 56 | BiquadFilterNode, 57 | ChannelMergerNode, 58 | ChannelSplitterNode, 59 | ConstantSourceNode, 60 | ConvolverNode, 61 | DelayNode, 62 | DynamicsCompressorNode, 63 | GainNode, 64 | IIRFilterNode, 65 | MediaStreamAudioSourceNode, 66 | OscillatorNode, 67 | PannerNode, 68 | StereoPannerNode, 69 | WaveShaperNode, 70 | 71 | // helper methods 72 | mediaDevices, 73 | } = nativeModule; 74 | 75 | export default nativeModule; 76 | -------------------------------------------------------------------------------- /js/AudioDestinationNode.js: -------------------------------------------------------------------------------- 1 | const { kNapiObj } = require('./lib/symbols.js'); 2 | const { kEnumerableProperty } = require('./lib/utils.js'); 3 | const AudioNode = require('./AudioNode.js'); 4 | 5 | class AudioDestinationNode extends AudioNode { 6 | constructor(context, options) { 7 | // Make constructor "private" 8 | if ( 9 | (typeof options !== 'object') 10 | || !(kNapiObj in options) 11 | || options[kNapiObj]['Symbol.toStringTag'] !== 'AudioDestinationNode' 12 | ) { 13 | throw new TypeError('Illegal constructor'); 14 | } 15 | 16 | super(context, { 17 | [kNapiObj]: options[kNapiObj], 18 | }); 19 | } 20 | 21 | get maxChannelCount() { 22 | if (!(this instanceof AudioDestinationNode)) { 23 | throw new TypeError(`Invalid Invocation: Value of 'this' must be of type 'AudioDestinationNode'`); 24 | } 25 | 26 | return this[kNapiObj].maxChannelCount; 27 | } 28 | } 29 | 30 | Object.defineProperties(AudioDestinationNode, { 31 | length: { 32 | __proto__: null, 33 | writable: false, 34 | enumerable: false, 35 | configurable: true, 36 | value: 0, 37 | }, 38 | }); 39 | 40 | Object.defineProperties(AudioDestinationNode.prototype, { 41 | [Symbol.toStringTag]: { 42 | __proto__: null, 43 | writable: false, 44 | enumerable: false, 45 | configurable: true, 46 | value: 'AudioDestinationNode', 47 | }, 48 | 49 | maxChannelCount: kEnumerableProperty, 50 | }); 51 | 52 | module.exports = AudioDestinationNode; 53 | 54 | -------------------------------------------------------------------------------- /js/AudioParamMap.js: -------------------------------------------------------------------------------- 1 | const { 2 | kPrivateConstructor, 3 | } = require('./lib/symbols.js'); 4 | const { 5 | kEnumerableProperty, 6 | } = 
require('./lib/utils.js'); 7 | 8 | class AudioParamMap { 9 | #parameters = null; 10 | 11 | constructor(options) { 12 | if ( 13 | (typeof options !== 'object') || 14 | options[kPrivateConstructor] !== true 15 | ) { 16 | throw new TypeError('Illegal constructor'); 17 | } 18 | 19 | this.#parameters = options.parameters; 20 | } 21 | 22 | get size() { 23 | return this.#parameters.size; 24 | } 25 | 26 | entries() { 27 | return this.#parameters.entries(); 28 | } 29 | 30 | keys() { 31 | return this.#parameters.keys(); 32 | } 33 | 34 | values() { 35 | return this.#parameters.values(); 36 | } 37 | 38 | forEach(func) { 39 | return this.#parameters.forEach(func); 40 | } 41 | 42 | get(name) { 43 | return this.#parameters.get(name); 44 | } 45 | 46 | has(name) { 47 | return this.#parameters.has(name); 48 | } 49 | } 50 | 51 | Object.defineProperties(AudioParamMap, { 52 | length: { 53 | __proto__: null, 54 | writable: false, 55 | enumerable: false, 56 | configurable: true, 57 | value: 0, 58 | }, 59 | }); 60 | 61 | Object.defineProperties(AudioParamMap.prototype, { 62 | [Symbol.toStringTag]: { 63 | __proto__: null, 64 | writable: false, 65 | enumerable: false, 66 | configurable: true, 67 | value: 'AudioParamMap', 68 | }, 69 | [Symbol.iterator]: { 70 | value: AudioParamMap.prototype.entries, 71 | enumerable: false, 72 | configurable: true, 73 | writable: true, 74 | }, 75 | size: { 76 | __proto__: null, 77 | enumerable: true, 78 | configurable: true, 79 | }, 80 | entries: kEnumerableProperty, 81 | keys: kEnumerableProperty, 82 | values: kEnumerableProperty, 83 | forEach: kEnumerableProperty, 84 | get: kEnumerableProperty, 85 | has: kEnumerableProperty, 86 | }); 87 | 88 | module.exports = AudioParamMap; 89 | -------------------------------------------------------------------------------- /js/AudioRenderCapacity.js: -------------------------------------------------------------------------------- 1 | const conversions = require('webidl-conversions'); 2 | 3 | const { 4 | kNapiObj, 5 | kOnUpdate, 6 | } = require('./lib/symbols.js'); 7 | const { 8 | isFunction, 9 | kEnumerableProperty, 10 | } = require('./lib/utils.js'); 11 | const { 12 | propagateEvent, 13 | } = require('./lib/events.js'); 14 | const { 15 | AudioRenderCapacityEvent, 16 | } = require('./Events.js'); 17 | 18 | class AudioRenderCapacity extends EventTarget { 19 | #onupdate = null; 20 | 21 | constructor(options) { 22 | // Make constructor "private" 23 | if ( 24 | (typeof options !== 'object') 25 | || !(kNapiObj in options) 26 | || options[kNapiObj]['Symbol.toStringTag'] !== 'AudioRenderCapacity' 27 | ) { 28 | throw new TypeError('Illegal constructor'); 29 | } 30 | 31 | super(); 32 | 33 | this[kNapiObj] = options[kNapiObj]; 34 | 35 | this[kNapiObj][kOnUpdate] = (function(err, rawEvent) { 36 | const event = new AudioRenderCapacityEvent('update', rawEvent); 37 | propagateEvent(this, event); 38 | }).bind(this); 39 | 40 | this[kNapiObj].listen_to_events(); 41 | } 42 | 43 | get onupdate() { 44 | if (!(this instanceof AudioRenderCapacity)) { 45 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioRenderCapacity\''); 46 | } 47 | 48 | return this.#onupdate; 49 | } 50 | 51 | set onupdate(value) { 52 | if (!(this instanceof AudioRenderCapacity)) { 53 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioRenderCapacity\''); 54 | } 55 | 56 | if (isFunction(value) || value === null) { 57 | this.#onupdate = value; 58 | } 59 | } 60 | 61 | start(options = null) { 62 | if (!(this instanceof 
AudioRenderCapacity)) { 63 | throw new TypeError(`Invalid Invocation: Value of 'this' must be of type 'AudioRenderCapacity'`); 64 | } 65 | 66 | let targetOptions = {}; 67 | 68 | if (typeof options === 'object' && options !== null) { 69 | if (!('updateInterval' in options)) { 70 | throw new TypeError(`Failed to execute 'start' on 'AudioRenderCapacity': Failed to read the 'updateInterval' property on 'AudioRenderCapacityOptions'`); 71 | } 72 | 73 | targetOptions.updateInterval = conversions['double'](options.updateInterval, { 74 | context: `Failed to execute 'start' on 'AudioRenderCapacity': Failed to read the 'updateInterval' property on 'AudioRenderCapacityOptions': The provided value (${options.updateInterval})`, 75 | }); 76 | } else { 77 | targetOptions.updateInterval = 1; 78 | } 79 | 80 | return this[kNapiObj].start(targetOptions); 81 | } 82 | 83 | stop() { 84 | if (!(this instanceof AudioRenderCapacity)) { 85 | throw new TypeError(`Invalid Invocation: Value of 'this' must be of type 'AudioRenderCapacity'`); 86 | } 87 | 88 | return this[kNapiObj].stop(); 89 | } 90 | } 91 | 92 | Object.defineProperties(AudioRenderCapacity, { 93 | length: { 94 | __proto__: null, 95 | writable: false, 96 | enumerable: false, 97 | configurable: true, 98 | value: 0, 99 | }, 100 | }); 101 | 102 | Object.defineProperties(AudioRenderCapacity.prototype, { 103 | [Symbol.toStringTag]: { 104 | __proto__: null, 105 | writable: false, 106 | enumerable: false, 107 | configurable: true, 108 | value: 'AudioRenderCapacity', 109 | }, 110 | 111 | onupdate: kEnumerableProperty, 112 | start: kEnumerableProperty, 113 | stop: kEnumerableProperty, 114 | }); 115 | 116 | module.exports = AudioRenderCapacity; 117 | 118 | 119 | -------------------------------------------------------------------------------- /js/AudioScheduledSourceNode.js: -------------------------------------------------------------------------------- 1 | const conversions = require('webidl-conversions'); 2 | 3 | const { 4 | throwSanitizedError, 5 | } = require('./lib/errors.js'); 6 | const { 7 | propagateEvent, 8 | } = require('./lib/events.js'); 9 | const { 10 | isFunction, 11 | kEnumerableProperty, 12 | } = require('./lib/utils.js'); 13 | const { 14 | kNapiObj, 15 | kOnEnded, 16 | } = require('./lib/symbols.js'); 17 | 18 | const AudioNode = require('./AudioNode.js'); 19 | 20 | class AudioScheduledSourceNode extends AudioNode { 21 | #onended = null; 22 | 23 | constructor(context, options) { 24 | // Make constructor "private" 25 | if ( 26 | (typeof options !== 'object') 27 | || !(kNapiObj in options) 28 | ) { 29 | throw new TypeError('Illegal constructor'); 30 | } 31 | 32 | super(context, options); 33 | 34 | // Add function to Napi object to bridge from Rust events to JS EventTarget 35 | // It will be effectively registered on rust side when `start` is called 36 | // 37 | // Note 2024-06-05 - We use bind instead of an arrow function because an arrow 38 | // function prevents the node from being collected by the Scavenge step of the 39 | // GC, which can lead to oversized graphs and performance issues. 40 | // cf. 
https://github.com/ircam-ismm/node-web-audio-api/tree/fix/118 41 | this[kNapiObj][kOnEnded] = (function(_err, rawEvent) { 42 | const event = new Event(rawEvent.type); 43 | propagateEvent(this, event); 44 | }).bind(this); 45 | } 46 | 47 | get onended() { 48 | if (!(this instanceof AudioScheduledSourceNode)) { 49 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioScheduledSourceNode\''); 50 | } 51 | 52 | return this.#onended; 53 | } 54 | 55 | set onended(value) { 56 | if (!(this instanceof AudioScheduledSourceNode)) { 57 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioScheduledSourceNode\''); 58 | } 59 | 60 | if (isFunction(value) || value === null) { 61 | this.#onended = value; 62 | } 63 | } 64 | 65 | start(when = 0) { 66 | if (!(this instanceof AudioScheduledSourceNode)) { 67 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioScheduledSourceNode\''); 68 | } 69 | 70 | when = conversions['double'](when, { 71 | context: `Failed to execute 'start' on 'AudioScheduledSourceNode': Parameter 1`, 72 | }); 73 | 74 | try { 75 | return this[kNapiObj].start(when); 76 | } catch (err) { 77 | throwSanitizedError(err); 78 | } 79 | } 80 | 81 | stop(when = 0) { 82 | if (!(this instanceof AudioScheduledSourceNode)) { 83 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioScheduledSourceNode\''); 84 | } 85 | 86 | when = conversions['double'](when, { 87 | context: `Failed to execute 'stop' on 'AudioScheduledSourceNode': Parameter 1`, 88 | }); 89 | 90 | try { 91 | return this[kNapiObj].stop(when); 92 | } catch (err) { 93 | throwSanitizedError(err); 94 | } 95 | } 96 | } 97 | 98 | Object.defineProperties(AudioScheduledSourceNode, { 99 | length: { 100 | __proto__: null, 101 | writable: false, 102 | enumerable: false, 103 | configurable: true, 104 | value: 0, 105 | }, 106 | }); 107 | 108 | Object.defineProperties(AudioScheduledSourceNode.prototype, { 109 | [Symbol.toStringTag]: { 110 | __proto__: null, 111 | writable: false, 112 | enumerable: false, 113 | configurable: true, 114 | value: 'AudioScheduledSourceNode', 115 | }, 116 | onended: kEnumerableProperty, 117 | start: kEnumerableProperty, 118 | stop: kEnumerableProperty, 119 | }); 120 | 121 | module.exports = AudioScheduledSourceNode; 122 | -------------------------------------------------------------------------------- /js/ChannelMergerNode.js: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This file has been generated --------------------------- // 15 | // // 16 | // // 17 | // -------------------------------------------------------------------------- // 18 | // -------------------------------------------------------------------------- // 19 | 20 | /* eslint-disable no-unused-vars */ 21 | const conversions = require('webidl-conversions'); 22 | const { 23 | toSanitizedSequence, 24 | } 
= require('./lib/cast.js'); 25 | const { 26 | isFunction, 27 | kEnumerableProperty, 28 | } = require('./lib/utils.js'); 29 | const { 30 | throwSanitizedError, 31 | } = require('./lib/errors.js'); 32 | const { 33 | kNapiObj, 34 | kAudioBuffer, 35 | } = require('./lib/symbols.js'); 36 | /* eslint-enable no-unused-vars */ 37 | 38 | const AudioNode = require('./AudioNode.js'); 39 | 40 | module.exports = (jsExport, nativeBinding) => { 41 | class ChannelMergerNode extends AudioNode { 42 | 43 | constructor(context, options) { 44 | 45 | if (arguments.length < 1) { 46 | throw new TypeError(`Failed to construct 'ChannelMergerNode': 1 argument required, but only ${arguments.length} present`); 47 | } 48 | 49 | if (!(context instanceof jsExport.BaseAudioContext)) { 50 | throw new TypeError(`Failed to construct 'ChannelMergerNode': argument 1 is not of type BaseAudioContext`); 51 | } 52 | 53 | // parsed version of the option to be passed to NAPI 54 | const parsedOptions = {}; 55 | 56 | if (options && typeof options !== 'object') { 57 | throw new TypeError('Failed to construct \'ChannelMergerNode\': argument 2 is not of type \'ChannelMergerOptions\''); 58 | } 59 | 60 | if (options && options.numberOfInputs !== undefined) { 61 | parsedOptions.numberOfInputs = conversions['unsigned long'](options.numberOfInputs, { 62 | enforceRange: true, 63 | context: `Failed to construct 'ChannelMergerNode': Failed to read the 'numberOfInputs' property from ChannelMergerOptions: The provided value (${options.numberOfInputs})`, 64 | }); 65 | } else { 66 | parsedOptions.numberOfInputs = 6; 67 | } 68 | 69 | if (options && options.channelCount !== undefined) { 70 | parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, { 71 | enforceRange: true, 72 | context: `Failed to construct 'ChannelMergerNode': Failed to read the 'channelCount' property from ChannelMergerOptions: The provided value '${options.channelCount}'`, 73 | }); 74 | } 75 | 76 | if (options && options.channelCountMode !== undefined) { 77 | parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, { 78 | context: `Failed to construct 'ChannelMergerNode': Failed to read the 'channelCountMode' property from ChannelMergerOptions: The provided value '${options.channelCountMode}'`, 79 | }); 80 | } 81 | 82 | if (options && options.channelInterpretation !== undefined) { 83 | parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, { 84 | context: `Failed to construct 'ChannelMergerNode': Failed to read the 'channelInterpretation' property from ChannelMergerOptions: The provided value '${options.channelInterpretation}'`, 85 | }); 86 | } 87 | 88 | let napiObj; 89 | 90 | try { 91 | napiObj = new nativeBinding.ChannelMergerNode(context[kNapiObj], parsedOptions); 92 | } catch (err) { 93 | throwSanitizedError(err); 94 | } 95 | 96 | super(context, { 97 | [kNapiObj]: napiObj, 98 | }); 99 | 100 | } 101 | 102 | } 103 | 104 | Object.defineProperties(ChannelMergerNode, { 105 | length: { 106 | __proto__: null, 107 | writable: false, 108 | enumerable: false, 109 | configurable: true, 110 | value: 1, 111 | }, 112 | }); 113 | 114 | Object.defineProperties(ChannelMergerNode.prototype, { 115 | [Symbol.toStringTag]: { 116 | __proto__: null, 117 | writable: false, 118 | enumerable: false, 119 | configurable: true, 120 | value: 'ChannelMergerNode', 121 | }, 122 | 123 | }); 124 | 125 | return ChannelMergerNode; 126 | }; 127 | -------------------------------------------------------------------------------- 
/js/ChannelSplitterNode.js: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This file has been generated --------------------------- // 15 | // // 16 | // // 17 | // -------------------------------------------------------------------------- // 18 | // -------------------------------------------------------------------------- // 19 | 20 | /* eslint-disable no-unused-vars */ 21 | const conversions = require('webidl-conversions'); 22 | const { 23 | toSanitizedSequence, 24 | } = require('./lib/cast.js'); 25 | const { 26 | isFunction, 27 | kEnumerableProperty, 28 | } = require('./lib/utils.js'); 29 | const { 30 | throwSanitizedError, 31 | } = require('./lib/errors.js'); 32 | const { 33 | kNapiObj, 34 | kAudioBuffer, 35 | } = require('./lib/symbols.js'); 36 | /* eslint-enable no-unused-vars */ 37 | 38 | const AudioNode = require('./AudioNode.js'); 39 | 40 | module.exports = (jsExport, nativeBinding) => { 41 | class ChannelSplitterNode extends AudioNode { 42 | 43 | constructor(context, options) { 44 | 45 | if (arguments.length < 1) { 46 | throw new TypeError(`Failed to construct 'ChannelSplitterNode': 1 argument required, but only ${arguments.length} present`); 47 | } 48 | 49 | if (!(context instanceof jsExport.BaseAudioContext)) { 50 | throw new TypeError(`Failed to construct 'ChannelSplitterNode': argument 1 is not of type BaseAudioContext`); 51 | } 52 | 53 | // parsed version of the option to be passed to NAPI 54 | const parsedOptions = {}; 55 | 56 | if (options && typeof options !== 'object') { 57 | throw new TypeError('Failed to construct \'ChannelSplitterNode\': argument 2 is not of type \'ChannelSplitterOptions\''); 58 | } 59 | 60 | if (options && options.numberOfOutputs !== undefined) { 61 | parsedOptions.numberOfOutputs = conversions['unsigned long'](options.numberOfOutputs, { 62 | enforceRange: true, 63 | context: `Failed to construct 'ChannelSplitterNode': Failed to read the 'numberOfOutputs' property from ChannelSplitterOptions: The provided value (${options.numberOfOutputs})`, 64 | }); 65 | } else { 66 | parsedOptions.numberOfOutputs = 6; 67 | } 68 | 69 | if (options && options.channelCount !== undefined) { 70 | parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, { 71 | enforceRange: true, 72 | context: `Failed to construct 'ChannelSplitterNode': Failed to read the 'channelCount' property from ChannelSplitterOptions: The provided value '${options.channelCount}'`, 73 | }); 74 | } 75 | 76 | if (options && options.channelCountMode !== undefined) { 77 | parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, { 78 | context: `Failed to construct 'ChannelSplitterNode': Failed to read the 'channelCountMode' property from ChannelSplitterOptions: The provided value '${options.channelCountMode}'`, 79 | }); 80 | } 81 | 82 | if (options && options.channelInterpretation !== undefined) { 83 | 
parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, { 84 | context: `Failed to construct 'ChannelSplitterNode': Failed to read the 'channelInterpretation' property from ChannelSplitterOptions: The provided value '${options.channelInterpretation}'`, 85 | }); 86 | } 87 | 88 | let napiObj; 89 | 90 | try { 91 | napiObj = new nativeBinding.ChannelSplitterNode(context[kNapiObj], parsedOptions); 92 | } catch (err) { 93 | throwSanitizedError(err); 94 | } 95 | 96 | super(context, { 97 | [kNapiObj]: napiObj, 98 | }); 99 | 100 | } 101 | 102 | } 103 | 104 | Object.defineProperties(ChannelSplitterNode, { 105 | length: { 106 | __proto__: null, 107 | writable: false, 108 | enumerable: false, 109 | configurable: true, 110 | value: 1, 111 | }, 112 | }); 113 | 114 | Object.defineProperties(ChannelSplitterNode.prototype, { 115 | [Symbol.toStringTag]: { 116 | __proto__: null, 117 | writable: false, 118 | enumerable: false, 119 | configurable: true, 120 | value: 'ChannelSplitterNode', 121 | }, 122 | 123 | }); 124 | 125 | return ChannelSplitterNode; 126 | }; 127 | -------------------------------------------------------------------------------- /js/ConstantSourceNode.js: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This file has been generated --------------------------- // 15 | // // 16 | // // 17 | // -------------------------------------------------------------------------- // 18 | // -------------------------------------------------------------------------- // 19 | 20 | /* eslint-disable no-unused-vars */ 21 | const conversions = require('webidl-conversions'); 22 | const { 23 | toSanitizedSequence, 24 | } = require('./lib/cast.js'); 25 | const { 26 | isFunction, 27 | kEnumerableProperty, 28 | } = require('./lib/utils.js'); 29 | const { 30 | throwSanitizedError, 31 | } = require('./lib/errors.js'); 32 | const { 33 | kNapiObj, 34 | kAudioBuffer, 35 | } = require('./lib/symbols.js'); 36 | /* eslint-enable no-unused-vars */ 37 | 38 | const AudioScheduledSourceNode = require('./AudioScheduledSourceNode.js'); 39 | 40 | module.exports = (jsExport, nativeBinding) => { 41 | class ConstantSourceNode extends AudioScheduledSourceNode { 42 | 43 | #offset = null; 44 | 45 | constructor(context, options) { 46 | 47 | if (arguments.length < 1) { 48 | throw new TypeError(`Failed to construct 'ConstantSourceNode': 1 argument required, but only ${arguments.length} present`); 49 | } 50 | 51 | if (!(context instanceof jsExport.BaseAudioContext)) { 52 | throw new TypeError(`Failed to construct 'ConstantSourceNode': argument 1 is not of type BaseAudioContext`); 53 | } 54 | 55 | // parsed version of the option to be passed to NAPI 56 | const parsedOptions = {}; 57 | 58 | if (options && typeof options !== 'object') { 59 | throw new TypeError('Failed to construct \'ConstantSourceNode\': argument 2 is not of type \'ConstantSourceOptions\''); 60 | } 
61 | 62 | if (options && options.offset !== undefined) { 63 | parsedOptions.offset = conversions['float'](options.offset, { 64 | context: `Failed to construct 'ConstantSourceNode': Failed to read the 'offset' property from ConstantSourceOptions: The provided value (${options.offset})`, 65 | }); 66 | } else { 67 | parsedOptions.offset = 1; 68 | } 69 | 70 | let napiObj; 71 | 72 | try { 73 | napiObj = new nativeBinding.ConstantSourceNode(context[kNapiObj], parsedOptions); 74 | } catch (err) { 75 | throwSanitizedError(err); 76 | } 77 | 78 | super(context, { 79 | [kNapiObj]: napiObj, 80 | }); 81 | 82 | this.#offset = new jsExport.AudioParam({ 83 | [kNapiObj]: this[kNapiObj].offset, 84 | }); 85 | } 86 | 87 | get offset() { 88 | if (!(this instanceof ConstantSourceNode)) { 89 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'ConstantSourceNode\''); 90 | } 91 | 92 | return this.#offset; 93 | } 94 | 95 | } 96 | 97 | Object.defineProperties(ConstantSourceNode, { 98 | length: { 99 | __proto__: null, 100 | writable: false, 101 | enumerable: false, 102 | configurable: true, 103 | value: 1, 104 | }, 105 | }); 106 | 107 | Object.defineProperties(ConstantSourceNode.prototype, { 108 | [Symbol.toStringTag]: { 109 | __proto__: null, 110 | writable: false, 111 | enumerable: false, 112 | configurable: true, 113 | value: 'ConstantSourceNode', 114 | }, 115 | offset: kEnumerableProperty, 116 | 117 | }); 118 | 119 | return ConstantSourceNode; 120 | }; 121 | -------------------------------------------------------------------------------- /js/GainNode.js: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This file has been generated --------------------------- // 15 | // // 16 | // // 17 | // -------------------------------------------------------------------------- // 18 | // -------------------------------------------------------------------------- // 19 | 20 | /* eslint-disable no-unused-vars */ 21 | const conversions = require('webidl-conversions'); 22 | const { 23 | toSanitizedSequence, 24 | } = require('./lib/cast.js'); 25 | const { 26 | isFunction, 27 | kEnumerableProperty, 28 | } = require('./lib/utils.js'); 29 | const { 30 | throwSanitizedError, 31 | } = require('./lib/errors.js'); 32 | const { 33 | kNapiObj, 34 | kAudioBuffer, 35 | } = require('./lib/symbols.js'); 36 | /* eslint-enable no-unused-vars */ 37 | 38 | const AudioNode = require('./AudioNode.js'); 39 | 40 | module.exports = (jsExport, nativeBinding) => { 41 | class GainNode extends AudioNode { 42 | 43 | #gain = null; 44 | 45 | constructor(context, options) { 46 | 47 | if (arguments.length < 1) { 48 | throw new TypeError(`Failed to construct 'GainNode': 1 argument required, but only ${arguments.length} present`); 49 | } 50 | 51 | if (!(context instanceof jsExport.BaseAudioContext)) { 52 | throw new TypeError(`Failed to construct 'GainNode': argument 1 is not of type 
BaseAudioContext`); 53 | } 54 | 55 | // parsed version of the option to be passed to NAPI 56 | const parsedOptions = {}; 57 | 58 | if (options && typeof options !== 'object') { 59 | throw new TypeError('Failed to construct \'GainNode\': argument 2 is not of type \'GainOptions\''); 60 | } 61 | 62 | if (options && options.gain !== undefined) { 63 | parsedOptions.gain = conversions['float'](options.gain, { 64 | context: `Failed to construct 'GainNode': Failed to read the 'gain' property from GainOptions: The provided value (${options.gain})`, 65 | }); 66 | } else { 67 | parsedOptions.gain = 1.0; 68 | } 69 | 70 | if (options && options.channelCount !== undefined) { 71 | parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, { 72 | enforceRange: true, 73 | context: `Failed to construct 'GainNode': Failed to read the 'channelCount' property from GainOptions: The provided value '${options.channelCount}'`, 74 | }); 75 | } 76 | 77 | if (options && options.channelCountMode !== undefined) { 78 | parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, { 79 | context: `Failed to construct 'GainNode': Failed to read the 'channelCountMode' property from GainOptions: The provided value '${options.channelCountMode}'`, 80 | }); 81 | } 82 | 83 | if (options && options.channelInterpretation !== undefined) { 84 | parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, { 85 | context: `Failed to construct 'GainNode': Failed to read the 'channelInterpretation' property from GainOptions: The provided value '${options.channelInterpretation}'`, 86 | }); 87 | } 88 | 89 | let napiObj; 90 | 91 | try { 92 | napiObj = new nativeBinding.GainNode(context[kNapiObj], parsedOptions); 93 | } catch (err) { 94 | throwSanitizedError(err); 95 | } 96 | 97 | super(context, { 98 | [kNapiObj]: napiObj, 99 | }); 100 | 101 | this.#gain = new jsExport.AudioParam({ 102 | [kNapiObj]: this[kNapiObj].gain, 103 | }); 104 | } 105 | 106 | get gain() { 107 | if (!(this instanceof GainNode)) { 108 | throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'GainNode\''); 109 | } 110 | 111 | return this.#gain; 112 | } 113 | 114 | } 115 | 116 | Object.defineProperties(GainNode, { 117 | length: { 118 | __proto__: null, 119 | writable: false, 120 | enumerable: false, 121 | configurable: true, 122 | value: 1, 123 | }, 124 | }); 125 | 126 | Object.defineProperties(GainNode.prototype, { 127 | [Symbol.toStringTag]: { 128 | __proto__: null, 129 | writable: false, 130 | enumerable: false, 131 | configurable: true, 132 | value: 'GainNode', 133 | }, 134 | gain: kEnumerableProperty, 135 | 136 | }); 137 | 138 | return GainNode; 139 | }; 140 | -------------------------------------------------------------------------------- /js/MediaStreamAudioSourceNode.js: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------------------------- // 2 | // -------------------------------------------------------------------------- // 3 | // // 4 | // // 5 | // // 6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // 7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // 8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // 9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // 10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // 11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // 12 | // // 13 | // // 14 | // - This 
-------------------------------------------------------------------------------- /js/MediaStreamAudioSourceNode.js: --------------------------------------------------------------------------------
1 | // -------------------------------------------------------------------------- //
2 | // -------------------------------------------------------------------------- //
3 | // //
4 | // //
5 | // //
6 | // ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ //
7 | // ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ //
8 | // ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ //
9 | // ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ //
10 | // ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ //
11 | // ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ //
12 | // //
13 | // //
14 | // - This file has been generated --------------------------- //
15 | // //
16 | // //
17 | // -------------------------------------------------------------------------- //
18 | // -------------------------------------------------------------------------- //
19 | 
20 | /* eslint-disable no-unused-vars */
21 | const conversions = require('webidl-conversions');
22 | const {
23 |   toSanitizedSequence,
24 | } = require('./lib/cast.js');
25 | const {
26 |   isFunction,
27 |   kEnumerableProperty,
28 | } = require('./lib/utils.js');
29 | const {
30 |   throwSanitizedError,
31 | } = require('./lib/errors.js');
32 | const {
33 |   kNapiObj,
34 |   kAudioBuffer,
35 | } = require('./lib/symbols.js');
36 | /* eslint-enable no-unused-vars */
37 | 
38 | const AudioNode = require('./AudioNode.js');
39 | 
40 | module.exports = (jsExport, nativeBinding) => {
41 |   class MediaStreamAudioSourceNode extends AudioNode {
42 | 
43 |     constructor(context, options) {
44 | 
45 |       if (arguments.length < 2) {
46 |         throw new TypeError(`Failed to construct 'MediaStreamAudioSourceNode': 2 arguments required, but only ${arguments.length} present`);
47 |       }
48 | 
49 |       if (!(context instanceof jsExport.AudioContext)) {
50 |         throw new TypeError(`Failed to construct 'MediaStreamAudioSourceNode': argument 1 is not of type AudioContext`);
51 |       }
52 | 
53 |       // parsed version of the option to be passed to NAPI
54 |       const parsedOptions = {};
55 | 
56 |       if (options && typeof options !== 'object') {
57 |         throw new TypeError('Failed to construct \'MediaStreamAudioSourceNode\': argument 2 is not of type \'MediaStreamAudioSourceOptions\'');
58 |       }
59 | 
60 |       // required options (also guards against `options === null`)
61 |       if (options === null || typeof options !== 'object' || options.mediaStream === undefined) {
62 |         throw new TypeError('Failed to construct \'MediaStreamAudioSourceNode\': Failed to read the \'mediaStream\' property from MediaStreamAudioSourceOptions: Required member is undefined');
63 |       }
64 | 
65 |       parsedOptions.mediaStream = options.mediaStream;
66 | 
67 |       let napiObj;
68 | 
69 |       try {
70 |         napiObj = new nativeBinding.MediaStreamAudioSourceNode(context[kNapiObj], parsedOptions);
71 |       } catch (err) {
72 |         throwSanitizedError(err);
73 |       }
74 | 
75 |       super(context, {
76 |         [kNapiObj]: napiObj,
77 |       });
78 | 
79 |     }
80 | 
81 |     get mediaStream() {
82 |       if (!(this instanceof MediaStreamAudioSourceNode)) {
83 |         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'MediaStreamAudioSourceNode\'');
84 |       }
85 | 
86 |       return this[kNapiObj].mediaStream;
87 |     }
88 | 
89 |   }
90 | 
91 |   Object.defineProperties(MediaStreamAudioSourceNode, {
92 |     length: {
93 |       __proto__: null,
94 |       writable: false,
95 |       enumerable: false,
96 |       configurable: true,
97 |       value: 2,
98 |     },
99 |   });
100 | 
101 |   Object.defineProperties(MediaStreamAudioSourceNode.prototype, {
102 |     [Symbol.toStringTag]: {
103 |       __proto__: null,
104 |       writable: false,
105 |       enumerable: false,
106 |       configurable: true,
107 |       value: 'MediaStreamAudioSourceNode',
108 |     },
109 | 
110 |     mediaStream: kEnumerableProperty,
111 | 
112 |   });
113 | 
114 |   return MediaStreamAudioSourceNode;
115 | };
116 | 
-------------------------------------------------------------------------------- /js/PeriodicWave.js: --------------------------------------------------------------------------------
1 | const conversions = require('webidl-conversions');
2 | 
3 | const { throwSanitizedError } = require('./lib/errors.js');
4 | const { toSanitizedSequence } = require('./lib/cast.js');
5 | const { kNapiObj } = require('./lib/symbols.js');
6 | const { kHiddenProperty } = require('./lib/utils.js');
7 | 
8 | module.exports = (jsExport, nativeBinding) => {
9 |   class PeriodicWave {
10 |     constructor(context, options) {
11 |       if (arguments.length < 1) {
12 |         throw new TypeError(`Failed to construct 'PeriodicWave': 1 argument required, but only ${arguments.length} present`);
13 |       }
14 | 
15 |       if (!(context instanceof jsExport.BaseAudioContext)) {
16 |         throw new TypeError(`Failed to construct 'PeriodicWave': argument 1 is not of type BaseAudioContext`);
17 |       }
18 | 
19 |       const parsedOptions = {};
20 | 
21 |       if (options && 'real' in options) {
22 |         try {
23 |           parsedOptions.real = toSanitizedSequence(options.real, Float32Array);
24 |         } catch (err) {
25 |           throw new TypeError(`Failed to construct 'PeriodicWave': Failed to read the 'real' property from PeriodicWaveOptions: The provided value ${err.message}`);
26 |         }
27 |       }
28 | 
29 |       if (options && 'imag' in options) {
30 |         try {
31 |           parsedOptions.imag = toSanitizedSequence(options.imag, Float32Array);
32 |         } catch (err) {
33 |           throw new TypeError(`Failed to construct 'PeriodicWave': Failed to read the 'imag' property from PeriodicWaveOptions: The provided value ${err.message}`);
34 |         }
35 |       }
36 | 
37 |       // disableNormalization = false
38 |       if (options && 'disableNormalization' in options) {
39 |         parsedOptions.disableNormalization = conversions['boolean'](options.disableNormalization, {
40 |           context: `Failed to construct 'PeriodicWave': Failed to read the 'disableNormalization' property from PeriodicWaveOptions: The provided value`,
41 |         });
42 |       } else {
43 |         parsedOptions.disableNormalization = false;
44 |       }
45 | 
46 |       try {
47 |         const napiObj = new nativeBinding.PeriodicWave(context[kNapiObj], parsedOptions);
48 |         Object.defineProperty(this, kNapiObj, {
49 |           value: napiObj,
50 |           ...kHiddenProperty,
51 |         });
52 |       } catch (err) {
53 |         throwSanitizedError(err);
54 |       }
55 |     }
56 |   }
57 | 
58 |   Object.defineProperties(PeriodicWave, {
59 |     length: {
60 |       __proto__: null,
61 |       writable: false,
62 |       enumerable: false,
63 |       configurable: true,
64 |       value: 1,
65 |     },
66 |   });
67 | 
68 |   Object.defineProperties(PeriodicWave.prototype, {
69 |     [Symbol.toStringTag]: {
70 |       __proto__: null,
71 |       writable: false,
72 |       enumerable: false,
73 |       configurable: true,
74 |       value: 'PeriodicWave',
75 |     },
76 |   });
77 | 
78 |   return PeriodicWave;
79 | };
80 | 
81 | 
-------------------------------------------------------------------------------- /js/lib/cast.js: --------------------------------------------------------------------------------
1 | exports.toSanitizedSequence = function toSanitizedSequence(data, targetCtor) {
2 |   if (
3 |     (data.buffer && data.buffer instanceof ArrayBuffer)
4 |     || Array.isArray(data)
5 |   ) {
6 |     data = new targetCtor(data);
7 |   } else {
8 |     throw new TypeError(`cannot be converted to sequence of ${targetCtor.name}`);
9 |   }
10 | 
11 |   // check it only contains finite values
12 |   for (let i = 0; i < data.length; i++) {
13 |     if (!Number.isFinite(data[i])) {
14 |       throw new TypeError(`should contain only finite values`);
15 |     }
16 |   }
17 | 
18 |   return data;
19 | }
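20 | 
21 | // For illustration only - a quick sketch of how the sanitizer above behaves
22 | // (mirrors the cases covered in tests/cast.spec.mjs):
23 | //
24 | //   toSanitizedSequence([0, 0.5, 1], Float32Array);              // -> Float32Array [0, 0.5, 1]
25 | //   toSanitizedSequence(new Float64Array([0, 1]), Float32Array); // typed arrays accepted too
26 | //   toSanitizedSequence([0, NaN], Float32Array);                 // throws TypeError (non-finite value)
27 | //   toSanitizedSequence('nope', Float32Array);                   // throws TypeError (not array-like)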
-------------------------------------------------------------------------------- /js/lib/errors.js: --------------------------------------------------------------------------------
1 | const { EOL } = require('os');
2 | const path = require('path');
3 | 
4 | const internalPath = path.join('node-web-audio-api', 'js');
5 | const internalRe = new RegExp(internalPath);
6 | 
7 | function overrideStack(originalError, newError) {
8 |   // override previous error message
9 |   const stack = originalError.stack.replace(originalError.message, newError.message);
10 |   const lines = stack.split(EOL);
11 | 
12 |   // remove all lines that refer to internal classes, i.e. that contain `node-web-audio-api/js`
13 |   for (let i = lines.length - 1; i > 0; i--) {
14 |     const line = lines[i];
15 |     if (internalRe.test(line)) {
16 |       lines.splice(i, 1);
17 |     }
18 |   }
19 | 
20 |   // override new stack with modified one
21 |   newError.stack = lines.join(EOL);
22 | }
23 | 
24 | exports.throwSanitizedError = function throwSanitizedError(err) {
25 |   // We need to handle the output of `assert_ne!` as well, e.g.
26 |   // assertion `left != right` failed: NotSupportedError - StereoPannerNode channel count mode cannot be set to max
27 |   // left: Max
28 |   // right: Max
29 |   let originalMessage = err.message;
30 |   originalMessage = originalMessage.replace('assertion `left != right` failed: ', '');
31 |   originalMessage = originalMessage.replace('assertion `left == right` failed: ', '');
32 |   originalMessage = originalMessage.split(EOL)[0]; // keep only first line
33 | 
34 |   // "Native Errors"
35 |   if (originalMessage.startsWith('TypeError')) {
36 |     const msg = originalMessage.replace(/^TypeError - /, '');
37 |     const error = new TypeError(msg);
38 |     overrideStack(err, error);
39 | 
40 |     throw error;
41 |   } else if (originalMessage.startsWith('RangeError')) {
42 |     const msg = originalMessage.replace(/^RangeError - /, '');
43 |     const error = new RangeError(msg);
44 |     overrideStack(err, error);
45 | 
46 |     throw error;
47 |   }
48 | 
49 |   // DOM Exceptions
50 |   if (originalMessage.startsWith('NotSupportedError')) {
51 |     const msg = originalMessage.replace(/^NotSupportedError - /, '');
52 |     const error = new DOMException(msg, 'NotSupportedError');
53 |     overrideStack(err, error);
54 | 
55 |     throw error;
56 |   } else if (originalMessage.startsWith('InvalidStateError')) {
57 |     const msg = originalMessage.replace(/^InvalidStateError - /, '');
58 |     const error = new DOMException(msg, 'InvalidStateError');
59 |     overrideStack(err, error);
60 | 
61 |     throw error;
62 |   } else if (originalMessage.startsWith('IndexSizeError')) {
63 |     const msg = originalMessage.replace(/^IndexSizeError - /, '');
64 |     const error = new DOMException(msg, 'IndexSizeError');
65 |     overrideStack(err, error);
66 | 
67 |     throw error;
68 |   } else if (originalMessage.startsWith('InvalidAccessError')) {
69 |     const msg = originalMessage.replace(/^InvalidAccessError - /, '');
70 |     const error = new DOMException(msg, 'InvalidAccessError');
71 |     overrideStack(err, error);
72 | 
73 |     throw error;
74 |   } else if (originalMessage.startsWith('NotFoundError')) {
75 |     const msg = originalMessage.replace(/^NotFoundError - /, '');
76 |     const error = new DOMException(msg, 'NotFoundError');
77 |     overrideStack(err, error);
78 | 
79 |     throw error;
80 |   }
81 | 
82 |   console.warn('[lib/errors.js] Unexpected Rust error', err);
83 |   throw err;
84 | }
85 | 
-------------------------------------------------------------------------------- /js/lib/events.js: --------------------------------------------------------------------------------
1 | const { isFunction } = require('./utils.js');
2 | 
3 | module.exports.propagateEvent = function propagateEvent(eventTarget, event) {
4 |   // call the `on<type>` attribute first if it exists
5 |   if (isFunction(eventTarget[`on${event.type}`])) {
6 |     eventTarget[`on${event.type}`](event);
7 |   }
8 |   // then dispatch to added event listeners
9 |   eventTarget.dispatchEvent(event);
10 | }
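11 | 
12 | // For illustration only - given any facade with event-target semantics, e.g. an
13 | // AudioScheduledSourceNode facade exposing an `onended` attribute:
14 | //
15 | //   propagateEvent(node, new Event('ended'));
16 | //   // calls node.onended(event) first, then node.dispatchEvent(event)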
-------------------------------------------------------------------------------- /js/lib/symbols.js: --------------------------------------------------------------------------------
1 | module.exports.kNapiObj = Symbol('node-web-audio-api:napi-obj');
2 | module.exports.kAudioBuffer = Symbol('node-web-audio-api:audio-buffer');
3 | module.exports.kPrivateConstructor = Symbol('node-web-audio-api:private-constructor');
4 | module.exports.kCreateProcessor = Symbol('node-web-audio-api:create-processor');
5 | module.exports.kProcessorRegistered = Symbol('node-web-audio-api:processor-registered');
6 | module.exports.kGetParameterDescriptors = Symbol('node-web-audio-api:get-parameter-descriptors');
7 | module.exports.kWorkletRelease = Symbol('node-web-audio-api:worklet-release');
8 | module.exports.kCheckProcessorsCreated = Symbol('node-web-audio-api:check-processor-created');
9 | 
10 | // semi-private keys for event listeners
11 | 
12 | // # BaseAudioContext
13 | module.exports.kOnStateChange = Symbol.for('node-web-audio-api:onstatechange');
14 | // # AudioContext
15 | module.exports.kOnSinkChange = Symbol.for('node-web-audio-api:onsinkchange');
16 | // # OfflineAudioContext
17 | // > [The onstatechange] event is fired before the complete event is fired
18 | // cf. https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
19 | // @fixme: for now the `complete` event is triggered **before** startRendering fulfills
20 | module.exports.kOnComplete = Symbol.for('node-web-audio-api:oncomplete');
21 | // # AudioScheduledSourceNode
22 | module.exports.kOnEnded = Symbol.for('node-web-audio-api:onended');
23 | // # ScriptProcessorNode
24 | module.exports.kOnAudioProcess = Symbol.for('node-web-audio-api:onaudioprocess');
25 | // # AudioRenderCapacity
26 | module.exports.kOnUpdate = Symbol.for('node-web-audio-api:onupdate');
27 | 
-------------------------------------------------------------------------------- /js/lib/utils.js: --------------------------------------------------------------------------------
1 | exports.isFunction = function isFunction(val) {
2 |   return Object.prototype.toString.call(val) == '[object Function]' ||
3 |     Object.prototype.toString.call(val) == '[object AsyncFunction]';
4 | };
5 | 
6 | const kEnumerableProperty = { __proto__: null };
7 | kEnumerableProperty.enumerable = true;
8 | Object.freeze(kEnumerableProperty);
9 | 
10 | exports.kEnumerableProperty = kEnumerableProperty;
11 | 
12 | const kHiddenProperty = { __proto__: null };
13 | kHiddenProperty.enumerable = false;
14 | Object.freeze(kHiddenProperty);
15 | 
16 | exports.kHiddenProperty = kHiddenProperty;
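17 | 
18 | // For illustration only - these frozen descriptors are meant to be spread into
19 | // property definitions, as the generated facades do, e.g.:
20 | //
21 | //   Object.defineProperties(GainNode.prototype, { gain: kEnumerableProperty });
22 | //   Object.defineProperty(node, kNapiObj, { value: napiObj, ...kHiddenProperty });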
-------------------------------------------------------------------------------- /load-native.cjs: --------------------------------------------------------------------------------
1 | const fs = require('node:fs');
2 | const { platform, arch } = process;
3 | 
4 | let nativeBinding = null;
5 | let loadError = null;
6 | 
7 | switch (platform) {
8 |   case 'win32':
9 |     switch (arch) {
10 |       case 'x64':
11 |         try {
12 |           nativeBinding = require('./node-web-audio-api.win32-x64-msvc.node');
13 |         } catch (e) {
14 |           loadError = e;
15 |         }
16 |         break;
17 |       case 'arm64':
18 |         try {
19 |           nativeBinding = require('./node-web-audio-api.win32-arm64-msvc.node');
20 |         } catch (e) {
21 |           loadError = e;
22 |         }
23 |         break;
24 |       default:
25 |         loadError = new Error(`Unsupported architecture on Windows: ${arch}`);
26 |     }
27 |     break;
28 |   case 'darwin':
29 |     switch (arch) {
30 |       case 'x64':
31 |         try {
32 |           nativeBinding = require('./node-web-audio-api.darwin-x64.node');
33 |         } catch (e) {
34 |           loadError = e;
35 |         }
36 |         break;
37 |       case 'arm64':
38 |         try {
39 |           nativeBinding = require('./node-web-audio-api.darwin-arm64.node');
40 |         } catch (e) {
41 |           loadError = e;
42 |         }
43 |         break;
44 |       default:
45 |         loadError = new Error(`Unsupported architecture on macOS: ${arch}`);
46 |     }
47 |     break;
48 |   // case 'freebsd': x64 only
49 |   case 'linux':
50 |     switch (arch) {
51 |       // @todo
52 |       // - support riscv64 arch
53 |       // - support musl C lib
54 |       case 'x64':
55 |         try {
56 |           nativeBinding = require('./node-web-audio-api.linux-x64-gnu.node');
57 |         } catch (e) {
58 |           loadError = e;
59 |         }
60 |         break;
61 |       case 'arm64':
62 |         try {
63 |           nativeBinding = require('./node-web-audio-api.linux-arm64-gnu.node');
64 |         } catch (e) {
65 |           loadError = e;
66 |         }
67 |         break;
68 |       case 'arm':
69 |         try {
70 |           nativeBinding = require('./node-web-audio-api.linux-arm-gnueabihf.node');
71 |         } catch (e) {
72 |           loadError = e;
73 |         }
74 |         break;
75 |       default:
76 |         loadError = new Error(`Unsupported architecture on Linux: ${arch}`);
77 |     }
78 |     break;
79 |   default:
80 |     loadError = new Error(`Unsupported OS: ${platform}, architecture: ${arch}`);
81 | }
82 | 
83 | // use local build if one exists (debug build takes precedence over release)
84 | if (fs.existsSync('node-web-audio-api.build-release.node')) {
85 |   nativeBinding = require('./node-web-audio-api.build-release.node');
86 | }
87 | 
88 | if (fs.existsSync('node-web-audio-api.build-debug.node')) {
89 |   nativeBinding = require('./node-web-audio-api.build-debug.node');
90 | }
91 | 
92 | if (!nativeBinding) {
93 |   if (loadError) {
94 |     throw loadError;
95 |   }
96 | 
97 |   throw new Error(`Failed to load native binding for OS: ${platform}, architecture: ${arch}`);
98 | }
99 | 
100 | module.exports = nativeBinding;
101 | 
102 | 
-------------------------------------------------------------------------------- /package.json: --------------------------------------------------------------------------------
1 | {
2 |   "name": "node-web-audio-api",
3 |   "version": "1.0.4",
4 |   "author": "Benjamin Matuszewski",
5 |   "description": "Web Audio API implementation for Node.js",
6 |   "exports": {
7 |     "import": "./index.mjs",
8 |     "require": "./index.cjs",
9 |     "types": "./index.d.ts"
10 |   },
11 |   "repository": {
12 |     "type": "git",
13 |     "url": "git+https://github.com/ircam-ismm/node-web-audio-api.git"
14 |   },
15 |   "license": "BSD-3-Clause",
16 |   "keywords": [
17 |     "audio",
18 |     "web audio api",
19 |     "webaudio",
20 |     "sound",
21 |     "music",
22 |     "dsp",
23 |     "rust",
24 |     "node-api"
25 |   ],
26 |   "engines": {
27 |     "node": ">= 14"
28 |   },
29 |   "napi": {
30 |     "name": "node-web-audio-api"
31 |   },
32 |   "types": "./index.d.ts",
33 |   "publishConfig": {
34 |     "registry": "https://registry.npmjs.org/",
35 |     "access": "public"
36 |   },
37 |   "scripts": {
38 |     "build": "npm run generate && cargo build --release && node ./.scripts/move-artifact.mjs --release",
39 |     "build:jack": "npm run generate && cargo build --features jack --release && node ./.scripts/move-artifact.mjs --release",
40 |     "build:debug": "npm run generate && cargo build && node ./.scripts/move-artifact.mjs",
41 |     "build:only": "cargo build --release && node ./.scripts/move-artifact.mjs --release",
42 |     "check": "cargo fmt && cargo clippy",
43 |     "generate": "node generator/index.mjs && cargo fmt",
44 |     "lint": "npx eslint index.cjs index.mjs && npx eslint js/*.js && npx eslint examples/*.js",
45 |     "preversion": "npm install && npm run generate",
46 |     "postversion": "cargo bump $npm_package_version && git commit -am \"v$npm_package_version\" && node .scripts/check-changelog.mjs",
47 |     "test": "mocha tests/*.spec.mjs",
48 |     "test:ci": "mocha tests/*.spec.mjs -- --ci",
49 |     "test:only": "mocha",
50 |     "wpt": "npm run build && node ./.scripts/wpt-harness.mjs",
51 |     "wpt:only": "node ./.scripts/wpt-harness.mjs"
52 |   },
53 |   "devDependencies": {
54 |     "@ircam/eslint-config": "^2.0.0",
55 |     "@ircam/sc-scheduling": "^1.0.0",
56 |     "@ircam/sc-utils": "^1.9.0",
57 |     "@sindresorhus/slugify": "^2.1.1",
58 |     "camelcase": "^8.0.0",
59 |     "chai": "^5.1.2",
60 |     "chalk": "^5.3.0",
61 |     "cli-table": "^0.3.11",
62 |     "commander": "^13.0.0",
63 |     "dotenv": "^16.0.3",
64 |     "eslint": "^9.18.0",
65 |     "js-beautify": "^1.15.1",
66 |     "mocha": "^11.0.1",
67 |     "octokit": "^4.1.0",
68 |     "template-literal": "^1.0.4",
69 |     "webidl2": "^24.2.0",
70 |     "wpt-runner": "^5.0.0"
71 |   },
72 |   "dependencies": {
73 |     "caller": "^1.1.0",
74 |     "node-fetch": "^3.3.2",
75 |     "webidl-conversions": "^7.0.0"
76 |   }
77 | }
78 | 
-------------------------------------------------------------------------------- /src/audio_destination_node.rs: --------------------------------------------------------------------------------
1 | use crate::*;
2 | use napi::*;
3 | use napi_derive::js_function;
4 | use web_audio_api::context::*;
5 | use web_audio_api::node::*;
6 | 
7 | pub(crate) struct NapiAudioDestinationNode(AudioDestinationNode);
8 | 
9 | // https://webaudio.github.io/web-audio-api/#AudioDestinationNode
10 | //
11 | // @note: This should be generated as any other AudioNode, but has no constructor
12 | // defined in IDL, so the generation script crashes
13 | impl NapiAudioDestinationNode {
14 |     pub fn create_js_class(env: &Env) -> Result<JsFunction> {
15 |         let interface = audio_node_interface![
16 |             Property::new("maxChannelCount")?.with_getter(get_max_channel_count)
17 |         ];
18 | 
19 |         env.define_class("AudioDestinationNode", constructor, &interface)
20 |     }
21 | 
22 |     pub fn unwrap(&self) -> &AudioDestinationNode {
23 |         &self.0
24 |     }
25 | }
26 | 
27 | #[js_function(1)]
28 | fn constructor(ctx: CallContext) -> Result<JsUndefined> {
29 |     let mut js_this = ctx.this_unchecked::<JsObject>();
30 | 
31 |     let js_audio_context = ctx.get::<JsObject>(0)?;
32 | 
33 |     // create native node
34 |     let audio_context_name =
35 |         js_audio_context.get_named_property::<JsString>("Symbol.toStringTag")?;
36 |     let audio_context_utf8_name = audio_context_name.into_utf8()?.into_owned()?;
37 |     let audio_context_str = &audio_context_utf8_name[..];
38 | 
39 |     let native_node = match audio_context_str {
40 |         "AudioContext" => {
41 |             let napi_audio_context = ctx.env.unwrap::<NapiAudioContext>(&js_audio_context)?;
42 |             let audio_context = napi_audio_context.unwrap();
43 |             audio_context.destination() // this is also different from other audio nodes
44 |         }
45 |         "OfflineAudioContext" => {
46 |             let napi_audio_context = ctx
47 |                 .env
48 |                 .unwrap::<NapiOfflineAudioContext>(&js_audio_context)?;
49 |             let audio_context = napi_audio_context.unwrap();
50 |             audio_context.destination() // this is also different from other audio nodes
51 |         }
52 |         &_ => panic!("not supported"),
53 |     };
54 | 
55 |     js_this.define_properties(&[
56 |         Property::new("context")?
57 |             .with_value(&js_audio_context)
58 |             .with_property_attributes(PropertyAttributes::Enumerable),
59 |         // this must be put on the instance and not in the prototype to be reachable
60 |         Property::new("Symbol.toStringTag")?
61 |             .with_value(&ctx.env.create_string("AudioDestinationNode")?)
62 |             .with_property_attributes(PropertyAttributes::Static),
63 |     ])?;
64 | 
65 |     // finalize instance creation
66 |     let napi_node = NapiAudioDestinationNode(native_node);
67 |     ctx.env.wrap(&mut js_this, napi_node)?;
68 | 
69 |     ctx.env.get_undefined()
70 | }
71 | 
72 | audio_node_impl!(NapiAudioDestinationNode);
73 | 
74 | // -------------------------------------------------
75 | // AudioDestinationNode Interface
76 | // -------------------------------------------------
77 | 
78 | #[js_function]
79 | fn get_max_channel_count(ctx: CallContext) -> Result<JsNumber> {
80 |     let js_this = ctx.this_unchecked::<JsObject>();
81 |     let napi_node = ctx.env.unwrap::<NapiAudioDestinationNode>(&js_this)?;
82 |     let node = napi_node.unwrap();
83 | 
84 |     let max_channel_count = node.max_channel_count() as f64;
85 | 
86 |     ctx.env.create_double(max_channel_count)
87 | }
88 | 
-------------------------------------------------------------------------------- /src/audio_render_capacity.rs: --------------------------------------------------------------------------------
1 | use crate::*;
2 | 
3 | use napi::threadsafe_function::{ThreadSafeCallContext, ThreadsafeFunctionCallMode};
4 | use napi::*;
5 | use napi_derive::js_function;
6 | use web_audio_api::{AudioRenderCapacity, AudioRenderCapacityEvent, AudioRenderCapacityOptions};
7 | 
8 | pub(crate) struct NapiAudioRenderCapacity(AudioRenderCapacity);
9 | 
10 | impl NapiAudioRenderCapacity {
11 |     pub fn create_js_class(env: &Env) -> Result<JsFunction> {
12 |         env.define_class(
13 |             "AudioRenderCapacity",
14 |             constructor,
15 |             &[
16 |                 Property::new("start")?.with_method(start),
17 |                 Property::new("stop")?.with_method(stop),
18 |                 // Workaround to bind the `update` events to EventTarget.
19 |                 // This must be called from the JS facade ctor, as the JS handlers are added
20 |                 // to the Napi object after its instantiation and we don't have any initial `resume` call.
21 |                 Property::new("listen_to_events")?.with_method(listen_to_events),
22 |             ],
23 |         )
24 |     }
25 | 
26 |     pub fn unwrap(&mut self) -> &mut AudioRenderCapacity {
27 |         &mut self.0
28 |     }
29 | }
30 | 
31 | // https://webaudio.github.io/web-audio-api/#AudioRenderCapacity
32 | #[js_function(1)]
33 | fn constructor(ctx: CallContext) -> Result<JsUndefined> {
34 |     let mut js_this = ctx.this_unchecked::<JsObject>();
35 | 
36 |     let js_audio_context = ctx.get::<JsObject>(0)?;
37 | 
38 |     js_this.define_properties(&[
39 |         // this must be put on the instance and not in the prototype to be reachable
40 |         Property::new("Symbol.toStringTag")?
41 |             .with_value(&ctx.env.create_string("AudioRenderCapacity")?)
42 |             .with_property_attributes(PropertyAttributes::Static),
43 |     ])?;
44 | 
45 |     // create native node
46 |     let audio_context_name =
47 |         js_audio_context.get_named_property::<JsString>("Symbol.toStringTag")?;
48 |     let audio_context_utf8_name = audio_context_name.into_utf8()?.into_owned()?;
49 |     let audio_context_str = &audio_context_utf8_name[..];
50 | 
51 |     let native_node = match audio_context_str {
52 |         "AudioContext" => {
53 |             let napi_audio_context = ctx.env.unwrap::<NapiAudioContext>(&js_audio_context)?;
54 |             let audio_context = napi_audio_context.unwrap();
55 |             audio_context.render_capacity()
56 |         }
57 |         &_ => unreachable!(),
58 |     };
59 | 
60 |     // finalize instance creation
61 |     let napi_node = NapiAudioRenderCapacity(native_node);
62 |     ctx.env.wrap(&mut js_this, napi_node)?;
63 | 
64 |     ctx.env.get_undefined()
65 | }
66 | 
67 | #[js_function(1)]
68 | fn start(ctx: CallContext) -> Result<JsUndefined> {
69 |     let js_this = ctx.this_unchecked::<JsObject>();
70 |     let napi_node = ctx.env.unwrap::<NapiAudioRenderCapacity>(&js_this)?;
71 |     let node = napi_node.unwrap();
72 | 
73 |     let js_options = ctx.get::<JsObject>(0)?;
74 |     let update_interval = js_options
75 |         .get_named_property::<JsNumber>("updateInterval")?
76 |         .get_double()?;
77 | 
78 |     node.start(AudioRenderCapacityOptions { update_interval });
79 | 
80 |     ctx.env.get_undefined()
81 | }
82 | 
83 | #[js_function]
84 | fn stop(ctx: CallContext) -> Result<JsUndefined> {
85 |     let js_this = ctx.this_unchecked::<JsObject>();
86 |     let napi_node = ctx.env.unwrap::<NapiAudioRenderCapacity>(&js_this)?;
87 |     let node = napi_node.unwrap();
88 | 
89 |     node.stop();
90 | 
91 |     ctx.env.get_undefined()
92 | }
93 | 
94 | #[js_function]
95 | fn listen_to_events(ctx: CallContext) -> Result<JsUndefined> {
96 |     let js_this = ctx.this_unchecked::<JsObject>();
97 |     let napi_node = ctx.env.unwrap::<NapiAudioRenderCapacity>(&js_this)?;
98 |     let node = napi_node.unwrap();
99 | 
100 |     let k_onupdate = ctx.env.symbol_for("node-web-audio-api:onupdate")?;
101 |     let update_cb = js_this.get_property(k_onupdate).unwrap();
102 |     let mut update_tsfn = ctx.env.create_threadsafe_function(
103 |         &update_cb,
104 |         0,
105 |         |ctx: ThreadSafeCallContext<AudioRenderCapacityEvent>| {
106 |             let event = ctx.value;
107 |             let mut js_event = ctx.env.create_object()?;
108 | 
109 |             js_event.set_named_property("type", ctx.env.create_string("update"))?;
110 |             js_event.set_named_property("timestamp", ctx.env.create_double(event.timestamp))?;
111 |             js_event
112 |                 .set_named_property("averageLoad", ctx.env.create_double(event.average_load))?;
113 |             js_event.set_named_property("peakLoad", ctx.env.create_double(event.peak_load))?;
114 |             js_event
115 |                 .set_named_property("underrunRatio", ctx.env.create_double(event.underrun_ratio))?;
116 | 
117 |             Ok(vec![js_event])
118 |         },
119 |     )?;
120 | 
121 |     let _ = update_tsfn.unref(ctx.env);
122 | 
123 |     node.set_onupdate(move |e| {
124 |         update_tsfn.call(Ok(e), ThreadsafeFunctionCallMode::Blocking);
125 |     });
126 | 
127 |     ctx.env.get_undefined()
128 | }
129 | 
-------------------------------------------------------------------------------- /src/base_audio_context.rs: --------------------------------------------------------------------------------
1 | #[macro_export]
2 | macro_rules! base_audio_context_interface {
3 |     [$($e:expr),*] => {
4 |         [
5 |             Property::new("currentTime")?.with_getter(get_current_time),
6 |             Property::new("sampleRate")?.with_getter(get_sample_rate),
7 |             Property::new("listener")?.with_getter(get_listener),
8 |             Property::new("state")?.with_getter(get_state),
9 |             Property::new("decodeAudioData")?.with_method(decode_audio_data),
10 |             $($e,)*
11 |         ]
12 |     }
13 | }
14 | 
15 | #[macro_export]
16 | macro_rules! base_audio_context_impl {
17 |     ($napi_struct:ident) => {
18 |         #[js_function]
19 |         fn get_current_time(ctx: CallContext) -> Result<JsNumber> {
20 |             let js_this = ctx.this_unchecked::<JsObject>();
21 |             let napi_obj = ctx.env.unwrap::<$napi_struct>(&js_this)?;
22 |             let obj = napi_obj.unwrap();
23 | 
24 |             let current_time = obj.current_time();
25 |             ctx.env.create_double(current_time)
26 |         }
27 | 
28 |         #[js_function]
29 |         fn get_sample_rate(ctx: CallContext) -> Result<JsNumber> {
30 |             let js_this = ctx.this_unchecked::<JsObject>();
31 |             let napi_obj = ctx.env.unwrap::<$napi_struct>(&js_this)?;
32 |             let obj = napi_obj.unwrap();
33 | 
34 |             let sample_rate = obj.sample_rate() as f64;
35 |             ctx.env.create_double(sample_rate)
36 |         }
37 | 
38 |         // use a getter so we can lazily create the listener on first call and retrieve it afterward
39 |         #[js_function]
40 |         fn get_listener(ctx: CallContext) -> Result<JsObject> {
41 |             let mut js_this = ctx.this_unchecked::<JsObject>();
42 | 
43 |             // reproduce lazy instantiation strategy from the rust crate
44 |             if js_this.has_named_property("__listener__").ok().unwrap() {
45 |                 js_this.get_named_property("__listener__")
46 |             } else {
47 |                 let store_ref: &mut napi::Ref<()> = ctx.env.get_instance_data()?.unwrap();
48 |                 let store: JsObject = ctx.env.get_reference_value(store_ref)?;
49 |                 let ctor: JsFunction = store.get_named_property("AudioListener")?;
50 |                 let js_obj = ctor.new_instance(&[&js_this])?;
51 |                 js_this.set_named_property("__listener__", &js_obj)?;
52 | 
53 |                 Ok(js_obj)
54 |             }
55 |         }
56 | 
57 |         #[js_function]
58 |         fn get_state(ctx: CallContext) -> Result<JsString> {
59 |             let js_this = ctx.this_unchecked::<JsObject>();
60 |             let napi_obj = ctx.env.unwrap::<$napi_struct>(&js_this)?;
61 |             let obj = napi_obj.unwrap();
62 | 
63 |             let state = obj.state();
64 |             let state_str = match state {
65 |                 AudioContextState::Suspended => "suspended",
66 |                 AudioContextState::Running => "running",
67 |                 AudioContextState::Closed => "closed",
68 |             };
69 | 
70 |             ctx.env.create_string(state_str)
71 |         }
72 | 
73 |         // ----------------------------------------------------
74 |         // METHODS
75 |         // ----------------------------------------------------
76 | 
77 |         #[js_function(1)]
78 |         fn decode_audio_data(ctx: CallContext) -> Result<JsObject> {
79 |             let js_this = ctx.this_unchecked::<JsObject>();
80 |             let napi_obj = ctx.env.unwrap::<$napi_struct>(&js_this)?;
81 |             let clone = Arc::clone(&napi_obj.0);
82 | 
83 |             let js_buffer = ctx.get::<JsArrayBuffer>(0)?.into_value()?;
84 |             let cursor = Cursor::new(js_buffer.to_vec());
85 | 
86 |             ctx.env.execute_tokio_future(
87 |                 async move { Ok(clone.decode_audio_data_sync(cursor)) },
88 |                 |&mut env, result| {
89 |                     match result {
90 |                         Ok(audio_buffer) => {
91 |                             // create js audio buffer instance
92 |                             let store_ref: &mut napi::Ref<()> = env.get_instance_data()?.unwrap();
93 |                             let store: JsObject = env.get_reference_value(store_ref)?;
94 |                             let ctor: JsFunction = store.get_named_property("AudioBuffer")?;
95 |                             let js_audio_buffer = ctor.new_instance(&[env.get_null()?])?;
96 |                             // populate with native audio buffer
97 |                             let napi_audio_buffer =
98 |                                 env.unwrap::<NapiAudioBuffer>(&js_audio_buffer)?;
99 |                             napi_audio_buffer.insert(audio_buffer);
100 | 
101 |                             Ok(js_audio_buffer)
102 |                         }
103 |                         Err(e) => Err(napi::Error::from_reason(e.to_string())),
104 |                     }
105 |                 },
106 |             )
107 |         }
108 |     };
109 | }
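110 | 
111 | // For illustration only - a sketch of how the two macros above are presumably
112 | // consumed by the concrete context wrappers (e.g. in audio_context.rs):
113 | //
114 | //     let interface = base_audio_context_interface![
115 | //         Property::new("resume")?.with_method(resume)
116 | //     ];
117 | //     env.define_class("AudioContext", constructor, &interface)?;
118 | //     base_audio_context_impl!(NapiAudioContext);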
-------------------------------------------------------------------------------- /src/media_devices/enumerate_devices.rs: --------------------------------------------------------------------------------
1 | use napi::{CallContext, JsObject, Result};
2 | use napi_derive::js_function;
3 | use web_audio_api::media_devices::{enumerate_devices_sync, MediaDeviceInfoKind};
4 | 
5 | #[js_function(0)]
6 | pub(crate) fn napi_enumerate_devices(ctx: CallContext) -> Result<JsObject> {
7 |     let list = enumerate_devices_sync();
8 | 
9 |     let mut napi_list = ctx.env.create_array(0)?;
10 | 
11 |     for d in list {
12 |         let mut device = ctx.env.create_object()?;
13 |         device.set_named_property("deviceId", ctx.env.create_string(d.device_id())?)?;
14 |         device.set_named_property("label", ctx.env.create_string(d.label())?)?;
15 | 
16 |         if d.group_id().is_some() {
17 |             device.set_named_property("groupId", ctx.env.create_string(d.group_id().unwrap())?)?;
18 |         } else {
19 |             device.set_named_property("groupId", ctx.env.create_string("")?)?;
20 |         }
21 | 
22 |         match d.kind() {
23 |             MediaDeviceInfoKind::VideoInput => {
24 |                 device.set_named_property("kind", ctx.env.create_string("videoinput")?)?;
25 |             }
26 |             MediaDeviceInfoKind::AudioInput => {
27 |                 device.set_named_property("kind", ctx.env.create_string("audioinput")?)?;
28 |             }
29 |             MediaDeviceInfoKind::AudioOutput => {
30 |                 device.set_named_property("kind", ctx.env.create_string("audiooutput")?)?;
31 |             }
32 |         }
33 | 
34 |         napi_list.insert(device)?;
35 |     }
36 | 
37 |     napi_list.coerce_to_object()
38 | }
39 | 
-------------------------------------------------------------------------------- /src/media_devices/get_user_media.rs: --------------------------------------------------------------------------------
1 | use crate::media_streams::NapiMediaStream;
2 | 
3 | use napi::{CallContext, Either, JsFunction, JsNumber, JsObject, JsString, Result};
4 | use napi_derive::js_function;
5 | 
6 | use web_audio_api::media_devices::{
7 |     get_user_media_sync, MediaStreamConstraints, MediaTrackConstraints,
8 | };
9 | 
10 | // @note: this factory pattern could be used for params as well
11 | // so we could expose the AudioParam ctor (for the web test suite)
12 | 
13 | #[js_function(1)]
14 | pub(crate) fn napi_get_user_media(ctx: CallContext) -> Result<JsObject> {
15 |     // we never go here, probably because the monkey patch explicitly passes undefined (?)
16 |     if ctx.length == 0 {
17 |         return Err(napi::Error::from_reason(
18 |             "TypeError - Failed to execute 'getUserMedia' on 'MediaDevices': audio must be requested".to_string(),
19 |         ));
20 |     }
21 | 
22 |     // @todo - handle options
23 |     let options = match ctx.try_get::<JsObject>(0)? {
24 |         Either::A(options_js) => {
25 |             if options_js.has_own_property("video")? {
26 |                 return Err(napi::Error::from_reason(
27 |                     "TypeError - Failed to execute 'getUserMedia' on 'MediaDevices': video not supported".to_string(),
28 |                 ));
29 |             }
30 | 
31 |             if let Some(js_constraints) = options_js.get::<&str, JsObject>("audio")? {
32 |                 let mut constraints = MediaTrackConstraints::default();
33 | 
34 |                 if let Ok(Some(js_device_id)) = js_constraints.get::<&str, JsString>("deviceId") {
35 |                     let device_id = js_device_id.into_utf8()?.into_owned()?;
36 |                     constraints.device_id = Some(device_id);
37 |                 }
38 | 
39 |                 if let Ok(Some(js_sample_rate)) = js_constraints.get::<&str, JsNumber>("sampleRate")
40 |                 {
41 |                     let sample_rate = js_sample_rate.get_double()? as f32;
42 |                     constraints.sample_rate = Some(sample_rate);
43 |                 }
44 | 
45 |                 if let Ok(Some(js_latency)) = js_constraints.get::<&str, JsNumber>("latency") {
46 |                     let latency = js_latency.get_double()?;
47 |                     constraints.latency = Some(latency);
48 |                 }
49 | 
50 |                 if let Ok(Some(js_channel_count)) =
51 |                     js_constraints.get::<&str, JsNumber>("channelCount")
52 |                 {
53 |                     let channel_count = js_channel_count.get_uint32()?;
54 |                     constraints.channel_count = Some(channel_count);
55 |                 }
56 | 
57 |                 MediaStreamConstraints::AudioWithConstraints(constraints)
58 |             } else {
59 |                 return Err(napi::Error::from_reason(
60 |                     "TypeError - Failed to execute 'getUserMedia' on 'MediaDevices': audio must be requested".to_string(),
61 |                 ));
62 |             }
63 |         }
64 |         Either::B(_) => {
65 |             return Err(napi::Error::from_reason(
66 |                 "TypeError - Argument should be an object".to_string(),
67 |             ));
68 |         }
69 |     };
70 | 
71 |     // create rust stream
72 |     let stream = get_user_media_sync(options);
73 |     let napi_stream = NapiMediaStream::new(stream);
74 |     // retrieve the JS ctor and create a new instance
75 |     let store_ref: &mut napi::Ref<()> = ctx.env.get_instance_data()?.unwrap();
76 |     let store: JsObject = ctx.env.get_reference_value(store_ref)?;
77 |     let ctor: JsFunction = store.get_named_property("MediaStream")?;
78 | 
79 |     // @note - this argument is just a placeholder, the MediaStream ctor ignores it
80 |     let js_this = ctx.this_unchecked::<JsObject>();
81 |     let mut js_stream = ctor.new_instance(&[js_this])?;
82 |     // wrap JS instance and rust napi stream
83 |     ctx.env.wrap(&mut js_stream, napi_stream)?;
84 | 
85 |     Ok(js_stream)
86 | }
87 | 
-------------------------------------------------------------------------------- /src/media_devices/mod.rs: --------------------------------------------------------------------------------
1 | mod enumerate_devices;
2 | pub(crate) use enumerate_devices::napi_enumerate_devices;
3 | 
4 | mod get_user_media;
5 | pub(crate) use get_user_media::napi_get_user_media;
6 | 
-------------------------------------------------------------------------------- /src/media_streams/media_stream.rs: --------------------------------------------------------------------------------
1 | use crate::*;
2 | use napi::*;
3 | use napi_derive::js_function;
4 | 
5 | use web_audio_api::media_streams::*;
6 | 
7 | pub(crate) struct NapiMediaStream(MediaStream);
8 | 
9 | impl NapiMediaStream {
10 |     pub fn new(stream: MediaStream) -> Self {
11 |         Self(stream)
12 |     }
13 | 
14 |     pub fn create_js_class(env: &Env) -> Result<JsFunction> {
15 |         env.define_class("MediaStream", constructor, &[])
16 |     }
17 | 
18 |     pub fn unwrap(&self) -> &MediaStream {
19 |         &self.0
20 |     }
21 | }
22 | 
23 | #[js_function(1)]
24 | fn constructor(ctx: CallContext) -> Result<JsUndefined> {
25 |     ctx.env.get_undefined()
26 | }
27 | 
-------------------------------------------------------------------------------- /src/media_streams/mod.rs: --------------------------------------------------------------------------------
1 | mod media_stream;
2 | pub(crate) use media_stream::NapiMediaStream;
3 | 
-------------------------------------------------------------------------------- /src/periodic_wave.rs: --------------------------------------------------------------------------------
1 | use crate::*;
2 | use napi::*;
3 | use napi_derive::js_function;
4 | use web_audio_api::*;
5 | 
6 | pub(crate) struct NapiPeriodicWave(PeriodicWave);
7 | 
8 | impl NapiPeriodicWave {
9 |     pub fn create_js_class(env: &Env) -> Result<JsFunction> {
10 |         env.define_class("PeriodicWave", constructor, &[])
11 |     }
12 | 
13 |     // is this a false clippy positive?
14 |     #[allow(dead_code)]
15 |     pub fn unwrap(&self) -> &PeriodicWave {
16 |         &self.0
17 |     }
18 | }
19 | 
20 | #[js_function(2)]
21 | fn constructor(ctx: CallContext) -> Result<JsUndefined> {
22 |     let mut js_this = ctx.this_unchecked::<JsObject>();
23 | 
24 |     let js_audio_context = ctx.get::<JsObject>(0)?;
25 |     // parse options
26 |     let options = match ctx.try_get::<JsObject>(1)? {
27 |         Either::A(options_js) => {
28 |             let real = if let Some(real_js) = options_js.get::<&str, JsTypedArray>("real")? {
29 |                 let real_value = real_js.into_value()?;
30 |                 let real: &[f32] = real_value.as_ref();
31 |                 Some(real.to_vec())
32 |             } else {
33 |                 None
34 |             };
35 | 
36 |             let imag = if let Some(imag_js) = options_js.get::<&str, JsTypedArray>("imag")? {
37 |                 let imag_value = imag_js.into_value()?;
38 |                 let imag: &[f32] = imag_value.as_ref();
39 |                 Some(imag.to_vec())
40 |             } else {
41 |                 None
42 |             };
43 | 
44 |             let disable_normalization = if let Some(js_value) =
45 |                 options_js.get::<&str, JsBoolean>("disableNormalization")?
46 |             {
47 |                 js_value.try_into()?
48 |             } else {
49 |                 false
50 |             };
51 | 
52 |             PeriodicWaveOptions {
53 |                 real,
54 |                 imag,
55 |                 disable_normalization,
56 |             }
57 |         }
58 |         Either::B(_) => PeriodicWaveOptions::default(),
59 |     };
60 | 
61 |     let audio_context_name =
62 |         js_audio_context.get_named_property::<JsString>("Symbol.toStringTag")?;
63 |     let audio_context_utf8_name = audio_context_name.into_utf8()?.into_owned()?;
64 |     let audio_context_str = &audio_context_utf8_name[..];
65 |     // create native node
66 |     let periodic_wave = match audio_context_str {
67 |         "AudioContext" => {
68 |             let napi_audio_context = ctx.env.unwrap::<NapiAudioContext>(&js_audio_context)?;
69 |             let audio_context = napi_audio_context.unwrap();
70 |             PeriodicWave::new(audio_context, options)
71 |         }
72 |         "OfflineAudioContext" => {
73 |             let napi_audio_context = ctx
74 |                 .env
75 |                 .unwrap::<NapiOfflineAudioContext>(&js_audio_context)?;
76 |             let audio_context = napi_audio_context.unwrap();
77 |             PeriodicWave::new(audio_context, options)
78 |         }
79 |         &_ => unreachable!(),
80 |     };
81 | 
82 |     js_this.define_properties(&[
83 |         // @todo - review
84 |         Property::new("Symbol.toStringTag")?
85 |             .with_value(&ctx.env.create_string("PeriodicWave")?)
86 |             .with_property_attributes(PropertyAttributes::Static),
87 |     ])?;
88 | 
89 |     let napi_periodic_wave = NapiPeriodicWave(periodic_wave);
90 |     ctx.env.wrap(&mut js_this, napi_periodic_wave)?;
91 | 
92 |     ctx.env.get_undefined()
93 | }
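94 | 
95 | // For illustration only - this native ctor is reached through the JS facade in
96 | // js/PeriodicWave.js, roughly as:
97 | //
98 | //     new nativeBinding.PeriodicWave(context[kNapiObj], { real: [...], imag: [...] })
99 | //
100 | // where `real` / `imag` arrive as Float32Arrays already sanitized by js/lib/cast.js.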
-------------------------------------------------------------------------------- /src/utils/mod.rs: --------------------------------------------------------------------------------
1 | use napi::{Env, JsFunction, JsObject, Result};
2 | 
3 | // alternative implementation of Napi ThreadsafeFunction
4 | // cf. https://github.com/parcel-bundler/lightningcss/blob/master/napi/src/threadsafe_function.rs
5 | mod thread_safe_function;
6 | pub(crate) use thread_safe_function::*;
7 | 
8 | pub(crate) fn get_class_ctor(env: &Env, name: &str) -> Result<JsFunction> {
9 |     let store_ref: &mut napi::Ref<()> = env.get_instance_data()?.unwrap();
10 |     let store: JsObject = env.get_reference_value(store_ref)?;
11 |     let ctor: JsFunction = store.get_named_property(name)?;
12 |     Ok(ctor)
13 | }
14 | 
-------------------------------------------------------------------------------- /tests/AudioParam.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import { AudioContext } from '../index.mjs';
3 | 
4 | describe('AudioParam', () => {
5 |   describe('# attributes', () => {
6 |     it(`should implement all attributes`, async () => {
7 |       const audioContext = new AudioContext();
8 |       const gain = audioContext.createGain();
9 | 
10 |       assert.equal(gain.gain.automationRate, 'a-rate');
11 |       assert.equal(gain.gain.defaultValue, 1);
12 |       // should accept some delta
13 |       assert.equal(gain.gain.maxValue, 3.4028234663852886e+38);
14 |       assert.equal(gain.gain.minValue, -3.4028234663852886e+38);
15 |       assert.equal(gain.gain.value, 1);
16 | 
17 |       await audioContext.close();
18 |     });
19 |   });
20 | });
21 | 
-------------------------------------------------------------------------------- /tests/OfflineAudioContext.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import {
3 |   AudioContext,
4 |   AudioBuffer,
5 |   OfflineAudioContext,
6 | } from '../index.mjs';
7 | 
8 | describe('# OfflineAudioContext', () => {
9 |   describe('## await startRendering()', () => {
10 |     it('buffer returned by startRendering and buffer from `oncomplete` event should be same instance', async () => {
11 |       const offline = new OfflineAudioContext(1, 48000, 48000);
12 | 
13 |       let aResult = null;
14 |       let bResult = null;
15 |       let renderingEnded = false;
16 | 
17 |       offline.addEventListener('complete', (e) => {
18 |         // check that the complete event is triggered after startRendering fulfills
19 |         assert.isTrue(renderingEnded);
20 |         aResult = e.renderedBuffer;
21 |       });
22 | 
23 |       const osc = offline.createOscillator();
24 |       osc.connect(offline.destination);
25 |       osc.frequency.value = 220;
26 |       osc.start(0.);
27 |       osc.stop(1.);
28 | 
29 |       bResult = await offline.startRendering();
30 |       renderingEnded = true;
31 |       // make sure we received the event
32 |       await new Promise(resolve => setTimeout(resolve, 100));
33 | 
34 |       assert.isTrue(aResult instanceof AudioBuffer);
35 |       assert.isTrue(bResult instanceof AudioBuffer);
36 |       assert.deepEqual(aResult, bResult);
37 |     });
38 |   });
39 | });
40 | 
41 | 
42 | 
-------------------------------------------------------------------------------- /tests/PeriodicWave.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import { OfflineAudioContext, PeriodicWave } from '../index.mjs';
3 | 
4 | describe('# PeriodicWave', () => {
5 |   describe('constructor', () => {
6 |     it(`accepts any sequence as imag`, () => {
7 |       const audioContext = new OfflineAudioContext(1, 1, 48000);
8 |       const periodicWave = new PeriodicWave(audioContext, {
9 |         imag: [-1, 1],
10 |       });
11 | 
12 |       assert.isTrue(periodicWave instanceof PeriodicWave);
13 |     });
14 | 
15 |     it(`accepts any sequence as real`, () => {
16 |       const audioContext = new OfflineAudioContext(1, 1, 48000);
17 |       const periodicWave = new PeriodicWave(audioContext, {
18 |         real: [-1, 1],
19 |       });
20 | 
21 |       assert.isTrue(periodicWave instanceof PeriodicWave);
22 |     });
23 |   });
24 | });
25 | 
26 | 
-------------------------------------------------------------------------------- /tests/WaveShaper.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import { AudioContext } from '../index.mjs';
3 | 
4 | describe('# WaveShaper', () => {
5 |   describe('## curve', () => {
6 |     it('getter should return a copy of the given curve', async () => {
7 |       const context = new AudioContext({});
8 |       const curve = new Float32Array([-1, -0.5, 0, 0.5, 1]);
9 | 
10 |       const ws = context.createWaveShaper();
11 |       ws.curve = curve;
12 | 
13 |       // same content
14 |       assert.deepEqual(curve, ws.curve);
15 |       // not the same instance
16 |       assert.notStrictEqual(curve, ws.curve);
17 | 
18 |       await context.close();
19 |     });
20 |   });
21 | });
22 | 
-------------------------------------------------------------------------------- /tests/cast.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import { toSanitizedSequence } from '../js/lib/cast.js';
3 | 
4 | describe('toSanitizedSequence - Float32Array', () => {
5 |   const target = Float32Array;
6 |   it('should work with Float32', () => {
7 |     const data = new Float32Array([0., 1]);
8 |     const result = toSanitizedSequence(data, target);
9 |     const expected = new target([0., 1]);
10 | 
11 |     assert.deepEqual(result, expected);
12 |   });
13 | 
14 |   it('should work with Float64', () => {
15 |     const data = new Float64Array([0., 1]);
16 |     const result = toSanitizedSequence(data, target);
17 |     const expected = new target([0., 1]);
18 | 
19 |     assert.deepEqual(result, expected);
20 |   });
21 | 
22 |   it('should work with Arrays', () => {
23 |     const data = [0, 1];
24 |     const result = toSanitizedSequence(data, target);
25 |     const expected = new target([0., 1]);
26 | 
27 |     assert.deepEqual(result, expected);
28 |   });
29 | 
30 |   it('should throw if item is non finite', () => {
31 |     const data = [0., NaN];
32 | 
33 |     assert.throws(() => {
34 |       toSanitizedSequence(data, target);
35 |     });
36 |   });
37 | });
38 | 
-------------------------------------------------------------------------------- /tests/ctor.errors.mjs: --------------------------------------------------------------------------------
1 | import { AudioContext, GainNode, AudioParam } from '../index.mjs';
2 | 
3 | const audioContext = new AudioContext({});
4 | 
5 | try {
6 |   new GainNode();
7 | } catch (err) {
8 |   console.log(err);
9 | }
10 | 
11 | try {
12 |   new GainNode(1);
13 | } catch (err) {
14 |   console.log(err);
15 | }
16 | 
17 | try {
18 |   new GainNode(audioContext, 42);
19 | } catch (err) {
20 |   console.log(err);
21 | }
22 | 
23 | // this should not throw
24 | try {
25 |   new GainNode(audioContext);
26 | } catch (err) {
27 |   console.log(err);
28 | }
29 | 
30 | try {
31 |   new GainNode(audioContext, { gain: 0.1 });
32 | } catch (err) {
33 |   console.log(err);
34 | }
35 | 
36 | try {
37 |   new GainNode(audioContext, null);
38 | } catch (err) {
39 |   console.log(err);
40 | }
41 | 
42 | // check audio param
43 | try {
44 |   const node = new GainNode(audioContext);
45 | 
46 |   console.log(node.gain instanceof AudioParam);
47 | } catch (err) {
48 |   console.log(err);
49 | }
50 | 
51 | 
52 | // try {
53 | //   new AnalyserNode(audioContext)
54 | // } catch (err) {
55 | //   console.log(err);
56 | // }
57 | 
58 | // try {
59 | //   new AnalyserNode(audioContext, {"minDecibels":-10,"maxDecibels":20})
60 | // } catch (err) {
61 | //   console.log(err);
62 | // }
63 | 
64 | // try {
65 | //   new AnalyserNode(audioContext, {"minDecibels":-10,"maxDecibels":20})
66 | // } catch (err) {
67 | //   console.log(err);
68 | // }
69 | 
70 | audioContext.close();
71 | // src.connect(gain);
72 | // src.disconnect({});
73 | 
-------------------------------------------------------------------------------- /tests/getUserMedia.spec.mjs: --------------------------------------------------------------------------------
1 | import { assert } from 'chai';
2 | import { sleep } from '@ircam/sc-utils';
3 | 
4 | import { mediaDevices, AudioContext, MediaStreamAudioSourceNode } from '../index.mjs';
5 | 
6 | const CI = process.argv.includes('--ci');
7 | 
8 | describe('# mediaDevices.getUserMedia(options)', () => {
9 |   it('should fail if no argument given', async () => {
10 |     let failed = false;
11 |     try {
12 |       await mediaDevices.getUserMedia();
13 |     } catch (err) {
14 |       console.log(err.message);
15 |       failed = true;
16 |     }
17 | 
18 |     if (!failed) {
19 |       assert.fail('should have failed');
20 |     }
21 |   });
22 | 
23 |   // @todo - clean error message
24 |   it('should fail if argument is not an object', async () => {
25 |     let failed = false;
26 |     try {
27 |       await mediaDevices.getUserMedia(true);
28 |     } catch (err) {
29 |       console.log(err.message);
30 |       failed = true;
31 |     }
32 | 
33 |     if (!failed) {
34 |       assert.fail('should have failed');
35 |     }
36 |   });
37 | 
38 |   it('should fail if options.video', async () => {
39 |     let failed = false;
40 |     try {
41 |       await mediaDevices.getUserMedia({ video: true });
42 |     } catch (err) {
43 |       console.log(err.message);
44 |       failed = true;
45 |     }
46 | 
47 |     if (!failed) {
48 |       assert.fail('should have failed');
49 |     }
50 |   });
51 | 
52 |   it('should not fail if options.audio = true', async () => {
53 |     // accessing the microphone in CI makes the process hang
54 |     if (CI) {
55 |       console.log('Run in CI, aborting...');
56 |       return;
57 |     }
58 | 
59 |     let failed = false;
60 |     const audioContext = new AudioContext();
61 | 
62 |     try {
63 |       const stream = await mediaDevices.getUserMedia({ audio: true });
64 |     } catch (err) {
65 |       console.log(err);
66 |       failed = true;
67 |     }
68 | 
69 |     await sleep(0.4);
70 |     await audioContext.close();
71 | 
72 |     if (failed) {
73 |       assert.fail('should not have failed');
74 |     }
75 |   });
76 | 
77 |   it('should work with MediaStreamAudioSourceNode [1 factory] (make some noise)', async () => {
78 |     // accessing the microphone in CI makes the process hang
79 |     if (CI) {
80 |       console.log('Run in CI, aborting...');
81 |       return;
82 |     }
83 | 
84 |     let failed = false;
85 |     const audioContext = new AudioContext();
86 | 
87 |     const stream = await mediaDevices.getUserMedia({ audio: true });
88 | 
89 |     try {
90 |       const src = audioContext.createMediaStreamSource(stream);
91 |       src.connect(audioContext.destination);
92 |     } catch (err) {
93 |       console.log(err);
94 |       failed = true;
95 |     }
96 | 
97 |     await sleep(0.4);
98 |     await audioContext.close();
99 | 
100 |     if (failed) {
101 |       assert.fail('should not have failed');
102 |     }
103 |   });
104 | 
105 |   it('should work with MediaStreamAudioSourceNode [2 ctor] (make some noise)', async () => {
106 |     // accessing the microphone in CI makes the process hang
107 |     if (CI) {
108 |       console.log('Run in CI, aborting...');
109 |       return;
110 |     }
111 | 
112 |     let failed = false;
113 |     const audioContext = new AudioContext();
114 | 
115 |     const stream = await mediaDevices.getUserMedia({ audio: true });
116 | 
117 |     try {
118 |       const src = new MediaStreamAudioSourceNode(audioContext, { mediaStream: stream });
119 |       src.connect(audioContext.destination);
120 |     } catch (err) {
121 |       console.log(err);
122 |       failed = true;
123 |     }
124 | 
125 |     await sleep(0.4);
126 |     await audioContext.close();
127 | 
128 |     if (failed) {
129 |       assert.fail('should not have failed');
130 |     }
131 |   });
132 | });
133 | 
-------------------------------------------------------------------------------- /tests/test-offline-context-gc.mjs: --------------------------------------------------------------------------------
1 | import { OfflineAudioContext } from '../index.mjs';
2 | 
3 | // Regression test for #133
4 | //
5 | //
6 | // uncomment the Drop trait in `offline_audio_context.rs` and rebuild
7 | // `node --trace-gc --expose-gc tests/test-offline-context-gc.mjs`
8 | // @todo - add a build flag to automate this
9 | 
10 | for (let i = 0; i < 100000; i++) {
11 |   console.log('+ i:', i);
12 | 
13 |   let offline = new OfflineAudioContext(1, 10 * 48000, 48000);
14 | 
15 |   const osc = offline.createOscillator();
16 |   osc.connect(offline.destination);
17 |   osc.frequency.value = 220;
18 |   osc.start(0.);
19 |   osc.stop(10.);
20 | 
21 |   const buffer = await offline.startRendering();
22 |   console.log('+ buffer duration:', buffer.duration);
23 | 
24 |   offline = null;
25 | 
26 |   if (global.gc) {
27 |     global.gc();
28 |   }
29 | }
30 | 
-------------------------------------------------------------------------------- /tests/worklets/invalid-ctor.worklet.mjs: --------------------------------------------------------------------------------
1 | class InvalidCtor extends AudioWorkletProcessor {
2 |   constructor() {
3 |     super();
4 | 
5 |     this.stuff = invalid;
6 |   }
7 | 
8 |   process(inputs, outputs, parameters) {
9 |     return true;
10 |   }
11 | }
12 | 
13 | registerProcessor('invalid-ctor', InvalidCtor);
-------------------------------------------------------------------------------- /tests/worklets/invalid.worklet.js: --------------------------------------------------------------------------------
1 | const a = invalid;
2 | 
-------------------------------------------------------------------------------- /tests/worklets/invalid.worklet.mjs: --------------------------------------------------------------------------------
1 | const a = invalid;
2 | 
-------------------------------------------------------------------------------- /tests/worklets/noise-generator.worklet.mjs: --------------------------------------------------------------------------------
1 | class NoiseGenerator extends AudioWorkletProcessor {
2 |   process(_, outputs) {
3 |     const output = outputs[0];
4 | 
5 |     for (let channel = 0; channel < output.length; ++channel) {
6 |       const outputChannel = output[channel];
7 |       for (let i = 0; i < outputChannel.length; ++i) {
8 |         outputChannel[i] = 2 * Math.random() - 1;
9 |       }
10 |     }
11 | 
12 |     return true;
13 |   }
14 | }
15 | 
16 | registerProcessor('noise-generator', NoiseGenerator);
17 | 
--------------------------------------------------------------------------------