├── .babelrc ├── .gitignore ├── README.md ├── index.js ├── package.json ├── src └── index.js └── test ├── index.js └── standard_test.js /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": ["es2015"] 3 | } 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | lib 2 | 3 | # Created by https://www.gitignore.io/api/node 4 | 5 | ### Node ### 6 | # Logs 7 | logs 8 | *.log 9 | npm-debug.log* 10 | 11 | # Runtime data 12 | pids 13 | *.pid 14 | *.seed 15 | *.pid.lock 16 | 17 | # Directory for instrumented libs generated by jscoverage/JSCover 18 | lib-cov 19 | 20 | # Coverage directory used by tools like istanbul 21 | coverage 22 | 23 | # nyc test coverage 24 | .nyc_output 25 | 26 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 27 | .grunt 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (http://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules 37 | jspm_packages 38 | 39 | # Optional npm cache directory 40 | .npm 41 | 42 | # Optional eslint cache 43 | .eslintcache 44 | 45 | # Optional REPL history 46 | .node_repl_history 47 | 48 | # Output of 'npm pack' 49 | *.tgz 50 | 51 | # Yarn Integrity file 52 | .yarn-integrity 53 | 54 | 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # web-audio-buffer-queue 2 | 3 | This module provides a Web Audio API source node that streams (Audio)Buffers from 4 | a queue or Node-style Stream. 
5 | 6 | ### Usage 7 | 8 | ```javascript 9 | import BufferQueueNode from 'web-audio-buffer-queue' 10 | 11 | var node = new BufferQueueNode({ 12 | audioContext: audioContext 13 | }) 14 | node.connect(audioContext.destination) 15 | 16 | node.write(bufferContainingPCMSamples) 17 | ``` 18 | 19 | See `src/index.js` for detailed documentation. 20 | 21 | ### License 22 | ISC 23 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | module.exports = require('./lib/index.js').default 2 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web-audio-buffer-queue", 3 | "version": "1.1.0", 4 | "description": "Web Audio API source node that streams (Audio)Buffers from a queue or Node-style Stream", 5 | "main": "index.js", 6 | "repository": { 7 | "type": "git", 8 | "url": "git://github.com/johni0702/web-audio-buffer-queue.git" 9 | }, 10 | "bugs": { 11 | "url": "https://github.com/johni0702/web-audio-buffer-queue/issues", 12 | "email": "me@johni0702.de" 13 | }, 14 | "scripts": { 15 | "compile": "node_modules/.bin/babel -d lib/ src/", 16 | "prepublish": "npm run compile", 17 | "mocha": "node_modules/.bin/mocha --compilers js:babel-core/register", 18 | "test": "npm run compile && npm run mocha" 19 | }, 20 | "keywords": [ 21 | "web", 22 | "audio", 23 | "pcm", 24 | "stream", 25 | "queue", 26 | "playback", 27 | "source", 28 | "buffer" 29 | ], 30 | "author": "Jonas Herzig ", 31 | "license": "ISC", 32 | "files": [ 33 | "index.js", 34 | "lib" 35 | ], 36 | "devDependencies": { 37 | "babel-cli": "^6.14.0", 38 | "babel-preset-es2015": "^6.14.0", 39 | "chai": "^3.5.0", 40 | "mocha": "^3.0.2", 41 | "mocha-standard": "^1.0.0", 42 | "standard": "^8.0.0", 43 | "web-audio-engine": "^0.9.3" 44 | }, 45 | "dependencies": { 46 | 
    "audio-context": "^1.0.3",
    "extend": "^3.0.0"
  }
}
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
import globalAudioContext from 'audio-context'
import extend from 'extend'
import { Writable } from 'stream'

/**
 * A source node that plays queued PCM buffers.
 *
 * When no more data is queued, this node emits silence.
 *
 * The queued buffers are played at the frequency of the audio context.
 *
 * Multiple channels are supported, both interleaved and
 * non interleaved layouts. Every single buffer queued is expected
 * to contain the same amount of samples for every channel. Therefore a single
 * frame may not be split across multiple buffers.
 *
 * When in object mode, the input format is determined automatically.
 * Supported formats are Float32Array, Int16Array and AudioBuffer.
 * When not in object mode, the input format has to be specified manually by
 * passing {@link BufferQueueNode#Float32Array} or {@link BufferQueueNode#Int16Array}
 * to the constructor.
 *
 * Note that this does only implement a small part of the AudioNode interface.
 * This node will disconnect automatically when its stream is closed.
 *
 * @extends Writable
 */
class BufferQueueNode extends Writable {
  /**
   * Create a BufferQueueNode.
   * @param {Object} [options] - Options passed to the Writable constructor.
   * @param {AudioBufferFormat} [options.dataType=BufferQueueNode.Float32Array] -
   *        Format of input data when not in objectMode.
   * @param {boolean} [options.objectMode=false] - Whether this stream operates in
   *        object mode (input format detected per chunk instead of via dataType).
   * @param {boolean} [options.interleaved=true] - Whether the input data is interleaved
   * @param {number} [options.channels=1] - Number of channels
   * @param {number} [options.bufferSize=0] - Buffer size, must be a power of two
   *        between 256 and 16384. May also be 0 in which case the implementation will
   *        pick a good value (recommended).
   * @param {AudioContext} [options.audioContext=require('audio-context')()] - The audio context
   */
  constructor (options) {
    super(options)

    options = extend({
      dataType: Float32ArrayBuffer,
      objectMode: false,
      interleaved: true,
      channels: 1,
      bufferSize: 0
    }, options)
    if (!options.audioContext) {
      // Fall back to the shared/global audio context when none was supplied
      options.audioContext = globalAudioContext()
    }
    this._dataType = options.dataType
    this._objectMode = options.objectMode
    this._interleaved = options.interleaved
    const channels = this._channels = options.channels
    const bufferSize = options.bufferSize
    const audioContext = options.audioContext
    // const sampleRate = audioContext.sampleRate

    // Queue that holds all future audio buffers
    this._queue = []

    // Create a script processor node that will inject our samples
    var processorNode = audioContext.createScriptProcessor(bufferSize, 0, channels)
    // Create a buffer source that will power the script processor
    // Note: This isn't strictly required, however some browsers are buggy
    var inputNode = audioContext.createBufferSource()
    // That source should be looping over a short, silent buffer
    inputNode.loop = true

    // Set once 'finish' fires; playback drains the queue, then shuts down
    var shuttingDown = false
    // Set once shutdown completed; the audioprocess handler becomes a no-op
    var shutDown = false
    // The buffer which holds the current audio data
    var currentBuffer = null
    // Offset into the current buffer
    var currentBufferOffset
    processorNode.addEventListener('audioprocess', (e) => {
      if (shutDown) {
        // Already shut down
        return
      }
      const out = e.outputBuffer
      // Offset into the output buffer
      let outOffset = 0
      // Try to fill the whole output buffer
      while (outOffset < out.length) {
        // If we don't have a current buffer but there are some in the queue
        if (!currentBuffer && this._queue.length > 0) {
          // Then get the next queued buffer from the queue
          currentBuffer = this._queue.shift()
          currentBufferOffset = 0
        }
        // If we still don't have any data,
        if (!currentBuffer) {
          // then fill the rest of the output with silence
          for (let channel = 0; channel < channels; channel++) {
            out.getChannelData(channel).fill(0, outOffset)
          }
          // and shut down if requested
          if (shuttingDown) {
            shutDown = true
            process.nextTick(() => this.emit('close'))
          }
          break
        }
        // Otherwise (we have data), copy as much as possible
        const remainingOutput = out.length - outOffset
        const remainingInput = currentBuffer.length - currentBufferOffset
        const remaining = Math.min(remainingOutput, remainingInput)
        // Do the actual copying
        currentBuffer.copyTo(out, outOffset, currentBufferOffset, remaining)
        // Increase offsets
        currentBufferOffset += remaining
        outOffset += remaining
        // Check if there is still data remaining in the current buffer
        if (currentBufferOffset >= currentBuffer.length) {
          currentBuffer = null
        }
      }
    })
    // Connect the input node to the script processor
    // inputNode.connect(processorNode)
    // inputNode.start()

    // Store node for later connecting
    this._node = processorNode

    this.on('finish', () => {
      shuttingDown = true
    })
    this.on('close', () => {
      processorNode.disconnect()
    })
  }

  /**
   * Connect this node to another node.
   * @see https://developer.mozilla.org/en-US/docs/Web/API/AudioNode/connect(AudioNode)
   */
  connect () {
    return this._node.connect.apply(this._node, arguments)
  }

  /**
   * Disconnect this node from another node.
   * @see https://developer.mozilla.org/en-US/docs/Web/API/AudioNode/disconnect
   */
  disconnect () {
    return this._node.disconnect.apply(this._node, arguments)
  }

  /**
   * Writable stream implementation: wraps the incoming chunk in the matching
   * AudioBufferFormat adapter and appends it to the playback queue.
   * In object mode the wrapper is chosen by the chunk's runtime type
   * (Float32Array, Int16Array, otherwise assumed to be an AudioBuffer);
   * otherwise the configured dataType is used.
   * @param {*} chunk - Incoming data
   * @param {string} encoding - Ignored (binary/object data)
   * @param {Function} callback - Called once the chunk has been queued
   */
  _write (chunk, encoding, callback) {
    if (this._objectMode) {
      if (chunk instanceof Float32Array) {
        chunk = new Float32ArrayBuffer(this._channels, this._interleaved, chunk)
      } else if (chunk instanceof Int16Array) {
        chunk = new Int16ArrayBuffer(this._channels, this._interleaved, chunk)
      } else {
        chunk = new AudioBufferBuffer(chunk)
      }
    } else {
      chunk = new (this._dataType)(this._channels, this._interleaved, chunk)
    }
    this._queue.push(chunk)
    callback(null)
  }
}

/**
 * @interface AudioBufferFormat
 */

/**
 * Copy samples from this buffer to the target AudioBuffer.
 *
 * @function
 * @name AudioBufferFormat#copyTo
 * @param {AudioBuffer} to - The target audio buffer
 * @param {number} toOffset - Offset into the target audio buffer
 * @param {number} fromOffset - Offset into this buffer
 * @param {number} length - Amount of sample-frames to copy
 */

/** @implements AudioBufferFormat */
class AudioBufferBuffer {
  constructor (it) {
    this._it = it
  }

  // Length in sample-frames (AudioBuffer.length is already per-channel)
  get length () {
    return this._it.length
  }

  copyTo (to, toOffset, fromOffset, length) {
    for (let channel = 0; channel < this._it.numberOfChannels; channel++) {
      const source = this._it.getChannelData(channel)
      to.copyToChannel(source.subarray(fromOffset, fromOffset + length), channel, toOffset)
    }
  }
}

/**
 * Base adapter for typed-array input; handles both interleaved and planar
 * (non interleaved) layouts. Subclasses override _get/_bulkCopy to convert
 * their sample format to float.
 */
class TypedArrayBuffer {
  constructor (channels, interleaved, it) {
    this._channels = channels
    this._interleaved = interleaved
    this._it = it
  }

  // Length in sample-frames per channel (total samples / channel count)
  get length () {
    return this._it.length / this._channels
  }

  /**
   * Return the sample at the specified offset
   * @param {number} i - The offset
   * @returns {number} The sample
   */
  _get (i) {
    return this._it[i]
  }

  /**
   * Copy some samples to the specified array.
   * @param {Float32Array} to - The target array
   * @param {number} toOffset - Offset into the target array
   * @param {number} fromOffset - Offset into this array
   * @param {number} length - Amount of samples to copy
   */
  _bulkCopy (to, toOffset, fromOffset, length) {
    to.set(this._it.subarray(fromOffset, fromOffset + length), toOffset)
  }

  copyTo (to, toOffset, fromOffset, length) {
    for (let channel = 0; channel < this._channels; channel++) {
      const channelData = to.getChannelData(channel)
      if (this._interleaved && this._channels > 1) {
        // For interleaved data we have to copy every sample on its own
        for (let i = 0; i < length; i++) {
          const actualFromOffset = (fromOffset + i) * this._channels + channel
          channelData[toOffset + i] = this._get(actualFromOffset)
        }
      } else {
        // Otherwise we can do a bulk copy
        const actualFromOffset = this.length * channel + fromOffset
        this._bulkCopy(channelData, toOffset, actualFromOffset, length)
      }
    }
  }
}

/** @implements AudioBufferFormat */
class Float32ArrayBuffer extends TypedArrayBuffer {
  constructor (channels, interleaved, it) {
    if (it instanceof Buffer) {
      // Reinterpret the raw bytes as 32-bit floats (4 bytes per sample)
      it = new Float32Array(it.buffer, it.byteOffset, it.byteLength / 4)
    } else if (!(it instanceof Float32Array)) {
      throw new Error('Unsupported buffer type: ' + it)
    }
    super(channels, interleaved, it)
  }
}

/** @implements AudioBufferFormat */
class Int16ArrayBuffer extends TypedArrayBuffer {
  constructor (channels, interleaved, it) {
    if (it instanceof Buffer) {
      // Reinterpret the raw bytes as 16-bit integers (2 bytes per sample)
      it = new Int16Array(it.buffer, it.byteOffset, it.byteLength / 2)
    } else if (!(it instanceof Int16Array)) {
      throw new Error('Unsupported buffer type: ' + it)
    }
    super(channels, interleaved, it)
  }

  /** @see TypedArrayBuffer#_get */
  _get (i) {
    // Scale int16 to [-1, 1]: negatives divide by 32768, positives by 32767,
    // so both -(1 << 15) and (1 << 15) - 1 map to exactly -1 and 1.
    const val = this._it[i]
    return val / ((1 << 15) - (val > 0 ? 1 : 0))
  }

  /** @see TypedArrayBuffer#_bulkCopy */
  _bulkCopy (to, toOffset, fromOffset, length) {
    // Cannot use TypedArray.set here: every sample needs float conversion
    for (let i = 0; i < length; i++) {
      to[toOffset + i] = this._get(fromOffset + i)
    }
  }
}

BufferQueueNode.AudioBuffer = AudioBufferBuffer
BufferQueueNode.Float32Array = Float32ArrayBuffer
BufferQueueNode.Int16Array = Int16ArrayBuffer
export default BufferQueueNode
--------------------------------------------------------------------------------
/test/index.js:
--------------------------------------------------------------------------------
/* eslint-env mocha */
import chai from 'chai'
const expect = chai.expect
import {RenderingAudioContext as AudioContext} from 'web-audio-engine'
import BufferQueueNode from '..'

// Custom chai property `.silent`: asserts the subject is a Float32Array
// containing only zero samples.
chai.use(function (chai, utils) {
  chai.Assertion.addProperty('silent', function () {
    var obj = utils.flag(this, 'object')
    expect(obj).to.be.a('float32array')
    expect(obj.filter(e => e !== 0)).to.be.empty
  })
})

describe('BufferQueueNode', function () {
  var audioContext
  var node
  describe('in object mode', function () {
    describe('for a single channel', function () {
      beforeEach(function () {
        audioContext = new AudioContext({
          sampleRate: 4096,
          numberOfChannels: 1,
          blockSize: 256
        })
        node = new BufferQueueNode({
          audioContext: audioContext,
          bufferSize: 256,
          objectMode: true
        })
        node.connect(audioContext.destination)
      })
      // Note: the first 256 rendered samples (one block) are skipped in every
      // assertion below because the node starts emitting one block late.
      it('should emit silence without any data', function () {
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result).to.be.silent
      })
      it('should handle a single Float32Array', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input)
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 8)).to.deep.equal(input)
        expect(result.subarray(8)).to.be.silent
      })
      it('should handle a single Int16Array', function () {
        node.write(Int16Array.of(0, -(1 << 15), 0, (1 << 15) - 1))
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 4)).to.deep.equal(Float32Array.of(0, -1, 0, 1))
        expect(result.subarray(4)).to.be.silent
      })
      it('should handle a single AudioBuffer', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        const audioBuffer = audioContext.createBuffer(1, 8, 4096)
        audioBuffer.getChannelData(0).set(input)
        node.write(audioBuffer)
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 8)).to.deep.equal(input)
        expect(result.subarray(8)).to.be.silent
      })
      it('should concatenate multiple buffers', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input.subarray(0, 1))
        node.write(input.subarray(1, 2))
        node.write(input.subarray(2, 4))
        node.write(input.subarray(4, 8))
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 8)).to.deep.equal(input)
        expect(result.subarray(8)).to.be.silent
      })
      it('should concatenate multiple buffers across blocks', function () {
        node.write(new Float32Array(4092))
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input)
        audioContext.processTo(2)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 4092)).to.be.silent
        expect(result.subarray(4092, 4100)).to.deep.equal(input)
        expect(result.subarray(4100)).to.be.silent
      })
    })
    describe('for two interleaved channels', function () {
      beforeEach(function () {
        audioContext = new AudioContext({
          sampleRate: 4096,
          numberOfChannels: 2,
          blockSize: 256
        })
        node = new BufferQueueNode({
          audioContext: audioContext,
          channels: 2,
          interleaved: true,
          bufferSize: 256,
          objectMode: true
        })
        node.connect(audioContext.destination)
      })
      it('should emit silence without any data', function () {
        audioContext.processTo(10)
        expect(audioContext.exportAsAudioData().channelData[0].subarray(256)).to.be.silent
        expect(audioContext.exportAsAudioData().channelData[1].subarray(256)).to.be.silent
      })
      it('should handle a single Float32Array', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input)
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        // Interleaved: even-indexed samples go to channel 0, odd to channel 1
        expect(resultA.subarray(0, 4)).to.deep.equal(Float32Array.of(0, 2, 4, 6))
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(Float32Array.of(1, 3, 5, 7))
        expect(resultB.subarray(4)).to.be.silent
      })
      it('should handle a single Int16Array', function () {
        node.write(Int16Array.of(0, -(1 << 15), 0, (1 << 15) - 1))
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 2)).to.deep.equal(Float32Array.of(0, 0))
        expect(resultA.subarray(2)).to.be.silent
        expect(resultB.subarray(0, 2)).to.deep.equal(Float32Array.of(-1, 1))
        expect(resultB.subarray(2)).to.be.silent
      })
      it('should handle a single AudioBuffer', function () {
        const inputA = Float32Array.of(0, 1, 2, 3)
        const inputB = Float32Array.of(4, 5, 6, 7)
        const audioBuffer = audioContext.createBuffer(2, 4, 4096)
        audioBuffer.getChannelData(0).set(inputA)
        audioBuffer.getChannelData(1).set(inputB)
        node.write(audioBuffer)
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 4)).to.deep.equal(inputA)
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(inputB)
        expect(resultB.subarray(4)).to.be.silent
      })
      it('should concatenate multiple buffers', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input.subarray(0, 2))
        node.write(input.subarray(2, 4))
        node.write(input.subarray(4, 6))
        node.write(input.subarray(6, 8))
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 4)).to.deep.equal(Float32Array.of(0, 2, 4, 6))
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(Float32Array.of(1, 3, 5, 7))
        expect(resultB.subarray(4)).to.be.silent
      })
    })
    describe('for two non interleaved channels', function () {
      beforeEach(function () {
        audioContext = new AudioContext({
          sampleRate: 4096,
          numberOfChannels: 2,
          blockSize: 256
        })
        node = new BufferQueueNode({
          audioContext: audioContext,
          channels: 2,
          interleaved: false,
          bufferSize: 256,
          objectMode: true
        })
        node.connect(audioContext.destination)
      })
      it('should emit silence without any data', function () {
        audioContext.processTo(10)
        expect(audioContext.exportAsAudioData().channelData[0].subarray(256)).to.be.silent
        expect(audioContext.exportAsAudioData().channelData[1].subarray(256)).to.be.silent
      })
      it('should handle a single Float32Array', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input)
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        // Planar: first half of the buffer is channel 0, second half channel 1
        expect(resultA.subarray(0, 4)).to.deep.equal(Float32Array.of(0, 1, 2, 3))
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(Float32Array.of(4, 5, 6, 7))
        expect(resultB.subarray(4)).to.be.silent
      })
      it('should handle a single Int16Array', function () {
        node.write(Int16Array.of(0, -(1 << 15), 0, (1 << 15) - 1))
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 2)).to.deep.equal(Float32Array.of(0, -1))
        expect(resultA.subarray(2)).to.be.silent
        expect(resultB.subarray(0, 2)).to.deep.equal(Float32Array.of(0, 1))
        expect(resultB.subarray(2)).to.be.silent
      })
      it('should handle a single AudioBuffer', function () {
        const inputA = Float32Array.of(0, 1, 2, 3)
        const inputB = Float32Array.of(4, 5, 6, 7)
        const audioBuffer = audioContext.createBuffer(2, 4, 4096)
        audioBuffer.getChannelData(0).set(inputA)
        audioBuffer.getChannelData(1).set(inputB)
        node.write(audioBuffer)
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 4)).to.deep.equal(inputA)
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(inputB)
        expect(resultB.subarray(4)).to.be.silent
      })
      it('should concatenate multiple buffers', function () {
        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(input.subarray(0, 2))
        node.write(input.subarray(2, 4))
        node.write(input.subarray(4, 6))
        node.write(input.subarray(6, 8))
        audioContext.processTo(1)
        const audioData = audioContext.exportAsAudioData()
        const resultA = audioData.channelData[0].subarray(256)
        const resultB = audioData.channelData[1].subarray(256)
        expect(resultA.subarray(0, 4)).to.deep.equal(Float32Array.of(0, 2, 4, 6))
        expect(resultA.subarray(4)).to.be.silent
        expect(resultB.subarray(0, 4)).to.deep.equal(Float32Array.of(1, 3, 5, 7))
        expect(resultB.subarray(4)).to.be.silent
      })
    })
  })
  describe('not in object mode', function () {
    describe('for a single channel', function () {
      beforeEach(function () {
        audioContext = new AudioContext({
          sampleRate: 4096,
          numberOfChannels: 1,
          blockSize: 256
        })
        node = new BufferQueueNode({
          audioContext: audioContext,
          channels: 1,
          bufferSize: 256
        })
        node.connect(audioContext.destination)
      })
      it('should handle Float32Array-like data', function () {
        node = new BufferQueueNode({
          dataType: BufferQueueNode.Float32Array,
          audioContext: audioContext,
          channels: 1,
          bufferSize: 256
        })
        node.connect(audioContext.destination)

        const input = Float32Array.of(0, 1, 2, 3, 4, 5, 6, 7)
        node.write(Buffer.from(input.buffer))
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 8)).to.deep.equal(input)
        expect(result.subarray(8)).to.be.silent
      })
      it('should handle Int16Array-like data', function () {
        node = new BufferQueueNode({
          dataType: BufferQueueNode.Int16Array,
          audioContext: audioContext,
          channels: 1,
          bufferSize: 256
        })
        node.connect(audioContext.destination)

        const input = Int16Array.of(0, (1 << 15) - 1, 0, -(1 << 15))
        node.write(Buffer.from(input.buffer))
        audioContext.processTo(1)
        const result = audioContext.exportAsAudioData().channelData[0].subarray(256)
        expect(result.subarray(0, 4)).to.deep.equal(Float32Array.of(0, 1, 0, -1))
        expect(result.subarray(4)).to.be.silent
      })
    })
  })
})
--------------------------------------------------------------------------------
/test/standard_test.js:
--------------------------------------------------------------------------------
/* eslint-env mocha */
// Runs the `standard` linter over the project; any style violation fails this test case.
it('conforms to standard', require('mocha-standard'))
--------------------------------------------------------------------------------