├── .gitignore ├── .gitmodules ├── LICENSE ├── Native └── WebAudio.js ├── README.md ├── WebAudio.elm ├── elm-package.json └── examples ├── Makefile ├── Scales.elm ├── Stream.elm ├── Visual.elm ├── Visual.html └── elm-package.json /.gitignore: -------------------------------------------------------------------------------- 1 | examples/release/ 2 | examples/.soundcloudkey 3 | build/ 4 | cache/ 5 | *.elmi 6 | *.elmo 7 | *.swp 8 | elm-stuff 9 | elm.js 10 | 11 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lib/elm-usermedia"] 2 | path = lib/elm-usermedia 3 | url = git@github.com:giraj/elm-usermedia.git 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Bob Matcuk 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the author nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | -------------------------------------------------------------------------------- /Native/WebAudio.js: -------------------------------------------------------------------------------- 1 | Elm.Native.WebAudio = {}; 2 | Elm.Native.WebAudio.make = function(elm) { 3 | elm.Native = elm.Native || {}; 4 | elm.Native.WebAudio = elm.Native.WebAudio || {}; 5 | if (elm.Native.WebAudio.values) return elm.Native.WebAudio.values; 6 | 7 | var Maybe = Elm.Maybe.make(elm); 8 | var Signal = Elm.Signal.make(elm); 9 | var List = Elm.Native.List.make(elm); 10 | var toArray = List.toArray; 11 | var fromArray = List.fromArray; 12 | 13 | 14 | 15 | var values = {}; 16 | 17 | /* AudioContext */ 18 | function createStandardContext() { 19 | return new (window.AudioContext || window.webkitAudioContext)(); 20 | } 21 | 22 | function createAudioContext(context) { 23 | return {ctor: "AudioContext", _context: context}; 24 | } 25 | 26 | values.createContext = function() { 27 | return createAudioContext(createStandardContext()); 28 | }; 29 | 30 | var defaultContext = null; 31 | function extractContext(context) { 32 | if (context.ctor === "DefaultContext") 33 | return defaultContext || (defaultContext = createStandardContext()); 34 | return context._context; 35 | } 36 | 37 | values.getSampleRate = function(context) { 38 | return extractContext(context).sampleRate; 39 | }; 40 | 41 | values.getCurrentTime = function(context) { 42 | return extractContext(context).currentTime; 43 | }; 44 | 45 | values.createOfflineContext = F3(function(channels, length, sampleRate) { 46 | var context = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(channels, length, sampleRate); 47 | var signal = Signal.constant(Maybe.Nothing); 48 | context.oncomplete = function(e) { 49 | elm.notify(signal.id, Maybe.Just(values.createAudioBuffer(e.renderedBuffer))); 50 | }; 51 | return {_:{}, _context: createAudioContext(context), _signal: signal}; 52 | }); 53 | 54 | values.startOfflineRendering = function(offlineContext) { 55 | offlineContext._context._context.startRendering(); 56 | return offlineContext; 57 | }; 58 | 59 | 60 | 61 | /* AudioParam */ 62 | values.setValue = F2(function(val, param) { 63 | param._node._node[param._0].value = val; 64 | return param; 65 | }); 66 | 67 | values.getValue = function(param) { 68 | return param._node._node[param._0].value; 69 | }; 70 | 71 | values.setValueAtTime = F3(function(value, time, param) { 72 | param._node._node[param._0].setValueAtTime(value, time); 73 | return param; 74 | }); 75 | 76 | values.linearRampToValue = F3(function(value, time, param) { 77 | param._node._node[param._0].linearRampToValueAtTime(value, time); 78 | return param; 79 | }); 80 | 81 | values.exponentialRampToValue = F3(function(value, time, param) { 82 | param._node._node[param._0].exponentialRampToValueAtTime(value, time); 83 | return param; 84 | }); 85 | 86 | values.setTargetAtTime = F4(function(target, starttime, constant, param) { 87 | param._node._node[param._0].setTargetAtTime(target, starttime, constant); 88 | return param; 89 | }); 90 | 91 | values.setValueCurveAtTime = F4(function(curve, starttime, duration, param) { 92 | param._node._node[param._0].setValueCurveAtTime(toArray(curve), starttime, duration); 93 | return param; 94 | }); 95 | 96 | values.cancelScheduledValues = F2(function(time, param) { 97 | param._node._node[param._0].cancelScheduledValues(time); 98 | return param; 99 | }); 100 | 101 | 102 | 103 | /* AudioBuffer */ 104 | values.createAudioBuffer = function(buffer) { 105 | return {ctor: 
"AudioBuffer", _buffer: buffer}; 106 | }; 107 | 108 | values.loadAudioBufferFromUrl = F2(function(context, url) { 109 | var signal = Signal.constant(Maybe.Nothing); 110 | var request = new XMLHttpRequest(); 111 | request.open('GET', url, true); 112 | request.responseType = 'arraybuffer'; 113 | request.onload = function() { 114 | extractContext(context).decodeAudioData(request.response, function(buffer) { 115 | elm.notify(signal.id, Maybe.Just(values.createAudioBuffer(buffer))); 116 | }); 117 | }; 118 | request.send(); 119 | return signal; 120 | }); 121 | 122 | values.getBufferSampleRate = function(buffer) { 123 | return buffer._buffer.sampleRate; 124 | }; 125 | 126 | values.getBufferLength = function(buffer) { 127 | return buffer._buffer.length; 128 | }; 129 | 130 | values.getBufferDuration = function(buffer) { 131 | return buffer._buffer.duration; 132 | }; 133 | 134 | values.getBufferNumberOfChannels = function(buffer) { 135 | return buffer._buffer.numberOfChannels; 136 | }; 137 | 138 | values.getChannelData = F2(function(channel, buffer) { 139 | return fromArray(buffer._buffer.getChannelData(channel)); 140 | }); 141 | 142 | values.getChannelDataSlice = F4(function(channel, start, length, buffer) { 143 | if (!buffer._slice || buffer._slice.length != length) 144 | buffer._slice = new Float32Array(length); 145 | buffer._buffer.copyFromChannel(buffer._slice, channel, start); 146 | return fromArray(buffer._buffer); 147 | }); 148 | 149 | values.setChannelDataSlice = F4(function(channel, start, data, buffer) { 150 | buffer._buffer.copyToChannel(toArray(data), channel, start); 151 | return buffer; 152 | }); 153 | 154 | 155 | 156 | /* Audio Node Utility Functions*/ 157 | function buildAudioNode(node) { 158 | return {_:{}, inputs:node.numberOfInputs, outputs:node.numberOfOutputs, _node:node}; 159 | } 160 | 161 | function buildAudioParam(externalName, internalName, node) { 162 | node[externalName] = {ctor: "AudioParam", _0: internalName, _node: node}; 163 | } 164 | 165 | function buildGetter(externalName, internalName) { 166 | values[externalName] = function(node) { 167 | return node._node[internalName]; 168 | }; 169 | } 170 | 171 | function buildSetter(externalName, internalName) { 172 | values[externalName] = F2(function(value, node) { 173 | node._node[internalName] = value; 174 | return node; 175 | }); 176 | } 177 | 178 | function buildProperty(externalName, internalName) { 179 | buildGetter('get' + externalName, internalName); 180 | buildSetter('set' + externalName, internalName); 181 | } 182 | 183 | 184 | 185 | /* Audio Node */ 186 | values.connectNodes = F4(function(destination, outputIdx, inputIdx, source) { 187 | source._node.connect(destination._node, outputIdx, inputIdx); 188 | return source; 189 | }); 190 | 191 | values.connectToParam = F3(function(destination, outputIdx, source) { 192 | source._node.connect(destination.param, outputIdx); 193 | return source; 194 | }); 195 | 196 | buildProperty('ChannelCount', 'channelCount'); 197 | 198 | values.getChannelCountMode = function(node) { 199 | switch (node._node.channelCountMode) { 200 | case "max": 201 | return elm.WebAudio.values.Max; 202 | case "clamped-max": 203 | return elm.WebAudio.values.ClampedMax; 204 | case "explicit": 205 | return elm.WebAudio.values.Explicit; 206 | } 207 | }; 208 | 209 | values.setChannelCountMode = F2(function(mode, node) { 210 | switch (mode.ctor) { 211 | case "Max": 212 | node._node.channelCountMode = "max"; 213 | break; 214 | case "ClampedMax": 215 | node._node.channelCountMode = "clamped-max"; 216 | break; 
217 | case "Explicit": 218 | node._node.channelCountMode = "explicit"; 219 | break; 220 | } 221 | return node; 222 | }); 223 | 224 | values.getChannelInterpretation = function(node) { 225 | switch (node._node.channelInterpretation) { 226 | case "speakers": 227 | return elm.WebAudio.values.Speakers; 228 | case "discrete": 229 | return elm.WebAudio.values.Discrete; 230 | } 231 | }; 232 | 233 | values.setChannelInterpretation = F2(function(mode, node) { 234 | switch (mode.ctor) { 235 | case "Speakers": 236 | node._node.channelInterpretation = "speakers"; 237 | break; 238 | case "Discrete": 239 | node._node.channelInterpretation = "discrete"; 240 | break; 241 | } 242 | return node; 243 | }); 244 | 245 | 246 | 247 | /* Analyser Node */ 248 | values.createAnalyserNode = function(context) { 249 | var node = extractContext(context).createAnalyser(); 250 | return buildAudioNode(node); 251 | }; 252 | 253 | buildProperty('FFTSize', 'fftSize'); 254 | buildProperty('MaxDecibels', 'maxDecibels'); 255 | buildProperty('MinDecibels', 'minDecibels'); 256 | buildProperty('SmoothingConstant', 'smoothingTimeConstant'); 257 | 258 | values.getByteFrequencyData = function(node) { 259 | if (!node._bFreq || node._bFreq.length != node._node.frequencyBinCount) 260 | node._bFreq = new Uint8Array(node._node.frequencyBinCount); 261 | node._node.getByteFrequencyData(node._bFreq); 262 | return fromArray(node._bFreq); 263 | }; 264 | 265 | values.getByteTimeDomainData = function(node) { 266 | if (!node._bTime || node._bTime.length != node._node.fftSize) 267 | node._bTime = new Uint8Array(node._node.fftSize); 268 | node._node.getByteTimeDomainData(node._bTime); 269 | return fromArray(node._bTime); 270 | }; 271 | 272 | values.getFloatFrequencyData = function(node) { 273 | if (!node._fFreq || node._fFreq.length != node._node.frequencyBinCount) 274 | node._fFreq = new Float32Array(node._node.frequencyBinCount); 275 | node._node.getFloatFrequencyData(node._fFreq); 276 | return fromArray(node._fFreq); 277 | }; 278 | 279 | values.getFloatTimeDomainData = function(node) { 280 | if (!node._fTime || node._fTime.length != node._node.fftSize) 281 | node._fTime = new Float32Array(node._node.fftSize); 282 | node._node.getFloatTimeDomainData(node._fTime); 283 | return fromArray(node._fTime); 284 | }; 285 | 286 | 287 | 288 | /* Audio Buffer Source Node */ 289 | values.createAudioBufferSourceNode = function(context) { 290 | var node = extractContext(context).createBufferSource(); 291 | var ret = buildAudioNode(node); 292 | buildAudioParam('playbackRate', 'playbackRate', ret); 293 | 294 | var signal = Signal.constant(false); 295 | ret._ended = signal; 296 | node.onended = function() { 297 | elm.notify(signal.id, true); 298 | }; 299 | 300 | return ret; 301 | }; 302 | 303 | buildGetter('AudioBufferFromNode', 'buffer'); 304 | values.setAudioBufferForNode = F2(function(value, node) { 305 | node._node.buffer = value._buffer; 306 | return node; 307 | }); 308 | 309 | buildProperty('AudioBufferIsLooping', 'loop'); 310 | buildProperty('AudioBufferLoopStart', 'loopStart'); 311 | buildProperty('AudioBufferLoopEnd', 'loopEnd'); 312 | 313 | values.startAudioBufferNode = F4(function(when, offset, duration, node) { 314 | if (duration.ctor == "Nothing") 315 | node._node.start(when, offset); 316 | else 317 | node._node.start(when, offset, duration._0); 318 | return node; 319 | }); 320 | 321 | values.stopAudioBufferNode = F2(function(when, node) { 322 | node._node.stop(when); 323 | return node; 324 | }); 325 | 326 | 327 | 328 | /* AudioDestinationNode */ 329 
| values.getDestinationNode = function(context) { 330 | var node = extractContext(context).destination; 331 | return buildAudioNode(node); 332 | } 333 | 334 | buildGetter('MaxChannelCount', 'maxChannelCount'); 335 | 336 | 337 | 338 | /* TODO: Audio Worker Node */ 339 | 340 | 341 | 342 | /* Biquad Filter Node */ 343 | values.createBiquadFilterNode = function(context) { 344 | var node = extractContext(context).createBiquadFilter(); 345 | var ret = buildAudioNode(node); 346 | buildAudioParam('frequency', 'frequency', ret); 347 | buildAudioParam('detune', 'detune', ret); 348 | buildAudioParam('q', 'q', ret); 349 | buildAudioParam('gain', 'gain', ret); 350 | return ret; 351 | } 352 | 353 | values.getFilterType = function(node) { 354 | switch (node._node.type) { 355 | case "lowpass": 356 | return elm.WebAudio.values.LowPass; 357 | case "highpass": 358 | return elm.WebAudio.values.HighPass; 359 | case "bandpass": 360 | return elm.WebAudio.values.BandPass; 361 | case "lowshelf": 362 | return elm.WebAudio.values.LowShelf; 363 | case "highshelf": 364 | return elm.WebAudio.values.HighShelf; 365 | case "peaking": 366 | return elm.WebAudio.values.Peaking; 367 | case "notch": 368 | return elm.WebAudio.values.Notch; 369 | case "allpass": 370 | return elm.WebAudio.values.AllPass; 371 | } 372 | } 373 | 374 | values.setFilterType = F2(function(type, node) { 375 | switch (type.ctor) { 376 | case "LowPass": 377 | node._node.type = "lowpass"; 378 | break; 379 | case "HighPass": 380 | node._node.type = "highpass"; 381 | break; 382 | case "BandPass": 383 | node._node.type = "bandpass"; 384 | break; 385 | case "LowShelf": 386 | node._node.type = "lowshelf"; 387 | break; 388 | case "HighShelf": 389 | node._node.type = "highshelf"; 390 | break; 391 | case "Peaking": 392 | node._node.type = "peaking"; 393 | break; 394 | case "Notch": 395 | node._node.type = "notch"; 396 | break; 397 | case "AllPass": 398 | node._node.type = "allpass"; 399 | break; 400 | } 401 | return node; 402 | }); 403 | 404 | 405 | 406 | /* ChannelMergerNode */ 407 | values.createChannelMergerNode = F2(function(context, numberOfInputs) { 408 | var node = extractContext(context).createChannelMerger(numberOfInputs); 409 | return buildAudioNode(node); 410 | }); 411 | 412 | 413 | 414 | /* ChannelSplitterNode */ 415 | values.createChannelSplitterNode = F2(function(context, numberOfOutputs) { 416 | var node = extractContext(context).createChannelSplitter(numberOfOutputs); 417 | return buildAudioNode(node); 418 | }); 419 | 420 | 421 | 422 | /* DelayNode */ 423 | values.createDelayNode = F2(function(context, maxDelayTime) { 424 | var node = extractContext(context).createDelay(maxDelayTime); 425 | var ret = buildAudioNode(node); 426 | buildAudioParam('delayTime', 'delayTime', ret); 427 | return ret; 428 | }); 429 | 430 | 431 | 432 | /* DynamicsCompressorNode */ 433 | values.createDynamicsCompressorNode = function(context) { 434 | var node = extractContext(context).createDynamicsCompressor(); 435 | var ret = buildAudioNode(node); 436 | buildAudioParam('threshold', 'threshold', ret); 437 | buildAudioParam('knee', 'knee', ret); 438 | buildAudioParam('ratio', 'ratio', ret); 439 | buildAudioParam('reduction', 'reduction', ret); 440 | buildAudioParam('attack', 'attack', ret); 441 | buildAudioParam('release', 'release', ret); 442 | return ret; 443 | }; 444 | 445 | 446 | 447 | /* GainNode */ 448 | values.createGainNode = function(context) { 449 | var node = extractContext(context).createGain(); 450 | var ret = buildAudioNode(node); 451 | buildAudioParam('gain', 
'gain', ret); 452 | return ret; 453 | }; 454 | 455 | 456 | 457 | /* MediaElementAudioSourceNode */ 458 | values.createHiddenMediaElementAudioSourceNode = function(context) { 459 | var element = new Audio(); 460 | element.crossOrigin = "anonymous"; 461 | return A2(values.createMediaElementAudioSourceNode, context, element); 462 | }; 463 | 464 | values.createMediaElementAudioSourceNode = F2(function(context, element) { 465 | var node = extractContext(context).createMediaElementSource(element); 466 | var ret = buildAudioNode(node); 467 | ret._element = element; 468 | return ret; 469 | }); 470 | 471 | values.getMediaElementIsLooping = function(node) { 472 | return node._element.loop; 473 | }; 474 | 475 | values.setMediaElementIsLooping = F2(function(loop, node) { 476 | node._element.loop = loop; 477 | return node; 478 | }); 479 | 480 | values.getMediaElementSource = function(node) { 481 | return node._element.src; 482 | }; 483 | 484 | values.setMediaElementSource = F2(function(source, node) { 485 | node._element.src = source; 486 | node._element.load(); 487 | return node; 488 | }); 489 | 490 | values.playMediaElement = function(node) { 491 | node._element.play(); 492 | return node; 493 | }; 494 | 495 | values.pauseMediaElement = function(node) { 496 | node._element.pause(); 497 | return node; 498 | }; 499 | 500 | 501 | /* MediaStreamAudioSourceNode */ 502 | values.createMediaStreamAudioSourceNode = F2(function(context, source) { 503 | var node = extractContext(context).createMediaStreamSource(source); 504 | return buildAudioNode(node); 505 | }); 506 | 507 | 508 | /* OscillatorNode */ 509 | function setOscillatorWaveType(type, node) { 510 | switch (type.ctor) { 511 | case "Sine": 512 | node._node.type = "sine"; 513 | break; 514 | case "Square": 515 | node._node.type = "square"; 516 | break; 517 | case "Sawtooth": 518 | node._node.type = "sawtooth"; 519 | break; 520 | case "Triangle": 521 | node._node.type = "triangle"; 522 | break; 523 | } 524 | return node; 525 | } 526 | 527 | values.createOscillatorNode = F2(function(context, type) { 528 | var node = extractContext(context).createOscillator(); 529 | var ret = buildAudioNode(node); 530 | buildAudioParam('frequency', 'frequency', ret); 531 | buildAudioParam('detune', 'detune', ret); 532 | return setOscillatorWaveType(type, ret); 533 | }); 534 | 535 | values.getOscillatorWaveType = function(node) { 536 | switch (node._node.type) { 537 | case "sine": 538 | return elm.WebAudio.values.Sine; 539 | case "square": 540 | return elm.WebAudio.values.Square; 541 | case "sawtooth": 542 | return elm.WebAudio.values.Sawtooth; 543 | case "triangle": 544 | return elm.WebAudio.values.Triangle; 545 | } 546 | }; 547 | 548 | values.setOscillatorWaveType = F2(setOscillatorWaveType); 549 | 550 | values.startOscillator = F2(function(startTime, node) { 551 | node._node.start(startTime); 552 | return node; 553 | }); 554 | 555 | values.stopOscillator = F2(function(stopTime, node) { 556 | node._node.stop(stopTime); 557 | return {ctor: '_Tuple0'}; 558 | }); 559 | 560 | 561 | 562 | /* PannerNode */ 563 | values.createPannerNode = function(context) { 564 | var node = extractContext(context).createPanner(); 565 | return buildAudioNode(node); 566 | }; 567 | 568 | values.getPanningModel = function(node) { 569 | switch (node._node.panningModel) { 570 | case "equalpower": 571 | return elm.WebAudio.values.EqualPower; 572 | case "hrtf": 573 | return elm.WebAudio.values.HRTF; 574 | } 575 | }; 576 | 577 | values.setPanningModel = F2(function(model, node) { 578 | switch (model.ctor) 
{ 579 | case "EqualPower": 580 | node._node.panningModel = "equalpower"; 581 | break; 582 | case "HRTF": 583 | node._node.panningModel = "hrtf"; 584 | break; 585 | } 586 | return node; 587 | }); 588 | 589 | values.getDistanceModel = function(node) { 590 | switch (node._node.distanceModel) { 591 | case "linear": 592 | return elm.WebAudio.values.Linear; 593 | case "inverse": 594 | return elm.WebAudio.values.Inverse; 595 | case "exponential": 596 | return elm.WebAudio.values.Exponential; 597 | } 598 | }; 599 | 600 | values.setDistanceModel = F2(function(model, node) { 601 | switch (model.ctor) { 602 | case "Linear": 603 | node._node.distanceModel = "linear"; 604 | break; 605 | case "Inverse": 606 | node._node.distanceModel = "inverse"; 607 | break; 608 | case "Exponential": 609 | node._node.distanceModel = "exponential"; 610 | break; 611 | } 612 | return node; 613 | }); 614 | 615 | buildProperty('ReferenceDistance', 'refDistance'); 616 | buildProperty('MaxDistance', 'maxDistance'); 617 | buildProperty('RolloffFactor', 'rolloffFactor'); 618 | buildProperty('ConeInnerAngle', 'coneInnerAngle'); 619 | buildProperty('ConeOuterAngle', 'coneOuterAngle'); 620 | buildProperty('ConeOuterGain', 'coneOuterGain'); 621 | 622 | values.setPosition = F4(function(x, y, z, node) { 623 | node._node.setPosition(x, y, z); 624 | return node; 625 | }); 626 | 627 | values.setOrientation = F4(function(x, y, z, node) { 628 | node._node.setOrientation(x, y, z); 629 | return node; 630 | }); 631 | 632 | values.setVelocity = F4(function(x, y, z, node) { 633 | node._node.setVelocity(x, y, z); 634 | return node; 635 | }); 636 | 637 | return elm.Native.WebAudio.values = values; 638 | }; 639 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | elm-webaudio 2 | ============ 3 | 4 | This project is abandoned! It has been superseded by [trotha01's implementation](https://github.com/trotha01/elm-webaudio). 5 | 6 | **ABANDONED**: This project has been abandoned. When this library was authored, 7 | there was no good way to put a functional wrapper around a very non-functional 8 | API such as WebAudio. I thought this was a decent attempt, for the time. Since 9 | then, elm has grown to include new language constructs that would make the 10 | implementation easier. However, I do not have the time or interest to rewrite 11 | this library using the new language features. If you'd like to take the torch, 12 | [submit an issue](https://github.com/bmatcuk/elm-webaudio/issues) with your 13 | project's URL and I'll include it in this README so that others may find it. 14 | 15 | The `elm-webaudio` library connects your [Elm](http://elm-lang.org/) programs 16 | to the [Web Audio API](http://webaudio.github.io/web-audio-api/). This library 17 | is somewhat experimental and incomplete, but it is useable. Check out an example 18 | of the library in use: [oscillator scales](http://www.squeg.net/elm-webaudio/Scales.html). 19 | 20 | And another, much more complicated example: 21 | [soundcloud](http://www.squeg.net/elm-webaudio/Visual.html). 22 | 23 | I highly recommend you check out the documentation for the Web Audio API to 24 | make sure you understand the concepts before trying to use this library. This 25 | library basically represents a fairly thin wrapper around the Web Audio API, 26 | so everything in the Web Audio API documentation applies. 
27 | -------------------------------------------------------------------------------- /WebAudio.elm: -------------------------------------------------------------------------------- 1 | module WebAudio where 2 | 3 | {-| A module for accessing the Web Audio API via Elm. 4 | 5 | # Getting Started 6 | 7 | First, you will need an `AudioContext`. There are two types of contexts: 8 | a standard context (which outputs to the user's audio device - speakers, 9 | headphones, etc), and an "offline" context which renders audio to a buffer. 10 | It is fairly rare that you would need more than one standard context, and, so 11 | this library provides a convenience context called the `DefaultContext`. Think 12 | of the `DefaultContext` as a singleton for a standard context. 13 | 14 | I highly recommend you read through the Web Audio API documentation to 15 | familiarize yourself with the concepts. You can find the documentation here: 16 | http://webaudio.github.io/web-audio-api/ 17 | 18 | @docs AudioContext, createContext, createOfflineContext, getSampleRate, getCurrentTime 19 | 20 | # Special Notes 21 | 22 | Most "set" functions take the object whose value is being set as the last 23 | parameter, and then the function returns that same object to facilitate 24 | function chaining using the "|>" operator. 25 | 26 | # Audio Params 27 | 28 | Most parameters for various Audio Nodes are actually "AudioParams". These 29 | allow you to either set a constant value, or schedule changes to the value at 30 | appropriate times. All times are relative to the AudioContext's current time. 31 | If you try to schedule a change for a time that has already passed, the change 32 | will take effect immediately. 33 | 34 | The documentation below will note if the node has any AudioParams that you can 35 | modify in the form of a bulleted list. These params can be accessed using the 36 | record notation. For example, a Biquad Filter Node has a "frequency" param. It 37 | could be accessed with: `node.frequency` 38 | 39 | @docs AudioParam, setValue, getValue, setValueAtTime, linearRampToValue, exponentialRampToValue, setTargetAtTime, setValueCurveAtTime, cancelScheduledValues 40 | 41 | # Audio Buffers 42 | 43 | An `AudioBuffer` stores audio data in memory in a PCM format with a range of 44 | -1 to 1. AudioBuffers may contain multiple channels. Typically, AudioBuffers 45 | are used for "short" audio clips (less than a minute) because the entire file 46 | must be loaded before the audio can be played. An HTMLMediaElement, such as 47 | the HTML `audio` tag handled by the MediaElementAudioSourceNode, is typically 48 | used for longer audio clips because it supports streaming. 49 | 50 | There are many ways to create AudioBuffers, some of which are beyond the scope 51 | of this library. However, this library does have a few functions available to 52 | load audio files into buffers. If you have a need to create an AudioBuffer in a 53 | way this library does not directly support, the Native library contains a 54 | function called `createAudioBuffer` that takes an AudioBuffer and returns an 55 | AudioBuffer compatible with this library. 56 | 57 | @docs AudioBuffer, getBufferSampleRate, getBufferLength, getBufferDuration, getBufferNumberOfChannels, getChannelData, getChannelDataSlice, setChannelDataSlice 58 | 59 | # Audio Nodes 60 | 61 | Once you have your AudioContext, you can begin to build your graph of Audio 62 | Nodes. 
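As a quick sketch (the names `graph` and `filter` are only illustrative), a small source-to-filter-to-destination chain might be wired up like this:

```haskell
graph : OscillatorNode
graph =
  let filter =
        createBiquadFilterNode DefaultContext
          |> setFilterType LowPass
          |> connectNodes (getDestinationNode DefaultContext) 0 0
  in createOscillatorNode DefaultContext Sawtooth
       |> connectNodes filter 0 0
       |> startOscillator 0.0
```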
63 | 64 | @docs AudioNode, ChannelCountMode, ChannelInterpretation, connectNodes, connectToParam, getChannelCount, setChannelCount, getChannelCountMode, setChannelCountMode, getChannelInterpretation, setChannelInterpretation, tapNode 65 | 66 | # Analyser Nodes 67 | 68 | @docs AnalyserNode, createAnalyserNode, getFFTSize, setFFTSize, getMaxDecibels, setMaxDecibels, getMinDecibels, setMinDecibels, getSmoothingConstant, setSmoothingConstant, getByteFrequencyData, getByteTimeDomainData, getFloatFrequencyData, getFloatTimeDomainData 69 | 70 | # Audio Buffer Source Nodes 71 | 72 | @docs AudioBufferSourceNode, createAudioBufferSourceNode, getAudioBufferFromNode, setAudioBufferForNode, getAudioBufferIsLooping, setAudioBufferIsLooping, getAudioBufferLoopStart, setAudioBufferLoopStart, getAudioBufferLoopEnd, setAudioBufferLoopEnd, startAudioBufferNode, stopAudioBufferNode 73 | 74 | # Audio Destination Nodes 75 | 76 | Each Audio Context has only one Audio Destination Node. 77 | 78 | @docs AudioDestinationNode, getDestinationNode, getMaxChannelCount 79 | 80 | # Audio Worker Node 81 | 82 | These nodes are currently unimplemented. 83 | 84 | # Biquad Filter Nodes 85 | 86 | Biquad Filter Nodes have the following AudioParams: 87 | 88 | * frequency 89 | * detune 90 | * q 91 | * gain 92 | 93 | @docs BiquadFilterNode, BiquadFilterType, createBiquadFilterNode, getFilterType, setFilterType 94 | 95 | # Channel Merger Nodes 96 | 97 | @docs ChannelMergerNode, createChannelMergerNode 98 | 99 | # Channel Splitter Nodes 100 | 101 | @docs ChannelSplitterNode, createChannelSplitterNode 102 | 103 | # Convolver Nodes 104 | 105 | These nodes are currently unimplemented. 106 | 107 | # Delay Nodes 108 | 109 | Delay Nodes have the following AudioParams: 110 | 111 | * delayTime 112 | 113 | @docs DelayNode, createDelayNode 114 | 115 | # Dynamics Compressor Nodes 116 | 117 | Dynamics Compressor Nodes have the following AudioParams: 118 | 119 | * threshold 120 | * knee 121 | * ratio 122 | * reduction 123 | * attack 124 | * release 125 | 126 | @docs DynamicsCompressorNode, createDynamicsCompressorNode 127 | 128 | # Gain Nodes 129 | 130 | Gain Nodes have the following AudioParams: 131 | 132 | * gain 133 | 134 | @docs GainNode, createGainNode 135 | 136 | # Media Element Audio Source Nodes 137 | 138 | Media Element Audio Source Nodes connect HTMLMediaElements to the audio graph. 139 | This is the preferred way to connect a "long" audio file to the audio graph. 140 | HTMLMediaElements are things like the HTML video or audio tags, and creating 141 | these tags is a bit beyond the scope of this library. However, this library 142 | does have a convenience method for creating a "hidden" audio tag that will not 143 | be added to the page, but will load an audio file via the HTMLMediaElement 144 | interface thus gaining the benefits of streaming, etc. 145 | 146 | The Native library also includes a function called `createMediaElementSourceNode` 147 | that takes an instance of HTMLMediaElement (which you might get from doing a 148 | `document.getElementById()` or from creating an element with `document.createElement`) 149 | and returns a MediaElementAudioSourceNode. You could use this in your own code 150 | to create a MediaElementAudioSourceNode from an audio (or video) tag that you 151 | have created using other means. 
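For example, streaming a file through the default output might look like the following sketch (the file name is only a placeholder):

```haskell
music : MediaElementAudioSourceNode
music =
  createHiddenMediaElementAudioSourceNode DefaultContext
    |> setMediaElementSource "music.mp3"
    |> connectNodes (getDestinationNode DefaultContext) 0 0
    |> playMediaElement
```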
152 | 153 | @docs MediaElementAudioSourceNode, createHiddenMediaElementAudioSourceNode, getMediaElementIsLooping, setMediaElementIsLooping, getMediaElementSource, setMediaElementSource, playMediaElement, pauseMediaElement 154 | 155 | # Media Stream Audio Destination Nodes 156 | 157 | These nodes are currently unimplemented. 158 | 159 | # Media Stream Audio Source Nodes 160 | 161 | These nodes are currently unimplemented. 162 | 163 | # Oscillator Nodes 164 | 165 | Oscillator Nodes have the following AudioParams: 166 | 167 | * frequency 168 | * detune 169 | 170 | @docs OscillatorNode, OscillatorWaveType, createOscillatorNode, getOscillatorWaveType, setOscillatorWaveType, startOscillator, stopOscillator 171 | 172 | # Panner Nodes 173 | 174 | @docs PannerNode, PanningModel, DistanceModel, createPannerNode, getPanningModel, setPanningModel, getDistanceModel, setDistanceModel, getReferenceDistance, setReferenceDistance, getMaxDistance, setMaxDistance, getRolloffFactor, setRolloffFactor, getConeInnerAngle, setConeInnerAngle, getConeOuterAngle, setConeOuterAngle, getConeOuterGain, setConeOuterGain, setPosition, setOrientation, setVelocity 175 | 176 | # Script Processor Nodes 177 | 178 | These nodes are deprecated and, thus, unimplemented. See: Audio Worker Nodes 179 | 180 | # Wave Shaper Nodes 181 | 182 | These nodes are currently unimplemented. 183 | 184 | -} 185 | 186 | import Native.WebAudio 187 | import UserMedia exposing (MediaStream) 188 | 189 | 190 | {-| The AudioContext 191 | 192 | Think of the `DefaultContext` as a global singleton. Just use the `DefaultContext` 193 | unless there's some reason you need to have more than one context. 194 | -} 195 | type AudioContext = AudioContext | DefaultContext 196 | 197 | {-| Create a new AudioContext 198 | 199 | Instead of creating a context, you can use the `DefaultContext`. The 200 | `DefaultContext` is like a singleton instance of an AudioContext and would be 201 | sufficient for most people. 202 | -} 203 | createContext : () -> AudioContext 204 | createContext = Native.WebAudio.createContext 205 | 206 | {-| Get the context's sample rate -} 207 | getSampleRate : AudioContext -> Float 208 | getSampleRate = Native.WebAudio.getSampleRate 209 | 210 | {-| Get the context's current time -} 211 | getCurrentTime : AudioContext -> Float 212 | getCurrentTime = Native.WebAudio.getCurrentTime 213 | 214 | {-| The OfflineAudioContext -} 215 | type alias OfflineAudioContext = {context: AudioContext, signal: Signal (Maybe AudioBuffer)} 216 | 217 | {-| Create a new Offline AudioContext 218 | 219 | Parameters are: the number of channels, length of the buffer in sample frames, 220 | and the sample rate in Hz. Offline Audio Contexts return a record with two 221 | fields: 222 | 223 | * returnedValue.context is the AudioContext 224 | * returnedValue.signal is a signal that is raised when the Offline Audio 225 | Context has finished rendering audio to the AudioBuffer 226 | -} 227 | createOfflineContext : Int -> Int -> Float -> OfflineAudioContext 228 | createOfflineContext = Native.WebAudio.createOfflineContext 229 | 230 | {-| Begin rendering audio in an Offline Audio Context 231 | 232 | When rendering has finished, the context.signal `Signal` will raise. 
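For example (a sketch; `offline` and `rendering` are illustrative names, and the arguments request 2 channels and 44100 frames at 44100 Hz):

```haskell
offline = createOfflineContext 2 44100 44100.0
rendering = startOfflineRendering offline

-- offline.signal yields (Just buffer) once rendering has finished
```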
233 | -} 234 | startOfflineRendering : OfflineAudioContext -> OfflineAudioContext 235 | startOfflineRendering = Native.WebAudio.startOfflineRendering 236 | 237 | 238 | 239 | {-| AudioParams 240 | 241 | An AudioParam is used in a lot of places to allow you to either set a static 242 | value (such as a frequency, gain, etc), or to schedule changes over time. 243 | -} 244 | type AudioParam = AudioParam String 245 | 246 | {-| Set the static value of the param -} 247 | setValue : Float -> AudioParam -> AudioParam 248 | setValue = Native.WebAudio.setValue 249 | 250 | {-| Get the current value of the param -} 251 | getValue : AudioParam -> Float 252 | getValue = Native.WebAudio.getValue 253 | 254 | {-| Schedule the AudioParam to change values at a specific time -} 255 | setValueAtTime : Float -> Float -> AudioParam -> AudioParam 256 | setValueAtTime = Native.WebAudio.setValueAtTime 257 | 258 | {-| Schedule the AudioParam to linearly ramp to a new value, finishing at the 259 | specified time. 260 | -} 261 | linearRampToValue : Float -> Float -> AudioParam -> AudioParam 262 | linearRampToValue = Native.WebAudio.linearRampToValue 263 | 264 | {-| Schedule the AudioParam to exponentially ramp to a new value, finishing at 265 | the specified time. 266 | -} 267 | exponentialRampToValue : Float -> Float -> AudioParam -> AudioParam 268 | exponentialRampToValue = Native.WebAudio.exponentialRampToValue 269 | 270 | {-| Schedule the AudioParam to exponentially approach the target, starting at 271 | the specified time. The "constant" determines how quickly the value changes 272 | with the value changing roughly 63.2% in the first time constant. 273 | -} 274 | setTargetAtTime : Float -> Float -> Float -> AudioParam -> AudioParam 275 | setTargetAtTime = Native.WebAudio.setTargetAtTime 276 | 277 | {-| Schedule a curve of values to start at the given time and run for the 278 | specified duration. Each value will take effect for N / duration seconds. 279 | -} 280 | setValueCurveAtTime : List Float -> Float -> Float -> AudioParam -> AudioParam 281 | setValueCurveAtTime = Native.WebAudio.setValueCurveAtTime 282 | 283 | {-| Cancel all scheduled changes at and after the specified time. 
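For example, scheduling a ramp and then cancelling the tail end of it (a sketch; `gainNode` is assumed to be a GainNode and `now` a time such as `getCurrentTime DefaultContext`):

```haskell
gainNode.gain
  |> setValueAtTime 0.0 now
  |> linearRampToValue 1.0 (now + 2.0)
  |> cancelScheduledValues (now + 1.0)
```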
-} 284 | cancelScheduledValues : Float -> AudioParam -> AudioParam 285 | cancelScheduledValues = Native.WebAudio.cancelScheduledValues 286 | 287 | 288 | 289 | {-| AudioBuffers -} 290 | type AudioBuffer = AudioBuffer 291 | 292 | {-| Load an Audio Buffer from a URL -} 293 | loadAudioBufferFromUrl: AudioContext -> String -> Signal (Maybe AudioBuffer) 294 | loadAudioBufferFromUrl = Native.WebAudio.loadAudioBufferFromUrl 295 | 296 | {-| Retrieve the sample rate of the AudioBuffer -} 297 | getBufferSampleRate : AudioBuffer -> Float 298 | getBufferSampleRate = Native.WebAudio.getBufferSampleRate 299 | 300 | {-| Get the length of the AudioBuffer in sample frames -} 301 | getBufferLength : AudioBuffer -> Int 302 | getBufferLength = Native.WebAudio.getBufferLength 303 | 304 | {-| Get the duration of the AudioBuffer in seconds -} 305 | getBufferDuration : AudioBuffer -> Float 306 | getBufferDuration = Native.WebAudio.getBufferDuration 307 | 308 | {-| Retrieve the number of channels in the AudioBuffer -} 309 | getBufferNumberOfChannels : AudioBuffer -> Int 310 | getBufferNumberOfChannels = Native.WebAudio.getBufferNumberOfChannels 311 | 312 | {-| Get the buffer's data for the specified channel into an array -} 313 | getChannelData : Int -> AudioBuffer -> List Float 314 | getChannelData = Native.WebAudio.getChannelData 315 | 316 | {-| Get a slice of channel data from the buffer. 317 | 318 | This is more efficient than getting all of the channel data if you only need 319 | a small chunk of it. Parameters are: 320 | 321 | * Channel number, starting with 0 322 | * What sample frame to start with 323 | * How many frames to return 324 | * The AudioBuffer 325 | -} 326 | getChannelDataSlice : Int -> Int -> Int -> AudioBuffer -> List Float 327 | getChannelDataSlice = Native.WebAudio.getChannelDataSlice 328 | 329 | {-| Set a slice of channel data in the buffer. 330 | 331 | This method allows you to modify the channel data. Parameters are: 332 | 333 | * Channel number, starting with 0 334 | * The starting frame to modify 335 | * The new channel data 336 | * The AudioBuffer 337 | -} 338 | setChannelDataSlice : Int -> Int -> List Float -> AudioBuffer -> AudioBuffer 339 | setChannelDataSlice = Native.WebAudio.setChannelDataSlice 340 | 341 | 342 | 343 | {-| AudioNodes 344 | 345 | AudioNodes make up the building blocks of your audio signal graph. There are 346 | source nodes which produce an audio stream, destination nodes which can create 347 | sound from the stream, and processing nodes which allow you to modify the 348 | stream such as filters, delays, and effects. 349 | 350 | Audio Nodes have the following properties: 351 | * inputs: The number of inputs for this node. 0 means this is a source node. 352 | * outputs: The number of outputs for this node. 0 means this is a destination 353 | node. 354 | -} 355 | type alias AudioNode a = { a | inputs:Int, outputs:Int } 356 | 357 | {-| How channels are counted during up-mixing and down-mixing -} 358 | type ChannelCountMode = Max | ClampedMax | Explicit 359 | 360 | {-| How individual channels are treated when up-mixing and down-mixing -} 361 | type ChannelInterpretation = Speakers | Discrete 362 | 363 | {-| Connect Audio Nodes 364 | 365 | An output of node1 will be connected to an input of node2. You may specify the 366 | index of the output to connect, and the index of the input. These indexes are 367 | zero based. 
Fan-in and fan-out are both supported, so the output of a node can 368 | be connected to multiple nodes, and multiple nodes can be connected to a single 369 | input. This function will return node1 for chaining. 370 | -} 371 | connectNodes : AudioNode b -> Int -> Int -> AudioNode a -> AudioNode a 372 | connectNodes = Native.WebAudio.connectNodes 373 | 374 | {-| Connect an Audio Node to an Audio Param 375 | 376 | The signal from an AudioNode may be fed into an AudioParam to control the 377 | parameter value. You may also specify which output index to connect. The index 378 | is zero based. Fan-in and fan-out are both supported, so the output of a node 379 | can be connected to multiple AudioParams, and multiple AudioParams can be 380 | connected to a single node. This function will return the node for chaining. 381 | -} 382 | connectToParam : AudioParam b -> Int -> AudioNode a -> AudioNode a 383 | connectToParam = Native.WebAudio.connectToParam 384 | 385 | {-| Get a Node's Channel Count 386 | 387 | The number of channels used when up-mixing or down-mixing inputs. The default 388 | is 2 for most nodes, but some nodes determine this based on other settings. If 389 | the node has no inputs, this setting has no effect. 390 | -} 391 | getChannelCount : AudioNode a -> Int 392 | getChannelCount = Native.WebAudio.getChannelCount 393 | 394 | {-| Set a Node's Channel Count 395 | 396 | The number of channels used when up-mixing or down-mixing inputs. The default 397 | is 2 for most nodes, but some nodes determine this based on other settings. If 398 | the node has no inputs, this setting has no effect. 399 | -} 400 | setChannelCount : Int -> AudioNode a -> AudioNode a 401 | setChannelCount = Native.WebAudio.setChannelCount 402 | 403 | {-| Get a Node's Channel Count Mode 404 | @docs ChannelCountMode 405 | -} 406 | getChannelCountMode : AudioNode a -> ChannelCountMode 407 | getChannelCountMode = Native.WebAudio.getChannelCountMode 408 | 409 | {-| Set a Node's Channel Count Mode - returns the node itself for chaining. 410 | @docs ChannelCountMode 411 | -} 412 | setChannelCountMode : ChannelCountMode -> AudioNode a -> AudioNode a 413 | setChannelCountMode = Native.WebAudio.setChannelCountMode 414 | 415 | {-| Get a Node's Channel Interpretation 416 | @docs ChannelInterpretation 417 | -} 418 | getChannelInterpretation : AudioNode a -> ChannelInterpretation 419 | getChannelInterpretation = Native.WebAudio.getChannelInterpretation 420 | 421 | {-| Set a Node's Channel Interpretation - returns the node itself for chaining. 422 | @docs ChannelInterpretation 423 | -} 424 | setChannelInterpretation : ChannelInterpretation -> AudioNode a -> AudioNode a 425 | setChannelInterpretation = Native.WebAudio.setChannelInterpretation 426 | 427 | {-| "Tap" a node 428 | 429 | This is a convenience function, making it easy to access one of the node's 430 | AudioParam properties and then return the node itself at the end so you can 431 | continue to chain more functions. 
432 | 433 | For example, if "node" is an OscillatorNode: 434 | 435 | ```haskell 436 | tapNode .frequency (\f -> setValue 440.0 f) node |> startOscillator 0.0 437 | ``` 438 | -} 439 | tapNode : (a -> b) -> (b -> c) -> a -> a 440 | tapNode f t n = 441 | let _ = t <| f n 442 | in n 443 | 444 | 445 | 446 | {-| Type of an AnalyserNode -} 447 | type alias AnalyserNode = AudioNode {} 448 | 449 | {-| Create an AnalyserNode -} 450 | createAnalyserNode : AudioContext -> AnalyserNode 451 | createAnalyserNode = Native.WebAudio.createAnalyserNode 452 | 453 | {-| Get the FFT Size of an Analyser Node -} 454 | getFFTSize : AnalyserNode -> Int 455 | getFFTSize = Native.WebAudio.getFFTSize 456 | 457 | {-| Set the FFT Size of an Analyser Node 458 | 459 | The FFT Size must be a power of 2 between 32 to 2048. Default is 2048. This 460 | function returns the AnalyserNode for chaining 461 | -} 462 | setFFTSize : Int -> AnalyserNode -> AnalyserNode 463 | setFFTSize = Native.WebAudio.setFFTSize 464 | 465 | {-| Get the maximum power in the scaling range of the AnalyserNode -} 466 | getMaxDecibels : AnalyserNode -> Float 467 | getMaxDecibels = Native.WebAudio.getMaxDecibels 468 | 469 | {-| Set the maximum power in the scaling range of the AnalyserNode 470 | 471 | The default is -30. This function returns the AnalyserNode for chaining. 472 | -} 473 | setMaxDecibels : Float -> AnalyserNode -> AnalyserNode 474 | setMaxDecibels = Native.WebAudio.setMaxDecibels 475 | 476 | {-| Get the minimum power in the scaling range of the AnalyserNode -} 477 | getMinDecibels : AnalyserNode -> Float 478 | getMinDecibels = Native.WebAudio.getMinDecibels 479 | 480 | {-| Set the minimum power in the scaling range of the AnalyserNode 481 | 482 | The default is -100. This function returns the AnalyserNode for chaining. 483 | -} 484 | setMinDecibels : Float -> AnalyserNode -> AnalyserNode 485 | setMinDecibels = Native.WebAudio.setMinDecibels 486 | 487 | {-| Get the smoothing constant for the AnalyserNode -} 488 | getSmoothingConstant : AnalyserNode -> Float 489 | getSmoothingConstant = Native.WebAudio.getSmoothingConstant 490 | 491 | {-| Set the smoothing constant for the AnalyserNode 492 | 493 | A value from 0 to 1, where 0 represents no averaging. Default is 0.8. This 494 | function returns the AnalyserNode for chaining. 495 | -} 496 | setSmoothingConstant : Float -> AnalyserNode -> AnalyserNode 497 | setSmoothingConstant = Native.WebAudio.setSmoothingConstant 498 | 499 | {-| Get frequency data from the AnalyserNode 500 | 501 | A value of 0 equals the minDecibels setting, and a value of 255 equals the 502 | maxDecibels setting. 503 | -} 504 | getByteFrequencyData : AnalyserNode -> List Int 505 | getByteFrequencyData = Native.WebAudio.getByteFrequencyData 506 | 507 | {-| Get time domain data from the AnalyserNode 508 | 509 | A value of 0 equals the minDecibels setting, and a value of 255 equals the 510 | maxDecibels setting. 511 | -} 512 | getByteTimeDomainData : AnalyserNode -> List Int 513 | getByteTimeDomainData = Native.WebAudio.getByteTimeDomainData 514 | 515 | {-| Get frequency data from the AnalyserNode 516 | 517 | Values are in the range of minDecibels to maxDecibels. 518 | -} 519 | getFloatFrequencyData : AnalyserNode -> List Float 520 | getFloatFrequencyData = Native.WebAudio.getFloatFrequencyData 521 | 522 | {-| Get time domain data from the AnalyserNode 523 | 524 | Values are in the range of minDecibels to maxDecibels. 
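For example, sampling the waveform roughly 30 times per second (a sketch assuming `Signal` and `Time` are imported and a source node has been connected to `analyser`):

```haskell
analyser = createAnalyserNode DefaultContext |> setFFTSize 1024

waveform : Signal (List Float)
waveform =
  Signal.map (\_ -> getFloatTimeDomainData analyser) (Time.fps 30)
```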
525 | -} 526 | getFloatTimeDomainData : AnalyserNode -> List Float 527 | getFloatTimeDomainData = Native.WebAudio.getFloatTimeDomainData 528 | 529 | 530 | 531 | {-| Type of an AudioBufferSourceNode -} 532 | type alias AudioBufferSourceNode = AudioNode { playbackRate:AudioParam, ended: Signal Bool } 533 | 534 | {-| Create an AudioBufferSourceNode -} 535 | createAudioBufferSourceNode : AudioContext -> AudioBufferSourceNode 536 | createAudioBufferSourceNode = Native.WebAudio.createAudioBufferSourceNode 537 | 538 | {-| Get the AudioBuffer associated with the AudioBufferSourceNode -} 539 | getAudioBufferFromNode : AudioBufferSourceNode -> AudioBuffer 540 | getAudioBufferFromNode = Native.WebAudio.getAudioBufferFromNode 541 | 542 | {-| Set the AudioBuffer associated with the AudioBufferSourceNode -} 543 | setAudioBufferForNode : AudioBuffer -> AudioBufferSourceNode -> AudioBufferSourceNode 544 | setAudioBufferForNode = Native.WebAudio.setAudioBufferForNode 545 | 546 | {-| Get whether or not the AudioBufferSourceNode is looping. -} 547 | getAudioBufferIsLooping : AudioBufferSourceNode -> Bool 548 | getAudioBufferIsLooping = Native.WebAudio.getAudioBufferIsLooping 549 | 550 | {-| Set whether or not the AudioBufferSourceNode should loop. -} 551 | setAudioBufferIsLooping : Bool -> AudioBufferSourceNode -> AudioBufferSourceNode 552 | setAudioBufferIsLooping = Native.WebAudio.setAudioBufferIsLooping 553 | 554 | {-| Get the starting point for looping in seconds. -} 555 | getAudioBufferLoopStart : AudioBufferSourceNode -> Float 556 | getAudioBufferLoopStart = Native.WebAudio.getAudioBufferLoopStart 557 | 558 | {-| Set the starting point for looping in seconds. -} 559 | setAudioBufferLoopStart : Float -> AudioBufferSourceNode -> AudioBufferSourceNode 560 | setAudioBufferLoopStart = Native.WebAudio.setAudioBufferLoopStart 561 | 562 | {-| Get the ending point for the looping in seconds. -} 563 | getAudioBufferLoopEnd : AudioBufferSourceNode -> Float 564 | getAudioBufferLoopEnd = Native.WebAudio.getAudioBufferLoopEnd 565 | 566 | {-| Set the ending point for the looping in seconds. -} 567 | setAudioBufferLoopEnd : Float -> AudioBufferSourceNode -> AudioBufferSourceNode 568 | setAudioBufferLoopEnd = Native.WebAudio.setAudioBufferLoopEnd 569 | 570 | {-| Start the AudioBufferSourceNode 571 | 572 | The parameters are: 573 | * The start time, relative to the context's current time 574 | * The offset into the AudioBuffer to start at, in seconds 575 | * The duration to play - if Nothing, plays until the end 576 | -} 577 | startAudioBufferNode : Float -> Float -> Maybe Float -> AudioBufferSourceNode -> AudioBufferSourceNode 578 | startAudioBufferNode = Native.WebAudio.startAudioBufferNode 579 | 580 | {-| Stops the AudioBufferSourceNode 581 | 582 | You may specify when to stop it. 583 | -} 584 | stopAudioBufferNode : Float -> AudioBufferSourceNode -> AudioBufferSourceNode 585 | stopAudioBufferNode = Native.WebAudio.stopAudioBufferNode 586 | 587 | 588 | 589 | 590 | {-| Type of an AudioDestinationNode -} 591 | type alias AudioDestinationNode = AudioNode {} 592 | 593 | {-| Get the AudioDestinationNode for the given context 594 | 595 | Each context has only one AudioDestinationNode. 
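For example, a previously loaded `buffer` (see `loadAudioBufferFromUrl`) could be played through the destination like this sketch:

```haskell
playBuffer : AudioBuffer -> AudioBufferSourceNode
playBuffer buffer =
  createAudioBufferSourceNode DefaultContext
    |> setAudioBufferForNode buffer
    |> connectNodes (getDestinationNode DefaultContext) 0 0
    |> startAudioBufferNode 0.0 0.0 Nothing
```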
596 | -} 597 | getDestinationNode : AudioContext -> AudioDestinationNode 598 | getDestinationNode = Native.WebAudio.getDestinationNode 599 | 600 | {-| Get the maximum number of channels -} 601 | getMaxChannelCount : AudioDestinationNode -> Int 602 | getMaxChannelCount = Native.WebAudio.getMaxChannelCount 603 | 604 | 605 | 606 | {- TODO: Type of an AudioWorkerNode -} 607 | 608 | 609 | 610 | {-| Type of a BiquadFilterNode -} 611 | type alias BiquadFilterNode = AudioNode { frequency:AudioParam, detune:AudioParam, q:AudioParam, gain:AudioParam } 612 | 613 | {-| Biquad Filter Type -} 614 | type BiquadFilterType = LowPass | HighPass | BandPass | LowShelf | HighShelf | Peaking | Notch | AllPass 615 | 616 | {-| Create a BiquadFilterNode -} 617 | createBiquadFilterNode : AudioContext -> BiquadFilterNode 618 | createBiquadFilterNode = Native.WebAudio.createBiquadFilterNode 619 | 620 | {-| Get the type of the BiquadFilterNode -} 621 | getFilterType : BiquadFilterNode -> BiquadFilterType 622 | getFilterType = Native.WebAudio.getFilterType 623 | 624 | {-| Set the type of the BiquadFilterNode 625 | 626 | The type of filter determines what the parameters mean. This function returns 627 | the BiquadFilterNode for chaining. 628 | -} 629 | setFilterType : BiquadFilterType -> BiquadFilterNode -> BiquadFilterNode 630 | setFilterType = Native.WebAudio.setFilterType 631 | 632 | 633 | 634 | {-| Type of a ChannelMergerNode -} 635 | type alias ChannelMergerNode = AudioNode {} 636 | 637 | {-| Create a ChannelMergerNode 638 | 639 | You may specify the number of inputs as the second parameter. 640 | -} 641 | createChannelMergerNode : AudioContext -> Int -> ChannelMergerNode 642 | createChannelMergerNode = Native.WebAudio.createChannelMergerNode 643 | 644 | 645 | 646 | {-| Type of a ChannelSplitterNode -} 647 | type alias ChannelSplitterNode = AudioNode {} 648 | 649 | {-| Create a ChannelSplitterNode 650 | 651 | You may specify the number of outputs as the second parameter 652 | -} 653 | createChannelSplitterNode : AudioContext -> Int -> ChannelSplitterNode 654 | createChannelSplitterNode = Native.WebAudio.createChannelSplitterNode 655 | 656 | 657 | 658 | {- TODO: Type of a ConvolverNode -} 659 | 660 | 661 | 662 | {-| Type of a DelayNode -} 663 | type alias DelayNode = AudioNode { delayTime:AudioParam } 664 | 665 | {-| Create a DelayNode 666 | 667 | You may specify the maximum delay time as the second parameter. 
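For example, a quarter-second delay with a one-second maximum (a sketch; `echo` is an illustrative name):

```haskell
echo =
  createDelayNode DefaultContext 1.0
    |> tapNode .delayTime (setValue 0.25)
```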
668 | -} 669 | createDelayNode : AudioContext -> Float -> DelayNode 670 | createDelayNode = Native.WebAudio.createDelayNode 671 | 672 | 673 | 674 | {-| Type of a DynamicsCompressorNode -} 675 | type alias DynamicsCompressorNode = AudioNode { threshold:AudioParam, knee:AudioParam, ratio:AudioParam, reduction:AudioParam, attack:AudioParam, release:AudioParam } 676 | 677 | {-| Create a DynamicsCompressorNode -} 678 | createDynamicsCompressorNode : AudioContext -> DynamicsCompressorNode 679 | createDynamicsCompressorNode = Native.WebAudio.createDynamicsCompressorNode 680 | 681 | 682 | 683 | {-| Type of a GainNode -} 684 | type alias GainNode = AudioNode { gain:AudioParam } 685 | 686 | {-| Create a GainNode -} 687 | createGainNode : AudioContext -> GainNode 688 | createGainNode = Native.WebAudio.createGainNode 689 | 690 | 691 | 692 | {-| Type of a MediaElementAudioSourceNode -} 693 | type alias MediaElementAudioSourceNode = AudioNode {} 694 | 695 | {-| Create a MediaElementAudioSourceNode using a hidden audio tag -} 696 | createHiddenMediaElementAudioSourceNode : AudioContext -> MediaElementAudioSourceNode 697 | createHiddenMediaElementAudioSourceNode = Native.WebAudio.createHiddenMediaElementAudioSourceNode 698 | 699 | {-| Get whether or not the MediaElementAudioSourceNode should loop -} 700 | getMediaElementIsLooping : MediaElementAudioSourceNode -> Bool 701 | getMediaElementIsLooping = Native.WebAudio.getMediaElementIsLooping 702 | 703 | {-| Set whether or not the MediaElementAudioSourceNode should loop -} 704 | setMediaElementIsLooping : Bool -> MediaElementAudioSourceNode -> MediaElementAudioSourceNode 705 | setMediaElementIsLooping = Native.WebAudio.setMediaElementIsLooping 706 | 707 | {-| Get the source of the MediaElementAudioSourceNode -} 708 | getMediaElementSource : MediaElementAudioSourceNode -> String 709 | getMediaElementSource = Native.WebAudio.getMediaElementSource 710 | 711 | {-| Set the source of the MediaElementAudioSourceNode -} 712 | setMediaElementSource : String -> MediaElementAudioSourceNode -> MediaElementAudioSourceNode 713 | setMediaElementSource = Native.WebAudio.setMediaElementSource 714 | 715 | {-| Play the MediaElementAudioSourceNode -} 716 | playMediaElement : MediaElementAudioSourceNode -> MediaElementAudioSourceNode 717 | playMediaElement = Native.WebAudio.playMediaElement 718 | 719 | {-| Pause the MediaElementAudioSourceNode -} 720 | pauseMediaElement : MediaElementAudioSourceNode -> MediaElementAudioSourceNode 721 | pauseMediaElement = Native.WebAudio.pauseMediaElement 722 | 723 | 724 | 725 | type alias MediaStreamAudioSourceNode = AudioNode {} 726 | 727 | createMediaStreamAudioSourceNode : AudioContext -> MediaStream -> MediaStreamAudioSourceNode 728 | createMediaStreamAudioSourceNode = Native.WebAudio.createMediaStreamAudioSourceNode 729 | 730 | 731 | {- TODO: Type of a MediaStreamAudioDestinationNode -} 732 | 733 | 734 | 735 | {-| Type of an OscillatorNode -} 736 | type alias OscillatorNode = AudioNode { frequency:AudioParam, detune:AudioParam } 737 | 738 | {-| Wave types for OscillatorNodes 739 | 740 | TODO: Custom 741 | -} 742 | type OscillatorWaveType = Sine | Square | Sawtooth | Triangle 743 | 744 | {-| Create an OscillatorNode 745 | 746 | Second parameter is the wave type of the oscillator 747 | -} 748 | createOscillatorNode : AudioContext -> OscillatorWaveType -> OscillatorNode 749 | createOscillatorNode = Native.WebAudio.createOscillatorNode 750 | 751 | {-| Get the oscillator wave type -} 752 | getOscillatorWaveType : OscillatorNode -> 
OscillatorWaveType 753 | getOscillatorWaveType = Native.WebAudio.getOscillatorWaveType 754 | 755 | {-| Set the oscillator wave type 756 | 757 | This function returns the oscillator for chaining. 758 | -} 759 | setOscillatorWaveType : OscillatorWaveType -> OscillatorNode -> OscillatorNode 760 | setOscillatorWaveType = Native.WebAudio.setOscillatorWaveType 761 | 762 | {-| Schedule the Oscillator to start 763 | 764 | This method returns the oscillator for chaining. 765 | -} 766 | startOscillator : Float -> OscillatorNode -> OscillatorNode 767 | startOscillator = Native.WebAudio.startOscillator 768 | 769 | {-| Schedule a stop time for the Oscillator. 770 | 771 | WARNING: 772 | After an end time has been set, the oscillator can no longer be started. Since 773 | an oscillator can no longer be started after it has been stopped, the 774 | oscillator is essentially useless. The system is supposed to automatically clean 775 | up AudioNodes that are no longer in use, provided that the node meets a couple 776 | requirements - one of which is that there are no more references to it. 777 | Therefore, Elm.WebAudio will automatically free the reference to the underlying 778 | javascript object as soon as a stop has been scheduled. What this means, from a 779 | practical standpoint, is that any further attempt to manipulate the Oscillator 780 | will result in a javascript error. It's not pretty, but, honestly, neither is 781 | the WebAudio Javascript API. 782 | -} 783 | stopOscillator : Float -> OscillatorNode -> () 784 | stopOscillator = Native.WebAudio.stopOscillator 785 | 786 | 787 | 788 | {-| Type of a PannerNode -} 789 | type alias PannerNode = AudioNode {} 790 | 791 | {-| Panning Model -} 792 | type PanningModel = EqualPower | HRTF 793 | 794 | {-| Distance Model -} 795 | type DistanceModel = Linear | Inverse | Exponential 796 | 797 | {-| Create a PannerNode -} 798 | createPannerNode : AudioContext -> PannerNode 799 | createPannerNode = Native.WebAudio.createPannerNode 800 | 801 | {-| Get the Panning Model of the Panner -} 802 | getPanningModel : PannerNode -> PanningModel 803 | getPanningModel = Native.WebAudio.getPanningModel 804 | 805 | {-| Set the Panning Model of the Panner 806 | 807 | This function returns the PannerNode for chaining. 808 | -} 809 | setPanningModel : PanningModel -> PannerNode -> PannerNode 810 | setPanningModel = Native.WebAudio.setPanningModel 811 | 812 | {-| Get the Distance Model of the Panner -} 813 | getDistanceModel : PannerNode -> DistanceModel 814 | getDistanceModel = Native.WebAudio.getDistanceModel 815 | 816 | {-| Set the Distance Model of the Panner 817 | 818 | This function returns the PannerNode for chaining. 
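For example, configuring a panner (a sketch; `panner` is an illustrative name):

```haskell
panner =
  createPannerNode DefaultContext
    |> setPanningModel HRTF
    |> setDistanceModel Inverse
    |> setPosition 1.0 0.0 0.0
```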
819 | -} 820 | setDistanceModel : DistanceModel -> PannerNode -> PannerNode 821 | setDistanceModel = Native.WebAudio.setDistanceModel 822 | 823 | {-| Get the reference distance of the panner -} 824 | getReferenceDistance : PannerNode -> Float 825 | getReferenceDistance = Native.WebAudio.getReferenceDistance 826 | 827 | {-| Set the reference distance of the panner -} 828 | setReferenceDistance : Float -> PannerNode -> PannerNode 829 | setReferenceDistance = Native.WebAudio.setReferenceDistance 830 | 831 | {-| Get the max distance of the panner -} 832 | getMaxDistance : PannerNode -> Float 833 | getMaxDistance = Native.WebAudio.getMaxDistance 834 | 835 | {-| Set the max distance of the panner -} 836 | setMaxDistance : Float -> PannerNode -> PannerNode 837 | setMaxDistance = Native.WebAudio.setMaxDistance 838 | 839 | {-| Get the rolloff factor for the panner -} 840 | getRolloffFactor : PannerNode -> Float 841 | getRolloffFactor = Native.WebAudio.getRolloffFactor 842 | 843 | {-| Set the rolloff factor for the panner -} 844 | setRolloffFactor : Float -> PannerNode -> PannerNode 845 | setRolloffFactor = Native.WebAudio.setRolloffFactor 846 | 847 | {-| Get the cone inner angle for the panner -} 848 | getConeInnerAngle : PannerNode -> Float 849 | getConeInnerAngle = Native.WebAudio.getConeInnerAngle 850 | 851 | {-| Set the cone inner angle for the panner -} 852 | setConeInnerAngle : Float -> PannerNode -> PannerNode 853 | setConeInnerAngle = Native.WebAudio.setConeInnerAngle 854 | 855 | {-| Get the cone outer angle for the panner -} 856 | getConeOuterAngle : PannerNode -> Float 857 | getConeOuterAngle = Native.WebAudio.getConeOuterAngle 858 | 859 | {-| Set the cone outer angle for the panner -} 860 | setConeOuterAngle : Float -> PannerNode -> PannerNode 861 | setConeOuterAngle = Native.WebAudio.setConeOuterAngle 862 | 863 | {-| Get the cone outer gain for the panner -} 864 | getConeOuterGain : PannerNode -> Float 865 | getConeOuterGain = Native.WebAudio.getConeOuterGain 866 | 867 | {-| Set the cone outer gain for the panner -} 868 | setConeOuterGain : Float -> PannerNode -> PannerNode 869 | setConeOuterGain = Native.WebAudio.setConeOuterGain 870 | 871 | {-| Set the position of the panner's audio source -} 872 | setPosition : Float -> Float -> Float -> PannerNode -> PannerNode 873 | setPosition = Native.WebAudio.setPosition 874 | 875 | {-| Set the orientation of the panner's audio source -} 876 | setOrientation : Float -> Float -> Float -> PannerNode -> PannerNode 877 | setOrientation = Native.WebAudio.setOrientation 878 | 879 | {-| Set the velocity of the panner's audio source -} 880 | setVelocity : Float -> Float -> Float -> PannerNode -> PannerNode 881 | setVelocity = Native.WebAudio.setVelocity 882 | 883 | 884 | 885 | {- TODO: Type of a ScriptProcessorNode -} 886 | {- TODO: Type of a WaveShaperNode -} 887 | 888 | -------------------------------------------------------------------------------- /elm-package.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.1.0", 3 | "summary": "Elm library for accessing the Web Audio API", 4 | "repository": "https://github.com/bmatcuk/elm-webaudio.git", 5 | "license": "BSD3", 6 | "source-directories": [ 7 | ".", 8 | "lib/elm-usermedia" 9 | ], 10 | "exposed-modules": [ "WebAudio" ], 11 | "native-modules": true, 12 | "dependencies": { 13 | "elm-lang/core": "2.0.1 <= v < 3.0.0" 14 | }, 15 | "elm-version": "0.15.0 <= v < 0.16.0" 16 | } 17 | --------------------------------------------------------------------------------
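A minimal usage sketch (not a file in this repository) showing how the node constructors and AudioParam helpers above fit together. It relies on connectNodes, getDestinationNode and tapNode, which appear in the examples that follow; the 440 Hz frequency, the 0.5 gain and the hard-left pan are arbitrary illustration values.

    import Graphics.Element exposing (Element, show)
    import WebAudio exposing (..)

    -- Route a sine oscillator through a panner and a gain node to the
    -- default destination, then start it immediately.
    beep : OscillatorNode
    beep =
      let
        gain =
          createGainNode DefaultContext
            |> tapNode .gain (setValue 0.5)
            |> connectNodes (getDestinationNode DefaultContext) 0 0
        panner =
          createPannerNode DefaultContext
            |> setPanningModel HRTF
            |> setPosition (0 - 1) 0 0
            |> connectNodes gain 0 0
      in
        createOscillatorNode DefaultContext Sine
          |> tapNode .frequency (setValue 440)
          |> connectNodes panner 0 0
          |> startOscillator 0.0

    main : Element
    main =
      let _ = beep
      in show "playing a 440 Hz sine wave"

Because every setter returns its node, the whole graph can be built as a single pipeline, which is the same style the Scales and Visual examples below use.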
/examples/Makefile: -------------------------------------------------------------------------------- 1 | EXAMPLES := $(wildcard *.elm) 2 | TARGETS := $(addprefix build/,$(EXAMPLES:.elm=.html)) build/Visual.js 3 | 4 | INCLUDE_FILES := $(wildcard ../*.elm) $(wildcard ../lib/elm-usermedia/src/*.elm) 5 | SCRIPTS := $(wildcard ../Native/*.js) $(wildcard ../lib/elm-usermedia/src/Native/*.js) 6 | 7 | .PHONY: all 8 | all: $(TARGETS) 9 | 10 | $(TARGETS): $(INCLUDE_FILES) $(SCRIPTS) 11 | 12 | build/%.js: %.elm 13 | elm-make --yes $(CURDIR)/$< --output $(CURDIR)/$@ 14 | 15 | build/%.html: %.elm 16 | elm-make --yes $(CURDIR)/$< --output $(CURDIR)/$@ 17 | @echo "** Open $@ in your browser" 18 | 19 | # The following rules are for the "Visual" example: 20 | build/Visual.html: Visual.html build/Visual.js 21 | ifneq ("$(wildcard .soundcloudkey)","") 22 | sed 's/YOUR CLIENT ID HERE/$(shell cat .soundcloudkey)/' Visual.html > build/Visual.html 23 | @echo "** Open $@ in your browser" 24 | else 25 | cp Visual.html build/Visual.html 26 | @echo "** Edit $@ to add your SoundCloud API key." 27 | @echo "** You can also put your key in a file called .soundcloudkey" 28 | @echo "** in the same directory as this Makefile and it will" 29 | @echo "** automatically get added when you 'make'" 30 | endif 31 | 32 | -------------------------------------------------------------------------------- /examples/Scales.elm: -------------------------------------------------------------------------------- 1 | import Graphics.Element exposing (..) 2 | import Graphics.Input exposing (..) 3 | import Text 4 | import WebAudio exposing (..) 5 | import Maybe exposing (withDefault) 6 | import Debug exposing (crash) 7 | import Signal exposing ((<~), (~)) 8 | import Time exposing (Time, every) 9 | 10 | -- Models 11 | 12 | type alias HalfStep = Float 13 | type alias Label = String 14 | 15 | type Tonic = Tonic Label HalfStep 16 | type Scale = Scale String (List HalfStep) 17 | type MusicState = Playing OscillatorNode HalfStep (List HalfStep) Time | Paused 18 | 19 | tonics = [ Tonic "A" 0 20 | , Tonic "A#/Bb" 1 21 | , Tonic "B" 2 22 | , Tonic "C" 3 23 | , Tonic "C#/Db" 4 24 | , Tonic "D" 5 25 | , Tonic "D#/Eb" 6 26 | , Tonic "E" 7 27 | , Tonic "F" 8 28 | , Tonic "F#/Gb" 9 29 | , Tonic "G" 10 30 | , Tonic "G#/Ab" 11 31 | ] 32 | 33 | musicalScales = [ Scale "Major" [0, 2, 2, 1, 2, 2, 2, 1] 34 | , Scale "Minor" [0, 2, 1, 2, 2, 1, 2, 2] 35 | ] 36 | 37 | headOrDie lst err = 38 | let x = List.head lst 39 | in case x of 40 | Just x' -> x' 41 | Nothing -> crash err 42 | 43 | visualModel = { tonic = headOrDie tonics "No tonics defined" 44 | , scale = headOrDie musicalScales "No scales defined" 45 | } 46 | 47 | musicModel = {state = Paused, changed = False, buttonCount = 0} 48 | 49 | getTonicLabel (Tonic label _) = label 50 | getTonicHalfStep (Tonic _ halfstep) = halfstep 51 | 52 | getScaleLabel (Scale label _) = label 53 | getScaleSteps (Scale _ steps) = steps 54 | 55 | isPlaying state = 56 | case state of 57 | Paused -> False 58 | _ -> True 59 | 60 | 61 | 62 | -- Update 63 | 64 | stopMusic oscillator = 65 | let _ = stopOscillator 0.0 oscillator 66 | in Paused 67 | 68 | updateNote oscillator tonic tones t = 69 | case tones of 70 | tone::remainingTones -> 71 | let 72 | currentStep = tonic + tone 73 | frequency = (220 * (2 ^ (currentStep / 12))) 74 | _ = setValue frequency oscillator.frequency 75 | in Playing oscillator currentStep remainingTones t 76 | [] -> stopMusic oscillator 77 | 78 | startMusic {tonic,scale} t = 79 | let node = createOscillatorNode 
DefaultContext Sine |> connectNodes (getDestinationNode DefaultContext) 0 0 80 | |> startOscillator 0.0 81 | in updateNote node (getTonicHalfStep tonic) (getScaleSteps scale) t 82 | 83 | updateMusic (vmodel,btncnt,t) mmodel = 84 | case mmodel.state of 85 | Playing node tonicStep steps oldt -> 86 | if | btncnt /= mmodel.buttonCount -> {state = stopMusic node, changed = True, buttonCount = btncnt} 87 | | otherwise -> 88 | let newState = updateNote node tonicStep steps t 89 | in {state = newState, changed = not <| isPlaying newState, buttonCount = btncnt} 90 | Paused -> 91 | if | btncnt /= mmodel.buttonCount -> {state = startMusic vmodel t, changed = True, buttonCount = btncnt} 92 | | otherwise -> {mmodel | changed <- False} 93 | 94 | 95 | 96 | -- Input 97 | 98 | tonicInput : Signal.Mailbox Tonic 99 | tonicInput = Signal.mailbox visualModel.tonic 100 | 101 | scaleInput : Signal.Mailbox Scale 102 | scaleInput = Signal.mailbox visualModel.scale 103 | 104 | playInput = Signal.mailbox () 105 | 106 | playCount : Signal Int 107 | playCount = Signal.foldp (\_ total -> total + 1) 0 playInput.signal 108 | 109 | deadLetter = Signal.mailbox () 110 | 111 | visualSignal = Signal.map2 (\t s -> {tonic = t, scale = s}) tonicInput.signal scaleInput.signal 112 | musicSignal = Signal.map3 (,,) visualSignal (playCount) (Time.every 350.0) 113 | 114 | 115 | 116 | -- Render 117 | 118 | checkboxWithLabel : String -> (Bool -> Signal.Message) -> Bool -> Element 119 | checkboxWithLabel label handler checked = 120 | container 70 30 middle <| flow right [checkbox handler checked 121 | , leftAligned (Text.monospace (Text.fromString label)) 122 | ] 123 | 124 | onOff : Signal.Address a -> a -> Bool -> Signal.Message 125 | onOff addr toSend checked = 126 | if checked then Signal.message addr toSend else Signal.message deadLetter.address () 127 | 128 | tonicBoxes tonic = 129 | let box t = checkboxWithLabel (getTonicLabel t) (onOff tonicInput.address t) (t == tonic) 130 | in flow right <| List.map box tonics 131 | 132 | scaleBoxes scale = 133 | let box s = checkboxWithLabel (getScaleLabel s) (onOff scaleInput.address s) (s == scale) 134 | in flow right <| List.map box musicalScales 135 | 136 | playButton vmodel mmodel = 137 | button (Signal.message playInput.address ()) (if (isPlaying mmodel.state) then "Stop" else "Play") 138 | 139 | render (vmodel,mmodel) = 140 | flow down [ tonicBoxes vmodel.tonic 141 | , scaleBoxes vmodel.scale 142 | , playButton vmodel mmodel 143 | ] 144 | 145 | -- Main 146 | 147 | mainMusic = Signal.foldp updateMusic musicModel musicSignal |> Signal.filter (\m -> m.changed || (isPlaying m.state)) musicModel 148 | main = render <~ (Signal.map2 (,) visualSignal mainMusic) 149 | 150 | -------------------------------------------------------------------------------- /examples/Stream.elm: -------------------------------------------------------------------------------- 1 | import UserMedia exposing (MediaStream, requestUserMedia) 2 | import WebAudio exposing (..) 
3 | import Task as T 4 | import Signal as S exposing ((<~)) 5 | import Html exposing (div, text) 6 | 7 | view model = 8 | case model of 9 | Nothing -> div [] [ text "Nothing" ] 10 | Just stream -> 11 | let node = createMediaStreamAudioSourceNode DefaultContext stream 12 | |> connectNodes (getDestinationNode DefaultContext) 0 0 13 | in 14 | div [] [ text ("Got user media : " ++ (.label stream)) ] 15 | 16 | {-| send one time request for usermedia, stream is then forwarded to the 17 | mailbox 18 | -} 19 | port getUserMedia : T.Task x () 20 | port getUserMedia = 21 | requestUserMedia userMediaStream.address { audio=True, video=False } 22 | 23 | userMediaStream : S.Mailbox (Maybe MediaStream) 24 | userMediaStream = 25 | S.mailbox Nothing 26 | 27 | 28 | main = 29 | view <~ userMediaStream.signal 30 | -------------------------------------------------------------------------------- /examples/Visual.elm: -------------------------------------------------------------------------------- 1 | module Visual where 2 | 3 | import Color exposing (rgb, black, darkRed, lightRed, orange) 4 | import Debug exposing (crash) 5 | import Graphics.Collage exposing (..) 6 | import Graphics.Element exposing (..) 7 | import Graphics.Input exposing (button) 8 | import Graphics.Input.Field exposing (Content, Direction (..), Selection, 9 | defaultStyle, field, noContent) 10 | import Maybe exposing (withDefault) 11 | import Mouse 12 | import Signal exposing ((<~), (~)) 13 | import Time 14 | import Transform2D exposing (matrix, translation) 15 | import WebAudio exposing (..) 16 | import Window 17 | 18 | doOrDie f lst err = 19 | let x = f lst 20 | in case x of 21 | Just x' -> x' 22 | Nothing -> crash err 23 | 24 | headOrDie = doOrDie List.head 25 | tailOrDie = doOrDie List.tail 26 | 27 | -- Models 28 | 29 | analyser = createAnalyserNode DefaultContext |> connectNodes (getDestinationNode DefaultContext) 0 0 30 | 31 | filters = 32 | let 33 | frequencies = [31.25, 62.5, 125, 250, 500, 1000, 2000, 4000, 8000, 16000] 34 | makeFilter f = createBiquadFilterNode DefaultContext 35 | |> setFilterType Peaking 36 | |> tapNode .frequency (\freq -> setValue f freq) 37 | rlst = List.map makeFilter (List.reverse frequencies) 38 | end = headOrDie rlst "no filters" 39 | |> setFilterType HighShelf 40 | |> connectNodes analyser 0 0 41 | lst = List.reverse <| List.scanl (\c p -> connectNodes p 0 0 c) end (tailOrDie rlst "no filters") 42 | in case lst of 43 | x::xs -> (setFilterType LowShelf x) :: xs 44 | 45 | mediaStream = createHiddenMediaElementAudioSourceNode DefaultContext 46 | |> setMediaElementIsLooping True 47 | |> connectNodes (headOrDie filters "no filters") 0 0 48 | 49 | sliderSize = {w = 20.0, h = 100.0} 50 | 51 | slidersState = 52 | { dimensions = (0,0) 53 | , dragging = False 54 | , lastPosition = (0,0) 55 | , selected = Nothing 56 | , scale = 1.0 57 | , move = 0.0 58 | , changes = False 59 | } 60 | 61 | isSelectedSlider idx state = 62 | withDefault False <| Maybe.map (\(i,_) -> i == idx) state.selected 63 | 64 | sliderValueClamp = (max -40.0) << (min 40.0) 65 | 66 | controlState = 67 | { playing = False 68 | , loadTrack = False 69 | , url = Content "https://soundcloud.com/failemotions/gravity-instrumental" (Selection 0 0 Forward) 70 | , btnCnt = 0 71 | } 72 | 73 | -- Update 74 | 75 | scaleToFit {w,h} desiredW desiredH = min (desiredW / w) (desiredH / h) 76 | 77 | updateSlidersVisual (w',h') isdown pos state = 78 | let 79 | (w,h) = (toFloat w', toFloat h') 80 | sliderScale = scaleToFit sliderSize (w / 10.0 * 0.9) (h / 2.0 * 0.9) 81 | 
sliderMove = sliderScale * sliderSize.w / 0.9 82 | updates = updateSliders ((w',h'),isdown,pos) {state | dimensions <- (w',h'), scale <- sliderScale, move <- sliderMove} 83 | in 84 | {updates | changes <- True} 85 | 86 | handleHitTest x y s f = 87 | let 88 | handleLeft = (0 - sliderSize.w / 2.0) * s 89 | handleRight = handleLeft + sliderSize.w * s 90 | posy = getValue f.gain 91 | handleBottom = (posy - sliderSize.w / 2.0) * s 92 | handleTop = handleBottom + sliderSize.w * s 93 | in handleLeft <= x && x <= handleRight && handleBottom <= y && y <= handleTop 94 | 95 | selectSlider (w',h') (x',y') state = 96 | let 97 | (w,h) = (toFloat w', toFloat h') 98 | x = toFloat x' - w / 2.0 + state.move * 5.0 99 | y = h / 4.0 - toFloat y' 100 | lst = List.indexedMap (,) filters 101 | filtered = List.filter (\(i,f) -> handleHitTest (x - (toFloat i + 0.5) * state.move) y state.scale f) lst 102 | selected = List.head filtered 103 | in 104 | updateSelectedSlider (x',y') {state | selected <- selected, dragging <- True} 105 | 106 | updateSelectedSlider pos state = 107 | case state.selected of 108 | Just (_,slider) -> 109 | let 110 | currentVal = getValue slider.gain 111 | delta = (toFloat (snd state.lastPosition - snd pos)) / state.scale 112 | newVal = sliderValueClamp (currentVal + delta) 113 | _ = setValue newVal slider.gain 114 | in {state | lastPosition <- pos, changes <- True} 115 | Nothing -> {state | lastPosition <- pos, changes <- False} 116 | 117 | disableSelectedSlider pos state = 118 | if | state.dragging -> {state | lastPosition <- pos, selected <- Nothing, dragging <- False, changes <- True} 119 | | otherwise -> {state | lastPosition <- pos, changes <- False} 120 | 121 | updateSliders (dim,isdown,pos) state = 122 | if | dim /= state.dimensions -> updateSlidersVisual dim isdown pos state 123 | | isdown -> if state.dragging then (updateSelectedSlider pos state) else (selectSlider dim pos state) 124 | | otherwise -> disableSelectedSlider pos state 125 | 126 | updateTrack = 127 | playMediaElement << withDefault mediaStream << Maybe.map (\url -> setMediaElementSource url mediaStream) 128 | 129 | pauseMusic state = 130 | let _ = pauseMediaElement mediaStream 131 | in {state | playing <- False, loadTrack <- False} 132 | 133 | playMusic state = {state | loadTrack <- True, playing <- True} 134 | 135 | toggleMusic state = 136 | if | state.playing -> pauseMusic state 137 | | otherwise -> playMusic state 138 | 139 | updateControls (cnt,url) state = 140 | if | cnt /= state.btnCnt -> toggleMusic {state | btnCnt <- cnt, url <- url} 141 | | otherwise -> {state | url <- url, loadTrack <- False} 142 | 143 | -- Input 144 | 145 | slidersInput = Signal.map3 (,,) 146 | Window.dimensions 147 | Mouse.isDown 148 | Mouse.position 149 | 150 | playButtonInput = Signal.mailbox controlState.playing 151 | 152 | playButtonCount = Signal.foldp (\_ total -> total + 1) 0 playButtonInput.signal 153 | 154 | urlFieldInput = Signal.mailbox controlState.url 155 | 156 | controlInput = Signal.map2 (,) 157 | playButtonCount 158 | urlFieldInput.signal 159 | 160 | port soundUrl : Signal (Maybe String) 161 | 162 | -- Render 163 | 164 | renderSlider val selected = 165 | let handleColor = if selected then lightRed else darkRed 166 | in 167 | [ rect (sliderSize.w / 4.0) (sliderSize.h - sliderSize.w) |> filled black 168 | , rect sliderSize.w sliderSize.w |> filled handleColor |> moveY val 169 | ] 170 | 171 | renderSliders w h state = 172 | let 173 | slider idx filter = 174 | let 175 | x = (toFloat idx + 0.5) * state.move 176 | val = getValue 
filter.gain 177 | selected = isSelectedSlider idx state 178 | in 179 | groupTransform (matrix state.scale 0 0 state.scale x 0) (renderSlider val selected) 180 | in 181 | groupTransform (translation (0 - state.move * 5.0) (h / 2.0)) <| List.indexedMap slider filters 182 | 183 | renderControls w state = 184 | let 185 | btn = button (Signal.message playButtonInput.address (not state.playing)) (if state.playing then "Pause" else "Play") 186 | url = field defaultStyle (Signal.message urlFieldInput.address) "SoundCloud Permalink URL" state.url |> width (round (w / 2) - widthOf btn) 187 | in 188 | beside btn url |> toForm 189 | 190 | renderAnalyser w h freqdata = 191 | let 192 | barWidth = w / (toFloat << List.length) freqdata 193 | draw idx datum = 194 | let barHeight = h * toFloat datum / 255.0 195 | in rect barWidth barHeight |> filled orange |> move ((toFloat idx + 0.5) * barWidth,(barHeight - h) / 2.0) 196 | in 197 | groupTransform (translation (0 - w / 2.0) (h / -2.0)) <| List.indexedMap draw freqdata 198 | 199 | render (w',h') sliderState controlState freqdata _ = 200 | let 201 | (w,h) = (toFloat w', toFloat h') 202 | halfw = w / 2.0 203 | halfh = h / 2.0 204 | quarterh = halfh / 2.0 205 | in collage w' h' 206 | [ rect w halfh |> filled (rgb 34 34 34) |> moveY quarterh 207 | , rect w halfh |> filled black |> moveY (0 - quarterh) 208 | , renderSliders w halfh sliderState 209 | , renderAnalyser w halfh freqdata 210 | , renderControls w controlState 211 | ] 212 | 213 | 214 | 215 | -- Main 216 | 217 | mainMediaStream = updateTrack <~ soundUrl 218 | 219 | mainSliders = Signal.foldp updateSliders slidersState slidersInput |> Signal.filter (\m -> m.changes) slidersState 220 | 221 | mainControls = Signal.foldp updateControls controlState controlInput 222 | 223 | port fetchSoundUrl : Signal String 224 | port fetchSoundUrl = (\{url} -> url.string) <~ Signal.filter (\{loadTrack} -> loadTrack) controlState mainControls 225 | 226 | main = render <~ Window.dimensions 227 | ~ mainSliders 228 | ~ mainControls 229 | ~ ((\_ -> getByteFrequencyData analyser) <~ Time.every 50.0) 230 | ~ mainMediaStream 231 | 232 | -------------------------------------------------------------------------------- /examples/Visual.html: -------------------------------------------------------------------------------- [The HTML markup of this file was lost when this dump was produced; only the page title, "elm-webaudio soundcloud visualizer", is recoverable. Per the examples Makefile, the page embeds the compiled build/Visual.js and contains the "YOUR CLIENT ID HERE" placeholder that gets replaced with a SoundCloud API key; the JavaScript that wires the soundUrl and fetchSoundUrl ports (presumably via the SoundCloud SDK) is not reproduced here.] -------------------------------------------------------------------------------- /examples/elm-package.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.1.0", 3 | "summary": "Examples for elm-webaudio, an elm library for accessing the Web Audio API", 4 | "repository": "https://github.com/bmatcuk/elm-webaudio.git", 5 | "license": "BSD3", 6 | "source-directories": [ 7 | "..", 8 | "../lib/elm-usermedia/src" 9 | ], 10 | "exposed-modules": [], 11 | "native-modules": true, 12 | "dependencies": { 13 | "elm-lang/core": "2.0.1 <= v < 3.0.0", 14 | "evancz/elm-html": "3.0.0 <= v < 4.0.0" 15 | }, 16 | "elm-version": "0.15.0 <= v < 0.16.0" 17 | } 18 | --------------------------------------------------------------------------------
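To close, a second sketch (again not a file in this repository) that strips the Visual example down to its core loop: an oscillator feeds an analyser, and getByteFrequencyData is polled on a timer, mirroring the Time.every wiring in Visual.elm's main. The 250 ms interval and the eight displayed bins are arbitrary.

    import Graphics.Element exposing (Element, show)
    import Signal exposing ((<~))
    import Time
    import WebAudio exposing (..)

    -- An oscillator feeding an analyser that is connected to the default
    -- destination, so the tone is both audible and measurable.
    analysedTone =
      let
        analyser =
          createAnalyserNode DefaultContext
            |> connectNodes (getDestinationNode DefaultContext) 0 0
        _ =
          createOscillatorNode DefaultContext Sine
            |> connectNodes analyser 0 0
            |> startOscillator 0.0
      in
        analyser

    -- Poll the analyser a few times per second and show the first few
    -- frequency bins (byte values in the 0..255 range).
    main : Signal Element
    main =
      (\_ -> show (List.take 8 (getByteFrequencyData analysedTone)))
        <~ Time.every 250.0

Visual.elm does the same thing with Time.every 50.0, but renders the whole list as bars in a collage instead of printing a prefix of it.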