├── .gitignore
├── resources
├── vox.wav
├── zara.wav
├── guitar.wav
├── spring.mp3
├── web-audio-api-flowchart.png
└── index.html
├── lib
├── send.js
├── persist.js
├── markdown.js
├── observ-set.js
├── interpolate-color.js
├── evalify.js
├── audio-match.js
├── spectrograph.js
├── editor.js
└── verifier.js
├── styles
├── raw-editor.mcss
├── main.mcss
├── index.js
├── base.mcss
├── index.mcss
├── player.mcss
└── lesson.mcss
├── lessons
├── 1. Subtractive Synthesis
│ ├── 00. Browser make sound!
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 03. Play a short sequence of notes
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 01. Play a pitched sawtooth wave
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 02. Chromatic Scale
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 06. Add an envelope
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 04. Add a high-pass filter
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 05. Modulate filter cutoff
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ └── 07. Vibrato
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
├── 2. Working With Samples
│ ├── 08. Decode and play an audio file
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 10. Looping samples
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 11. Set sample pitch
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 09. Set in and out points
│ │ ├── start.js
│ │ ├── lesson.md
│ │ └── answer.js
│ └── 12. Play a sequence of pitched samples
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
├── 3. Effects
│ ├── 13. Tremolo
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 14. Stereo Tremolo
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 18. Ping Pong Delay
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 19. Simple Reverb
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 15. Simple Overdrive
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ ├── 17. Delay with feedback
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
│ └── 16. Better Overdrive
│ │ ├── start.js
│ │ ├── answer.js
│ │ └── lesson.md
└── index.js
├── run.js
├── state
├── lesson.js
└── index.js
├── views
├── index.js
└── lesson.js
├── browser-entry.js
├── package.json
├── CONTRIBUTING.md
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 | build
--------------------------------------------------------------------------------
/resources/vox.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mmckegg/web-audio-school/HEAD/resources/vox.wav
--------------------------------------------------------------------------------
/resources/zara.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mmckegg/web-audio-school/HEAD/resources/zara.wav
--------------------------------------------------------------------------------
/resources/guitar.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mmckegg/web-audio-school/HEAD/resources/guitar.wav
--------------------------------------------------------------------------------
/resources/spring.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mmckegg/web-audio-school/HEAD/resources/spring.mp3
--------------------------------------------------------------------------------
/lib/send.js:
--------------------------------------------------------------------------------
// Build a no-argument event callback that invokes `target` with the
// pre-bound `param` value. Useful for wiring UI events to observable
// setters without writing inline closures.
module.exports = createSend

function createSend (target, param) {
  return function send () {
    target(param)
  }
}
--------------------------------------------------------------------------------
/resources/web-audio-api-flowchart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mmckegg/web-audio-school/HEAD/resources/web-audio-api-flowchart.png
--------------------------------------------------------------------------------
/styles/raw-editor.mcss:
--------------------------------------------------------------------------------
1 | RawEditor {
2 | flex: 1
3 | position: relative;
4 | background-color: #333
5 | margin: 5px
6 |
7 | div {
8 | background-color: transparent
9 | position: absolute
10 | top:0;right:0;bottom:0;left:0
11 | }
12 | }
--------------------------------------------------------------------------------
/styles/main.mcss:
--------------------------------------------------------------------------------
1 | Main {
2 | display: flex
3 | height: 100%;
4 |
5 | div.side {
6 | display: flex
7 | flex-direction: column
8 | width: 50%
9 | }
10 |
11 | div.editor {
12 | flex: 1
13 | display: flex
14 | flex-direction: column
15 | }
16 | }
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/00. Browser make sound!/start.js:
--------------------------------------------------------------------------------
// Lesson start: a single oscillator (defaults: sine wave, 440 Hz) wired
// straight to the speakers.
var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

// Play for 2 seconds starting now.
oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/03. Play a short sequence of notes/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// Schedule three notes: (delay seconds from now, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration
  // Student exercise: create an oscillator here and schedule it to start
  // at `startTime` and stop at `endTime`.
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/01. Play a pitched sawtooth wave/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

// Use a harmonically rich sawtooth instead of the default sine.
oscillator.type = 'sawtooth'

// Play for 2 seconds starting now.
oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/00. Browser make sound!/answer.js:
--------------------------------------------------------------------------------
// # duration=2

// Answer: a sawtooth oscillator at the default 440 Hz, played for 2 seconds.
var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

oscillator.type = 'sawtooth'

oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/02. Chromatic Scale/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

oscillator.type = 'sawtooth'
// Base pitch: A4 (440 Hz). The exercise is to shift this chromatically.
oscillator.frequency.value = 440

oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/resources/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Web Audio School
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/02. Chromatic Scale/answer.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

oscillator.type = 'sawtooth'
oscillator.frequency.value = 440
// Detune is in cents: 300 cents = 3 semitones above A4.
oscillator.detune.value = 300

oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/01. Play a pitched sawtooth wave/answer.js:
--------------------------------------------------------------------------------
// # duration=2

var audioContext = new AudioContext()

var oscillator = audioContext.createOscillator()
oscillator.connect(audioContext.destination)

oscillator.type = 'sawtooth'
// 220 Hz = A3, one octave below the oscillator's default 440 Hz.
oscillator.frequency.value = 220

oscillator.start(audioContext.currentTime)
oscillator.stop(audioContext.currentTime + 2)
--------------------------------------------------------------------------------
/run.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node
// Convenience launcher: starts the dev server (`npm start`) from this
// directory and opens the app in the default browser.
var exec = require('child_process').exec
var opn = require('opn');

process.chdir(__dirname)
var runner = exec('npm start')

console.log('Starting server...')
// Give the server 2 seconds to boot before opening the page and piping
// its output through to this terminal.
setTimeout(function () {
  console.log('Go to http://localhost:9966')
  opn('http://localhost:9966')
  runner.stdout.pipe(process.stdout)
  runner.stderr.pipe(process.stderr)
}, 2000)
--------------------------------------------------------------------------------
/lib/persist.js:
--------------------------------------------------------------------------------
1 | module.exports = function (key, obs) {
2 | var data = get(key)
3 | if (data != null) {
4 | obs.set(data)
5 | }
6 | return obs(set.bind(this, key))
7 | }
8 |
9 | function get (key) {
10 | var data = window.localStorage[key]
11 | if (data != null) {
12 | return JSON.parse(data)
13 | }
14 | }
15 |
16 | function set (key, value) {
17 | window.localStorage[key] = JSON.stringify(value) || 'null'
18 | }
19 |
--------------------------------------------------------------------------------
/styles/index.js:
--------------------------------------------------------------------------------
var fs = require('fs')
var path = require('path')
var compile = require('micro-css')

// Concatenate every .mcss file in this directory, compile the result with
// micro-css, and append highlight.js's monokai theme CSS. This module is
// evaluated at bundle time (see lib/evalify.js).
var source = fs.readdirSync(__dirname)
  .filter(function (file) {
    return /\.mcss$/i.test(file)
  })
  .map(function (file) {
    return fs.readFileSync(path.resolve(__dirname, file), 'utf8') + '\n'
  })
  .join('')

var highlightTheme = fs.readFileSync(require.resolve('highlight.js/styles/monokai.css'), 'utf8')

module.exports = compile(source) + highlightTheme
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/08. Decode and play an audio file/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// wait 200ms for sample to download/decode
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  // Student exercise: create a buffer source for `buffer`, connect it,
  // and start it at `startTime`.
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/06. Add an envelope/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// Three notes: (delay seconds, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/04. Add a high-pass filter/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// Three notes: (delay seconds, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  var oscillator = audioContext.createOscillator()
  // Student exercise: route this through a high-pass filter instead of
  // connecting directly to the destination.
  oscillator.connect(audioContext.destination)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
--------------------------------------------------------------------------------
/lessons/3. Effects/13. Tremolo/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// add effects here

// ^^^^^^^^^^^^^^^^^

// A three-note chord: (delay seconds, pitch in semitones, duration).
play(0, -9, 2.25)
play(0, 3, 2.25)
play(0, 0, 2.25)

function play (delay, pitch, duration) {
  var time = audioContext.currentTime + delay

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination) // change output

  oscillator.type = 'triangle'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
--------------------------------------------------------------------------------
/lessons/3. Effects/14. Stereo Tremolo/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// add effects here

// ^^^^^^^^^^^^^^^^^

// A three-note chord: (delay seconds, pitch in semitones, duration).
play(0, -7, 2.25)
play(0, 5, 2.25)
play(0, 0, 2.25)

function play (delay, pitch, duration) {
  var time = audioContext.currentTime + delay

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination) // change output

  oscillator.type = 'triangle'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
--------------------------------------------------------------------------------
/lib/markdown.js:
--------------------------------------------------------------------------------
1 | var hljs = require('highlight.js')
2 | var iterator = require('markdown-it-for-inline')
3 |
4 | module.exports = require('markdown-it')({
5 | linkify: true,
6 | highlight: function (str, lang) {
7 | if (lang && hljs.getLanguage(lang)) {
8 | try {
9 | return hljs.highlight(lang, str).value
10 | } catch (__) {}
11 | }
12 |
13 | try {
14 | return hljs.highlightAuto(str).value
15 | } catch (__) {}
16 |
17 | return ''
18 | }
19 | }).use(iterator, 'url_new_win', 'link_open', function (tokens, idx) {
20 | tokens[idx].attrPush([ 'target', '_blank' ])
21 | })
22 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/03. Play a short sequence of notes/answer.js:
--------------------------------------------------------------------------------
// # duration=3

var audioContext = new AudioContext()

// Three notes: (delay seconds, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
--------------------------------------------------------------------------------
/lessons/3. Effects/18. Ping Pong Delay/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// add effects here

// ^^^^^^^^^^^^^^^^^

// Three short blips at 1/8 s intervals: (startAfter seconds, pitch, duration).
play(1 / 8, 3, 0.05)
play(2 / 8, 7, 0.05)
play(3 / 8, 15, 0.05)

function play (startAfter, pitch, duration) {
  var time = audioContext.currentTime + startAfter

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination) // change output

  oscillator.type = 'square'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/10. Looping samples/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/11. Set sample pitch/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 100ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.1

getSample('vox.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/09. Set in and out points/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  // Student exercise: pass offset/duration arguments to trim the sample.
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/index.js:
--------------------------------------------------------------------------------
var fs = require('fs')
var path = require('path')

// Read the lesson tree into a nested plain object at bundle time:
// directories become nested objects, .js/.md files become their text
// contents. The top level is restricted to directories (section folders).
module.exports = readDir(__dirname, true)

function readDir (dir, directoriesOnly) {
  var result = {}
  fs.readdirSync(dir).forEach(function (file) {
    var fullPath = path.join(dir, file)
    var stat = fs.lstatSync(fullPath)
    if (stat.isDirectory()) {
      // The recursion intentionally omits `directoriesOnly` so files
      // inside lesson folders are included.
      result[file] = readDir(fullPath)
    } else if (stat.isFile() && !directoriesOnly) {
      var ext = path.extname(file)
      if (ext === '.js' || ext === '.md') {
        result[file] = fs.readFileSync(fullPath, 'utf8')
      }
    }
  })
  return result
}
--------------------------------------------------------------------------------
/lessons/3. Effects/19. Simple Reverb/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

getSample('guitar.wav', function play (buffer) {
  // Add reverb logic here
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/09. Set in and out points/lesson.md:
--------------------------------------------------------------------------------
1 | > Trim the first `2.5` seconds off the sample, then play for `2` seconds.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Trimming Samples
6 |
7 | Sometimes the audio file might have extra content that you don't want to play. You can trim the beginning and end by passing extra arguments to `player.start`.
8 |
9 | In this example we'll only play the first `2` seconds of the file:
10 |
11 | ```
12 | player.start(startTime, 0, 2)
13 | ```
14 |
15 | Or we could start `4` seconds into the file and play to the end:
16 |
17 | ```
18 | player.start(startTime, 4)
19 | ```
20 |
--------------------------------------------------------------------------------
/lib/observ-set.js:
--------------------------------------------------------------------------------
var Observ = require('observ')

module.exports = ObservSet

// An Observ(array) extended with set-like helpers: add, remove, has.
// All mutations replace the array (never mutate in place) so observers
// always fire with a fresh value.
function ObservSet (array) {
  var obs = Observ(array || [])

  // Append `key` if it is not already present.
  obs.add = function (key) {
    var current = obs() || []
    if (current.indexOf(key) === -1) {
      obs.set(current.concat(key))
    }
  }

  // Remove every occurrence of `key` (no-op when absent).
  obs.remove = function (key) {
    var current = obs() || []
    if (current.indexOf(key) !== -1) {
      obs.set(current.filter(function (item) {
        return item !== key
      }))
    }
  }

  // True when `key` is currently in the set.
  obs.has = function (key) {
    var current = obs() || []
    return current.indexOf(key) !== -1
  }

  return obs
}
--------------------------------------------------------------------------------
/lib/interpolate-color.js:
--------------------------------------------------------------------------------
// adapted from: https://github.com/jsantell/interpolate-color/blob/master/index.js

module.exports = interpolate

// Linearly interpolate between two HSL triples ([h, s, l]) by `step`
// (0 = start, 1 = end) and format the result as a CSS hsl() string.
// `precision` is the number of decimal places to keep (default 0).
function interpolate (start, end, step, precision) {
  if (precision == null) {
    precision = 0
  }

  // Interpolate each channel (hue, saturation, lightness) independently.
  var channels = [0, 1, 2].map(function (i) {
    return (start[i] - (start[i] - end[i]) * step).toFixed(precision)
  })

  return 'hsl(' + channels[0] + ', ' + channels[1] + '%, ' + channels[2] + '%)'
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/09. Set in and out points/answer.js:
--------------------------------------------------------------------------------
// # duration=2.2

var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  // Skip the first 2.5 seconds of the sample, then play for 2 seconds.
  player.start(startTime, 2.5, 2)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/3. Effects/15. Simple Overdrive/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

// add effects here

// ^^^^^^^^^^^^^^^^^

getSample('guitar.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/11. Set sample pitch/answer.js:
--------------------------------------------------------------------------------
// # duration=5
var audioContext = new AudioContext()
// Leave 100ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.1

getSample('vox.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  // Half speed: plays one octave lower and takes twice as long.
  player.playbackRate.value = 0.5
  player.connect(audioContext.destination)
  player.start(startTime, 0, buffer.duration)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/05. Modulate filter cutoff/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// Three notes: (delay seconds, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  // Per-note high-pass filter with a fixed 10 kHz cutoff; the exercise is
  // to modulate this cutoff over time.
  var filter = audioContext.createBiquadFilter()
  filter.connect(audioContext.destination)
  filter.type = 'highpass'
  filter.frequency.value = 10000

  var oscillator = audioContext.createOscillator()
  oscillator.connect(filter)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/08. Decode and play an audio file/answer.js:
--------------------------------------------------------------------------------
// # duration=8.4

var audioContext = new AudioContext()

// wait 200ms for sample to download/decode
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(audioContext.destination)
  player.start(startTime)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/04. Add a high-pass filter/answer.js:
--------------------------------------------------------------------------------
// # duration=3

var audioContext = new AudioContext()

// A single shared high-pass filter: all notes route through it instead of
// connecting to the destination directly.
var filter = audioContext.createBiquadFilter()
filter.connect(audioContext.destination)
filter.type = 'highpass'
filter.frequency.value = 10000

play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  var oscillator = audioContext.createOscillator()
  oscillator.connect(filter)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/10. Looping samples/answer.js:
--------------------------------------------------------------------------------
// # duration=7.4

var audioContext = new AudioContext()
// Leave 200ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.2

getSample('zara.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  // Once playback reaches loopEnd (4s), jump back to loopStart (3s)
  // and repeat until stop() below.
  player.loop = true
  player.loopStart = 3
  player.loopEnd = 4
  player.connect(audioContext.destination)
  player.start(startTime, 0)
  player.stop(startTime + 7)
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/12. Play a sequence of pitched samples/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
// Leave 100ms for the sample to download/decode before playback starts.
var startTime = audioContext.currentTime + 0.1

getSample('vox.wav', function (buffer) {
  // (delay seconds, transpose in semitones)
  play(0, -12)
  play(1, -5)
  play(2, 0)

  function play (delay, transpose) {
    var player = audioContext.createBufferSource()
    player.buffer = buffer
    player.connect(audioContext.destination)
    // Student exercise: `transpose` is not applied yet — set the player's
    // pitch from it before starting.
    player.start(startTime + delay)
  }
})

// Download `url` as an ArrayBuffer and decode it into an AudioBuffer,
// which is passed to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/07. Vibrato/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// Three notes: (delay seconds, pitch in semitones, duration in seconds).
play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  // Gain envelope: fade in at note start, fade out at note end.
  var envelope = audioContext.createGain()
  envelope.connect(audioContext.destination)
  envelope.gain.value = 0
  envelope.gain.setTargetAtTime(1, startTime, 0.1)
  envelope.gain.setTargetAtTime(0, endTime, 0.2)

  var oscillator = audioContext.createOscillator()
  oscillator.connect(envelope)

  oscillator.type = 'sawtooth'
  // Detune is in cents, so pitch * 100 shifts by whole semitones.
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  // Keep the oscillator running 2s past the note end so the envelope's
  // release tail can ring out instead of being cut off.
  oscillator.stop(endTime + 2)
}
--------------------------------------------------------------------------------
/lib/evalify.js:
--------------------------------------------------------------------------------
1 | var through = require('through2')
2 | var path = require('path')
3 |
4 | module.exports = function (filename, opts) {
5 | var files = opts && opts.files || opts.f
6 |
7 | if (!files || !inPaths(filename, files, process.cwd())) {
8 | return through()
9 | }
10 |
11 | return through(
12 | function transform (chunk, enc, next) {
13 | next()
14 | },
15 | function flush (done) {
16 | delete require.cache[filename]
17 | var moduleBody = 'module.exports = ' + JSON.stringify(require(filename))
18 | this.push(moduleBody)
19 | this.push(null)
20 | done()
21 | }
22 | )
23 | }
24 |
// True when `file` resolves (against cwd) to the same absolute path as
// any entry in `paths`.
function inPaths (file, paths, cwd) {
  var target = path.resolve(cwd, file)
  for (var i = 0; i < paths.length; i++) {
    if (path.resolve(cwd, paths[i]) === target) return true
  }
  return false
}
30 |
--------------------------------------------------------------------------------
/lessons/3. Effects/14. Stereo Tremolo/answer.js:
--------------------------------------------------------------------------------
// # duration=2.5

var audioContext = new AudioContext()

// All notes route through this panner on their way to the speakers.
var panner = audioContext.createStereoPanner()
panner.connect(audioContext.destination)

// 2 Hz sine LFO drives panner.pan (-1..1): the stereo tremolo.
var lfo = audioContext.createOscillator()
lfo.connect(panner.pan)
lfo.type = 'sine'
lfo.frequency.value = 2
lfo.start(audioContext.currentTime)

// Three-note chord, all starting immediately.
play(0, -7, 2.25)
play(0, 5, 2.25)
play(0, 0, 2.25)

// delay: seconds from now; pitch: semitones; duration: seconds
function play (delay, pitch, duration) {
  var time = audioContext.currentTime + delay

  var oscillator = audioContext.createOscillator()
  oscillator.connect(panner) // change output

  oscillator.type = 'triangle'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
30 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/06. Add an envelope/answer.js:
--------------------------------------------------------------------------------
// # duration=4.5

var audioContext = new AudioContext()

play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

// delay: seconds from now; pitch: semitones; duration: seconds
function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  // Gain envelope: start silent, ease in at startTime, ease out at endTime.
  var envelope = audioContext.createGain()
  envelope.connect(audioContext.destination)
  envelope.gain.value = 0
  envelope.gain.setTargetAtTime(1, startTime, 0.1)
  envelope.gain.setTargetAtTime(0, endTime, 0.2)

  var oscillator = audioContext.createOscillator()
  oscillator.connect(envelope)

  oscillator.type = 'sawtooth'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  // Run past endTime so the envelope's release tail isn't cut off.
  oscillator.stop(endTime + 2)
}
28 |
--------------------------------------------------------------------------------
/lessons/3. Effects/17. Delay with feedback/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()

// add effects here

// ^^^^^^^^^^^^^^^^^

// A fast arpeggio of short square-wave notes, one every 50 ms.
play(0, -3, 0.05)
play(0.05, 2, 0.05)
play(0.1, 9, 0.05)
play(0.15, 14, 0.05)
play(0.2, 9, 0.05)
play(0.25, 2, 0.05)
play(0.3, -3, 0.05)
play(0.35, 7, 0.05)
play(0.4, 14, 0.05)
play(0.45, 18, 0.05)
play(0.5, 9, 0.05)
play(0.55, 2, 0.05)

// startAfter: seconds from now; pitch: semitones; duration: seconds
function play (startAfter, pitch, duration) {
  var time = audioContext.currentTime + startAfter

  var oscillator = audioContext.createOscillator()
  oscillator.connect(audioContext.destination) // change output

  oscillator.type = 'square'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
32 |
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/11. Set sample pitch/lesson.md:
--------------------------------------------------------------------------------
1 | > Change the sample **pitch** down `1` octave (halve the speed).
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Audio Playback Rate
6 |
7 | You can change the speed the audio plays back (and therefore its pitch) using the [`playbackRate`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/playbackRate) [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam).
8 |
9 |
10 | This will play audio back at **twice** its original speed and pitch (an octave up):
11 |
12 | ```js
13 | player.playbackRate.value = 2
14 | ```
15 |
If we want to slow the audio down (reduce pitch), we specify a value less than `1`:
17 |
18 | ```js
19 | // play at 80% the original speed
20 | player.playbackRate.value = 0.8
21 | ```
22 |
--------------------------------------------------------------------------------
/lessons/3. Effects/16. Better Overdrive/start.js:
--------------------------------------------------------------------------------
var audioContext = new AudioContext()
var startTime = audioContext.currentTime + 0.2

// A two-point [-1, 1] curve is linear within range, so the shaper simply
// hard-clips any input driven beyond +/-1.
var shaper = audioContext.createWaveShaper()
shaper.curve = new Float32Array([-1, 1])

// Boost by 20x into the shaper so the signal actually clips.
var amp = audioContext.createGain()
amp.gain.value = 20
amp.connect(shaper)
shaper.connect(audioContext.destination)

getSample('guitar.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(amp)
  player.start(startTime)
})

// Fetch `url` as an ArrayBuffer and hand the decoded AudioBuffer to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
28 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/05. Modulate filter cutoff/answer.js:
--------------------------------------------------------------------------------
// # duration=3

var audioContext = new AudioContext()

play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

// delay: seconds from now; pitch: semitones; duration: seconds
function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  var filter = audioContext.createBiquadFilter()
  filter.connect(audioContext.destination)
  filter.type = 'highpass'
  filter.frequency.value = 10000

  // Sweep the cutoff from 10 kHz down to 500 Hz over the note's duration.
  filter.frequency.setValueAtTime(10000, startTime)
  filter.frequency.linearRampToValueAtTime(500, endTime)

  var oscillator = audioContext.createOscillator()
  oscillator.connect(filter)

  oscillator.type = 'sawtooth'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(startTime)
  oscillator.stop(endTime)
}
30 |
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/12. Play a sequence of pitched samples/answer.js:
--------------------------------------------------------------------------------
// # duration=5
var audioContext = new AudioContext()
// Schedule slightly in the future so all three notes line up cleanly.
var startTime = audioContext.currentTime + 0.1

getSample('vox.wav', function (buffer) {
  play(0, -12)
  play(1, -5)
  play(2, 0)

  // delay: seconds after startTime; transpose: semitones up (+) or down (-)
  function play (delay, transpose) {
    var player = audioContext.createBufferSource()
    player.buffer = buffer
    player.connect(audioContext.destination)

    // Playback speed doubles per octave (12 semitones).
    player.playbackRate.value = Math.pow(2, transpose / 12)

    player.start(startTime + delay, 0)
  }
})

// Fetch `url` as an ArrayBuffer and hand the decoded AudioBuffer to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
30 |
--------------------------------------------------------------------------------
/lessons/3. Effects/15. Simple Overdrive/answer.js:
--------------------------------------------------------------------------------
// # duration=4.5

var audioContext = new AudioContext()
var startTime = audioContext.currentTime + 0.2

// A two-point [-1, 1] curve is linear within range, so the shaper simply
// hard-clips any input driven beyond +/-1.
var shaper = audioContext.createWaveShaper()
shaper.curve = new Float32Array([-1, 1])

// Boost by 20x into the shaper so the signal actually clips.
var amp = audioContext.createGain()
amp.gain.value = 20
amp.connect(shaper)
shaper.connect(audioContext.destination)

getSample('guitar.wav', function play (buffer) {
  var player = audioContext.createBufferSource()
  player.buffer = buffer
  player.connect(amp)
  player.start(startTime)
})

// Fetch `url` as an ArrayBuffer and hand the decoded AudioBuffer to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
30 |
--------------------------------------------------------------------------------
/lessons/3. Effects/19. Simple Reverb/answer.js:
--------------------------------------------------------------------------------
// # duration=5.5

var audioContext = new AudioContext()
var startTime = audioContext.currentTime + 0.2

// Load the guitar sample, then the spring impulse response, then play the
// guitar through a convolver seeded with that impulse (the reverb).
getSample('guitar.wav', function play (buffer) {
  getSample('spring.mp3', function play (impulseBuffer) {
    var player = audioContext.createBufferSource()
    player.buffer = buffer

    var convolver = audioContext.createConvolver()
    convolver.buffer = impulseBuffer

    player.connect(convolver)
    convolver.connect(audioContext.destination)

    player.start(startTime)
  })
})

// Fetch `url` as an ArrayBuffer and hand the decoded AudioBuffer to `cb`.
function getSample (url, cb) {
  var request = new XMLHttpRequest()
  request.open('GET', url)
  request.responseType = 'arraybuffer'
  request.onload = function () {
    audioContext.decodeAudioData(request.response, cb)
  }
  request.send()
}
30 |
--------------------------------------------------------------------------------
/lessons/3. Effects/13. Tremolo/answer.js:
--------------------------------------------------------------------------------
// # duration=2.5

var audioContext = new AudioContext()
// Master gain; its gain AudioParam is modulated by the LFO below.
var tremolo = audioContext.createGain()
tremolo.connect(audioContext.destination)
tremolo.gain.value = 0

// The [0, 1] curve maps the LFO's -1..1 output into 0..1, so the gain
// pulses between silent and full rather than inverting phase.
var shaper = audioContext.createWaveShaper()
shaper.curve = new Float32Array([0, 1])
shaper.connect(tremolo.gain)

// 3 Hz sine LFO: the tremolo rate.
var lfo = audioContext.createOscillator()
lfo.frequency.value = 3
lfo.type = 'sine'
lfo.start(audioContext.currentTime)
lfo.connect(shaper)

// Three-note chord, all starting immediately.
play(0, -9, 2.25)
play(0, 3, 2.25)
play(0, 0, 2.25)

// delay: seconds from now; pitch: semitones; duration: seconds
function play (delay, pitch, duration) {
  var time = audioContext.currentTime + delay

  var oscillator = audioContext.createOscillator()
  oscillator.connect(tremolo)

  oscillator.type = 'triangle'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
34 |
--------------------------------------------------------------------------------
/lessons/3. Effects/14. Stereo Tremolo/lesson.md:
--------------------------------------------------------------------------------
1 | > Add a global effect so that the audio output has a `'sine'` shaped **stereo tremolo** (panning between **left** and **right** speakers) at a rate of `2` Hz.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Panning Between Left and Right Speakers
6 |
7 | You can use the [StereoPannerNode](https://developer.mozilla.org/en-US/docs/Web/API/StereoPannerNode) to change the amount of sound playing in each speaker.
8 |
9 | Create an instance using [`audioContext.createStereoPanner()`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createStereoPanner).
10 |
11 | Use the [`pan`](https://developer.mozilla.org/en-US/docs/Web/API/StereoPannerNode/pan) [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam) to choose how much sound should play in each speaker. Setting to `0` will play equally in both speakers. `-1` is just the left, and `+1` just the right.
12 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/07. Vibrato/answer.js:
--------------------------------------------------------------------------------
// # duration=4

var audioContext = new AudioContext()

play(0, 3, 0.5)
play(1, 10, 0.5)
play(2, 15, 0.5)

// delay: seconds from now; pitch: semitones; duration: seconds
function play (delay, pitch, duration) {
  var startTime = audioContext.currentTime + delay
  var endTime = startTime + duration

  // Gain envelope: start silent, ease in at startTime, ease out at endTime.
  var envelope = audioContext.createGain()
  envelope.connect(audioContext.destination)
  envelope.gain.value = 0
  envelope.gain.setTargetAtTime(1, startTime, 0.1)
  envelope.gain.setTargetAtTime(0, endTime, 0.2)

  var oscillator = audioContext.createOscillator()
  oscillator.connect(envelope)

  oscillator.type = 'sawtooth'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  // Vibrato: the LFO's -1..1 output is scaled by this gain to +/-30 cents
  // and added to the oscillator's detune.
  var vibrato = audioContext.createGain()
  vibrato.gain.value = 30
  vibrato.connect(oscillator.detune)

  // 5 Hz vibrato rate.
  var lfo = audioContext.createOscillator()
  lfo.frequency.value = 5
  lfo.connect(vibrato)
  lfo.start(startTime)

  oscillator.start(startTime)
  // Run past endTime so the envelope's release tail isn't cut off.
  oscillator.stop(endTime + 2)

}
38 |
--------------------------------------------------------------------------------
/lessons/3. Effects/17. Delay with feedback/answer.js:
--------------------------------------------------------------------------------
// # duration=4

var audioContext = new AudioContext()

// Echo: a dry path (input -> output) plus a wet path where the signal
// circulates through delay -> feedback -> delay, fading each repeat.
var input = audioContext.createGain()
var feedback = audioContext.createGain()
var delay = audioContext.createDelay()
var output = audioContext.createGain()
output.connect(audioContext.destination)

delay.delayTime.value = 0.2
feedback.gain.value = 0.6 // each repeat at 60% of the previous

// wet path into the delay; dry path straight to the output
input.connect(delay)
input.connect(output)

// feedback loop: delayed signal re-enters the delay and is also mixed out
delay.connect(feedback)
feedback.connect(delay)
feedback.connect(output)

// A fast arpeggio of short square-wave notes, one every 50 ms.
play(0, -3, 0.05)
play(0.05, 2, 0.05)
play(0.1, 9, 0.05)
play(0.15, 14, 0.05)
play(0.2, 9, 0.05)
play(0.25, 2, 0.05)
play(0.3, -3, 0.05)
play(0.35, 7, 0.05)
play(0.4, 14, 0.05)
play(0.45, 18, 0.05)
play(0.5, 9, 0.05)
play(0.55, 2, 0.05)

// startAfter: seconds from now; pitch: semitones; duration: seconds
function play (startAfter, pitch, duration) {
  var time = audioContext.currentTime + startAfter

  var oscillator = audioContext.createOscillator()
  oscillator.connect(input)

  oscillator.type = 'square'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
46 |
--------------------------------------------------------------------------------
/state/lesson.js:
--------------------------------------------------------------------------------
1 | var Observ = require('observ')
2 | var ObservStruct = require('observ-struct')
3 |
4 | module.exports = Lesson
5 |
// Observable lesson state: wraps a lesson descriptor and persists the
// user's edited file to localStorage, keyed by workshop + lesson path.
// Returns an observ-struct augmented with a reset() helper.
function Lesson (descriptor) {
  var obs = ObservStruct({
    title: Observ(descriptor.title),
    workshop: Observ(descriptor.workshop),
    path: Observ(descriptor.path),
    verifyTime: Observ(descriptor.verifyTime),
    lesson: Observ(descriptor.lesson),
    answer: Observ(descriptor.answer),
    file: Observ(descriptor.start),
    modified: Observ(false)
  })

  // localStorage key for this lesson. Computed lazily (not hoisted into a
  // constant) because obs.workshop may be set after construction and the
  // original code re-read it on every access.
  function storageKey () {
    return obs.workshop() + '/lessons/' + descriptor.path
  }

  // Restore the pristine start file; the obs.file listener below then
  // clears the persisted copy and the modified flag.
  obs.reset = function () {
    obs.file.set(descriptor.start)
  }

  // Restore any previously persisted edit for this lesson.
  if (window.localStorage[storageKey()]) {
    obs.modified.set(true)
    obs.file.set(window.localStorage[storageKey()])
  }

  // Keep localStorage and the `modified` flag in sync with the file text.
  obs.file(function (data) {
    if (data === descriptor.start) {
      obs.modified() && obs.modified.set(false)
      delete window.localStorage[storageKey()]
    } else {
      !obs.modified() && obs.modified.set(true)
      window.localStorage[storageKey()] = data
    }
  })

  return obs
}
39 |
--------------------------------------------------------------------------------
/lessons/3. Effects/18. Ping Pong Delay/answer.js:
--------------------------------------------------------------------------------
// # duration=4.5

var audioContext = new AudioContext()

var input = audioContext.createGain()
// Merger routes the two delay taps to left (0) and right (1) channels.
var merger = audioContext.createChannelMerger(2)
var output = audioContext.createGain()

var leftDelay = audioContext.createDelay()
var rightDelay = audioContext.createDelay()
var feedback = audioContext.createGain()

// Wet path: input -> feedback -> leftDelay -> rightDelay -> feedback ...
// so successive echoes alternate between the two delay taps.
input.connect(feedback, 0)
leftDelay.connect(rightDelay)
rightDelay.connect(feedback)
feedback.connect(leftDelay)
merger.connect(output)
input.connect(output) // dry path
output.connect(audioContext.destination)

feedback.gain.value = 0.4

// Tap each delay into its own stereo channel.
leftDelay.connect(merger, 0, 0)
rightDelay.connect(merger, 0, 1)

leftDelay.delayTime.value = 3 / 8
rightDelay.delayTime.value = 3 / 8

play(1 / 8, 3, 0.05)
play(2 / 8, 7, 0.05)
play(3 / 8, 15, 0.05)

// startAfter: seconds from now; pitch: semitones; duration: seconds
function play (startAfter, pitch, duration) {
  var time = audioContext.currentTime + startAfter

  var oscillator = audioContext.createOscillator()
  oscillator.connect(input)

  oscillator.type = 'square'
  // detune is in cents; 100 cents = 1 semitone
  oscillator.detune.value = pitch * 100

  oscillator.start(time)
  oscillator.stop(time + duration)
}
45 |
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/10. Looping samples/lesson.md:
--------------------------------------------------------------------------------
1 | > Play the sample from the **beginning** for `4` seconds, then **loop** between `3` seconds and `4` seconds **three** times.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Setting Loop Points
6 |
7 | By default, all [buffer source nodes](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/buffer) play from their **in point** for the **duration** specified then **stop**.
8 |
9 | You can use the [`loop`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/loop), [`loopStart`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/loopStart), and [`loopEnd`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/loopEnd) properties to change this behavior.
10 |
11 | First enable looping:
12 |
13 | ```js
14 | player.loop = true
15 | ```
16 |
17 | Then mark the loop start and stop points.
18 |
19 | ```js
20 | player.loopStart = 1
21 | player.loopEnd = 2
22 | ```
23 |
24 | If we ran the code at this point, the player would **start** at `0`, **continue** until playback hits `loopEnd`, then **jump** to `loopStart`, continuing until it hits `loopEnd`, and so on, **forever**!
25 |
26 | Let's put a `stop()` to that:
27 |
28 | ```js
29 | player.stop(startTime + duration)
30 | ```
--------------------------------------------------------------------------------
/styles/base.mcss:
--------------------------------------------------------------------------------
html, body {
  background: #444;
  margin: 0;
  font-family: "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif;
  overflow: hidden;
  height: 100%
}

body {
  display: flex;
  flex-direction: column;
  line-height: 1.2;
  color: white
}

h1 {
  color: #C0C0C0;
  font-size: 130%;
  margin: 4px 0;
  font-weight: normal;
  text-shadow: 0px 0px 2px black;
  flex: 1;
}

pre {
  code {
    display: block
    padding: 10px
    background: #333
    border: 1px solid #555
    color: #EEE
    font: 12px/normal 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', 'source-code-pro', monospace;
    overflow-x: auto
  }
}

p, ul {
  (code) {
    padding: 0 3px
    background: #333
    color: #EEE
    border: 1px solid #444
    font: 12px/normal 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', 'source-code-pro', monospace;
  }
}

a {
  color: #8EC1FC
  text-decoration: none

  code {
    color: #8EC1FC
  }

  :hover {
    text-decoration: underline
  }
}

* + h1 {
  margin-top: 16px
}

* {
  box-sizing: border-box;
  flex-shrink: 0;
}
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/01. Play a pitched sawtooth wave/lesson.md:
--------------------------------------------------------------------------------
1 | > Pass this exercise by modifying the code on the right so that the generated frequency is `220` Hz instead of the default `440` Hz.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Setting Oscillator Frequency (or not)
6 |
7 | All oscillator nodes have a property called [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/frequency).
8 |
However, if you try to set it directly, it will have no effect.
10 |
11 | ```js
12 | // this doesn't work
13 | oscillator.frequency = 200
14 | ```
15 |
16 | That's because `oscillator.frequency` is an instance of [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam).
17 |
18 | # Audio Params
19 |
20 | Most properties on Audio Nodes are instances of [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam). They let you do all kinds of neat things with parameters, such as automation over time, and allow one AudioNode to modulate another's value.
21 |
22 | We will get into more depth later but for now **all you need to know is `param.value = 123`**.
23 |
24 | So to set the frequency of the oscillator to `880` Hz, you would do this:
25 |
26 | ```js
27 | // woo finally something works!
28 | oscillator.frequency.value = 880
29 | ```
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/03. Play a short sequence of notes/lesson.md:
--------------------------------------------------------------------------------
1 | > Complete the `play()` function on the right so that the sequence is played correctly.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Scheduling Events
6 |
7 | Like we saw in the previous example, we need to specify the `start` and `stop` time of each generator node. This is specified relative to `audioContext.currentTime` which is time in **seconds** since the `AudioContext` was first created.
8 |
9 | Play a sound `3` seconds after `AudioContext` created and stop after `1` second:
10 |
11 | ```js
12 | play(3, 1)
13 |
14 | function play(at, duration) {
15 | var oscillator = audioContext.createOscillator()
16 | oscillator.connect(audioContext.destination)
17 | oscillator.start(at)
18 | oscillator.stop(at+duration)
19 | }
20 | ```
21 |
22 | But since we don't know how long the audio context has existed for, it's better to always schedule relative to the current time:
23 |
24 | ```js
25 | play(3, 1)
26 |
27 | function play(delay, duration) {
28 | var oscillator = audioContext.createOscillator()
29 | oscillator.connect(audioContext.destination)
30 |
31 | // add audioContext.currentTime
32 | oscillator.start(audioContext.currentTime + delay)
33 | oscillator.stop(audioContext.currentTime + delay + duration)
34 | }
35 | ```
--------------------------------------------------------------------------------
/lessons/3. Effects/16. Better Overdrive/answer.js:
--------------------------------------------------------------------------------
1 | // # duration=4.5
2 |
3 | var audioContext = new AudioContext()
4 | var startTime = audioContext.currentTime + 0.2
5 |
6 | var shaper = audioContext.createWaveShaper()
7 | shaper.curve = generateCurve(22050) // half of 44100 (sample rate)
8 |
9 | var bandpass = audioContext.createBiquadFilter()
10 | bandpass.type = 'bandpass'
11 | bandpass.frequency.value = 1000
12 | bandpass.connect(shaper)
13 |
14 | function generateCurve (steps) {
15 | var curve = new Float32Array(steps)
16 | var deg = Math.PI / 180
17 |
18 | for (var i = 0;i < steps;i++) {
19 | var x = i * 2 / steps - 1
20 | curve[i] = (3 + 10) * x * 20 * deg / (Math.PI + 10 * Math.abs(x))
21 | }
22 |
23 | return curve
24 | }
25 |
26 | var amp = audioContext.createGain()
27 | amp.gain.value = 20
28 | amp.connect(bandpass)
29 | shaper.connect(audioContext.destination)
30 |
31 | getSample('guitar.wav', function play (buffer) {
32 | var player = audioContext.createBufferSource()
33 | player.buffer = buffer
34 | player.connect(amp)
35 | player.start(startTime)
36 | })
37 |
38 | function getSample (url, cb) {
39 | var request = new XMLHttpRequest()
40 | request.open('GET', url)
41 | request.responseType = 'arraybuffer'
42 | request.onload = function () {
43 | audioContext.decodeAudioData(request.response, cb)
44 | }
45 | request.send()
46 | }
47 |
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/12. Play a sequence of pitched samples/lesson.md:
--------------------------------------------------------------------------------
1 | > Complete the `play` function so that **each sample** in the sequence is played at the correct **pitch**.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Chromatic Pitching of Audio Samples
6 |
7 | [`playbackRate`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/playbackRate) works well enough for simple speed changes, but if we want to set precise audio pitches, we'll have to do some math.
8 |
9 | Unfortunately, [AudioBufferSourceNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode) doesn't have a `detune` option like the [OscillatorNode](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/detune) does.
10 |
11 | Audio pitch is slightly tricky to calculate because the **frequency/speed _doubles_ every octave you go up**.
12 |
13 | Setting the `playbackRate` to `2` would go **up one octave**, or **+12 semitones**.
14 |
15 | If you wanted to transpose **up 7 semitones**, you would do the following:
16 |
17 | ```js
18 | player.playbackRate.value = Math.pow(2, 7 / 12) // 1.50...
19 | ```
20 |
21 | Or **+24 semitones** (+2 octaves):
22 |
23 | ```js
24 | player.playbackRate.value = Math.pow(2, 24 / 12) // 4...
25 | ```
26 |
27 | This works going down as well. Here we transpose **down 14 semitones**:
28 |
29 | ```js
30 | player.playbackRate.value = Math.pow(2, -14 / 12) // 0.45...
31 | ```
32 |
--------------------------------------------------------------------------------
/views/index.js:
--------------------------------------------------------------------------------
1 | var h = require('micro-css/h')(require('hyperscript'))
2 | var send = require('../lib/send')
3 |
4 | module.exports = IndexView
5 |
// Renders the lesson index page: a header, the grouped lesson list
// (with verified/selected highlighting), and a footer info section.
function IndexView (state) {
  return h('Index', [
    h('header', [ h('span.main', 'Web Audio School'), h('span', ['v', state.version])]),
    h('div.lessons', [
      h('ul', state.lessons().map(function (group) {
        return h('li', [
          // Group heading with its ordering prefix (e.g. "1. ") stripped.
          h('h1', [ removeNumber(group.name) ]),
          h('ul', group.lessons.map(function (lesson) {
            var classes = []

            // '-verified': lesson passed; '-selected': currently open
            if (state.verifiedLessons.has(lesson.path)) {
              classes.push('-verified')
            }

            if (state.selectedLesson() === lesson.path) {
              classes.push('-selected')
            }

            return h('li', {
              onclick: send(state.viewLesson, lesson.path),
              tabIndex: 0,
              className: classes.join(' ')
            }, [ lesson.title ])
          }))
        ])
      }))
    ]),
    h('div.info', [
      h('p', [ 'More lessons coming soon!' ]),
      h('p', [
        h('a', {
          href: 'https://github.com/mmckegg/web-audio-school'
        }, ['github.com/mmckegg/web-audio-school' ])
      ])
    ])
  ])
}
43 |
// Strips a leading "<digits>. " ordering prefix (e.g. "3. Effects" ->
// "Effects"); returns the text unchanged when no prefix is present.
function removeNumber (text) {
  var result = /^[0-9]+\. (.+)$/.exec(text)
  return result ? result[1] : text
}
48 |
--------------------------------------------------------------------------------
/lessons/3. Effects/16. Better Overdrive/lesson.md:
--------------------------------------------------------------------------------
> Improve the distortion effect by adding a **soft clipping** curve and inserting a **bandpass** of `1000` Hz before clipping.
2 |
There are a number of things we can do to curb the harshness of digital "overdrive".
4 |
5 | # Soft Clipping
6 |
7 | For a start we could soften the clipping a little using something like this:
8 |
9 | ```js
10 | shaper.curve = generateCurve(22050) // half of 44100 (sample rate)
11 |
12 | function generateCurve(steps){
13 | var curve = new Float32Array(steps)
14 | var deg = Math.PI / 180
15 |
16 | for (var i=0;i Add a global effect so that the audio output has an **echo** with a **delay time of `0.2` seconds** and a **feedback ratio of `0.6`**.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Delaying an Audio Signal
6 |
7 | Here is how you delay an audio signal by **`1` second** using a [DelayNode](https://developer.mozilla.org/en-US/docs/Web/API/DelayNode):
8 |
9 | ```js
10 | var delay = audioContext.createDelay()
11 | delay.delayTime.value = 1
12 | delay.connect(audioContext.destination)
13 |
14 | input.connect(delay)
15 | ```
16 |
17 | # Creating an Echo Effect
18 |
To achieve this effect we'll need to create a feedback loop of audio where each loop is delayed by a certain amount of time. We can use the [DelayNode](https://developer.mozilla.org/en-US/docs/Web/API/DelayNode) to add a delay in the signal path, and a couple of instances of [GainNode](https://developer.mozilla.org/en-US/docs/Web/API/GainNode) to hook everything up.
20 |
21 | Here we create an echo effect with a delay time of **`1` second** and a feedback ratio of `0.2` (fast trail off):
22 |
23 | ```js
24 | var input = audioContext.createGain()
25 | var feedback = audioContext.createGain()
26 | var delay = audioContext.createDelay()
27 |
28 | var output = audioContext.createGain()
29 | output.connect(audioContext.destination)
30 |
31 | delay.delayTime.value = 1
32 | feedback.gain.value = 0.2 // dangerous when > 1 ;-)
33 |
34 | // dry path
35 | input.connect(output)
36 |
37 | // wet path
38 | input.connect(delay)
39 |
40 | // feedback loop
41 | delay.connect(feedback)
42 | feedback.connect(delay)
43 | feedback.connect(output)
44 | ```
45 |
--------------------------------------------------------------------------------
/lessons/3. Effects/19. Simple Reverb/lesson.md:
--------------------------------------------------------------------------------
1 | > Add the **spring.mp3** impulse response to the guitar to give it a reverb effect
2 |
3 | # Simple Reverb
4 |
The perception of a sound depends heavily on the environment it is played in. If you play the guitar in your rehearsal room, it will sound different than when you play it in, for example, a church. The sound's properties change when it bounces off different materials, so different locations provide unique acoustic experiences.
6 |
7 | A location's properties can be captured by recording an audio sample in the desired location and then extracting the difference of it and the original sound. The result of this operation is called an **impulse response** and it can be used to make every sound sound like it was played in that location.
8 |
The mathematical concept that is applied to achieve this effect is called **convolution**. In the Web Audio API we can apply convolution effects by leveraging the **ConvolverNode**.
10 |
11 | ```js
12 | getSample('impulseresponse.mp3', function(impulse){
13 | var convolver = audioContext.createConvolver()
14 | convolver.buffer = impulse
15 |
16 | // myAudioSample is fetched and created before
17 | myAudioSample.connect(convolver)
18 | convolver.connect(audioContext.destination)
19 | })
20 | ```
21 |
22 | As you can see, impulse responses are handled like audio buffers (fetched via XHR and then decoded). The convolution is applied to whichever node is connected to the convolver. Be aware that convolving a signal is a pretty calculation-heavy operation and should be used sparingly (esp. on mobile devices).
23 |
24 | Luckily you don't need to record impulse responses on your own and a huge variety of prerecorded signals can be found all over the internet.
25 |
--------------------------------------------------------------------------------
/browser-entry.js:
--------------------------------------------------------------------------------
// safari support
require('stereo-panner-node').polyfill()
window.AudioContext = window.AudioContext || window.webkitAudioContext

var insertCss = require('insert-css')
var watch = require('observ/watch')

var css = require('./styles')
insertCss(css)

var state = require('./state')
var LessonView = require('./views/lesson')
var IndexView = require('./views/index')

// set persistence prefix
state.workshop.set('web-audio')

// track the currently mounted view so it can be torn down on change
var lastElement = null
var lastView = null
var lastLesson = null

// re-render whenever any part of the observable app state changes
watch(state, function refreshView () {
  var element = null
  if (state.view() === 'lesson') {
    // only rebuild the lesson view when the lesson (or the view) changed
    if (lastLesson !== state.selectedLesson() || lastView !== 'lesson') {
      lastLesson = state.selectedLesson()
      var lesson = state.getLesson(state.selectedLesson())
      if (lesson) {
        element = LessonView(state, lesson)
        setView(element)
      } else {
        // unknown lesson path: fall back to the index view
        state.view.set('index')
      }
    }
  } else if (state.view() !== lastView) {
    element = IndexView(state)
    setView(element)
    scrollSelectedIntoView()
  }
  lastView = state.view()
})
42 |
// Scroll the currently selected lesson entry into view, preferring the
// non-standard scrollIntoViewIfNeeded when the browser provides it.
function scrollSelectedIntoView () {
  var selected = document.querySelector('.-selected')
  if (!selected) return
  if (selected.scrollIntoViewIfNeeded) {
    selected.scrollIntoViewIfNeeded()
  } else if (selected.scrollIntoView) {
    selected.scrollIntoView(false)
  }
}
53 |
// Mount `element` as the current view, tearing down the previously
// mounted element first (calling its destroy hook when present).
function setView (element) {
  var previous = lastElement
  if (previous) {
    if (previous.destroy) previous.destroy()
    previous.remove()
  }
  document.body.appendChild(element)
  lastElement = element
}
62 |
--------------------------------------------------------------------------------
/lib/audio-match.js:
--------------------------------------------------------------------------------
module.exports = AudioMatch

// Compares two audio signals by running each through an AnalyserNode.
// Returns an object exposing the two splitter inputs plus helpers to
// check whether the signals' spectra match and whether any signal is
// present at all. `reset` rebuilds the analysers from scratch.
function AudioMatch (audioContext) {
  var inputA = audioContext.createChannelSplitter(2)
  var inputB = audioContext.createChannelSplitter(2)

  var analyserA = null
  var analyserB = null

  setupAnalysers()

  return {
    inputA: inputA,
    inputB: inputB,

    reset: setupAnalysers,

    checkMatch: checkMatch,
    checkSignal: checkSignal
  }

  // True when every frequency bin of the two signals differs by no
  // more than 20 (dB, as reported by getFloatFrequencyData).
  function checkMatch () {
    var binsA = new Float32Array(analyserA.frequencyBinCount)
    var binsB = new Float32Array(analyserB.frequencyBinCount)

    analyserA.getFloatFrequencyData(binsA)
    analyserB.getFloatFrequencyData(binsB)

    var i = 0
    while (i < binsA.length) {
      if (difference(binsA[i], binsB[i]) > 20) {
        return false
      }
      i += 1
    }

    return true
  }

  // True when either input has energy above the noise floor (> 25 on
  // the 0-255 byte scale) in any frequency bin.
  function checkSignal () {
    var binsA = new Uint8Array(analyserA.frequencyBinCount)
    var binsB = new Uint8Array(analyserB.frequencyBinCount)

    analyserA.getByteFrequencyData(binsA)
    analyserB.getByteFrequencyData(binsB)

    var i = 0
    while (i < binsA.length) {
      if (binsA[i] > 25 || binsB[i] > 25) {
        return true
      }
      i += 1
    }

    return false
  }

  // (Re)create both analysers and wire the splitter inputs into them.
  function setupAnalysers () {
    inputA.disconnect()
    inputB.disconnect()

    analyserA = audioContext.createAnalyser()
    analyserB = audioContext.createAnalyser()

    analyserA.fftSize = 512
    analyserB.fftSize = 512
    analyserA.smoothingTimeConstant = 0.3
    analyserB.smoothingTimeConstant = 0.3

    inputA.connect(analyserA)
    inputB.connect(analyserB)
  }
}
67 |
// Absolute distance between two numbers.
function difference (a, b) {
  return a > b ? a - b : b - a
}
71 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "web-audio-school",
3 | "version": "1.3.1",
4 | "description": "An intro to the Web Audio API by a series of self-guided workshops.",
5 | "main": "index.js",
6 | "bin": {
7 | "web-audio-school": "run.js"
8 | },
9 | "standard": {
10 | "global": [
11 | "AudioContext",
12 | "XMLHttpRequest"
13 | ],
14 | "ignore": [
15 | "lessons/**/start.js"
16 | ]
17 | },
18 | "dependencies": {
19 | "beefy": "^2.1.4",
20 | "brace": "^0.5.1",
21 | "browserify": "^9.0.8",
22 | "highlight.js": "^8.5.0",
23 | "hyperscript": "^1.4.6",
24 | "insert-css": "^0.2.0",
25 | "markdown-it": "^4.1.0",
26 | "markdown-it-for-inline": "^0.1.0",
27 | "micro-css": "^0.5.0",
28 | "observ": "^0.2.0",
29 | "observ-struct": "^6.0.0",
30 | "opn": "^5.1.0",
31 | "stereo-panner-node": "~0.2.0",
32 | "through2": "^0.6.5"
33 | },
34 | "devDependencies": {
35 | "buildbranch": "0.0.3",
36 | "standard": "^4.5.2",
37 | "uglify-js": "^2.4.23"
38 | },
39 | "scripts": {
40 | "test": "standard",
41 | "start": "beefy browser-entry.js:bundle.js --index=resources/index.js --cwd resources -- -t [ ./lib/evalify -f lessons/index.js -f styles/index.js ]",
42 | "clean-static": "rm -rf build",
43 | "build-static": "npm run clean-static && mkdir build && cp -r resources/* build/ && browserify browser-entry -t [ ./lib/evalify -f lessons/index.js -f styles/index.js ] | uglifyjs > build/bundle.js",
44 | "deploy": "standard && npm run build-static && buildbranch gh-pages build"
45 | },
46 | "preferGlobal": true,
47 | "repository": {
48 | "type": "git",
49 | "url": "https://github.com/mmckegg/web-audio-school.git"
50 | },
51 | "author": "Matt McKegg",
52 | "license": "ISC",
53 | "bugs": {
54 | "url": "https://github.com/mmckegg/web-audio-school/issues"
55 | },
56 | "homepage": "https://github.com/mmckegg/web-audio-school"
57 | }
58 |
--------------------------------------------------------------------------------
/lessons/2. Working With Samples/08. Decode and play an audio file/lesson.md:
--------------------------------------------------------------------------------
1 | > Complete the `play` function so that the decoded sample `buffer` is played at `startTime`.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Loading Audio Samples
6 |
7 | Before we can play anything, we first need to fetch the file from the network. We can do this using good old fashioned [XmlHttpRequest](https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/Sending_and_Receiving_Binary_Data) or use the new [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API):
8 |
9 | ```js
10 | fetch('http://example.com/sound.ogg').then(function(response) {
11 | return response.arrayBuffer()
12 | }).then(function(arrayBuffer) {
13 | // do something clever with the ArrayBuffer
14 | })
15 | ```
16 |
17 | Then we need to decode the file using [`audioContext.decodeAudioData`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/decodeAudioData) into an [AudioBuffer](https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer). Most browsers support decoding **PCM Wave**, **Ogg Vorbis**, **MP3**, and **AAC** formats.
18 |
19 | ```js
20 | audioContext.decodeAudioData(arrayBuffer, function(buffer) {
21 | // returns an AudioBuffer, ready to play!
22 | })
23 | ```
24 |
25 | # Playing Decoded Audio Buffers
26 |
27 | Create an [AudioBufferSourceNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode) using [`audioContext.createBufferSource`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createBufferSource) then set its [`buffer`](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode/buffer) property to the decoded `buffer`.
28 |
29 | ```js
30 | var player = audioContext.createBufferSource()
31 | player.buffer = buffer
32 | ```
33 |
34 | Now we just connect the node to destination and call `start` as usual! We don't have to call `stop`; in this example the playback will automatically stop at the end of the audio file.
35 |
36 | ```js
37 | player.connect(audioContext.destination)
38 | player.start(audioContext.currentTime)
39 | ```
40 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/07. Vibrato/lesson.md:
--------------------------------------------------------------------------------
1 | > Modify the code on the right so that **each note** has a `5` Hz vibrato of +/- `30` cents.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Modulating Audio Parameters with Nodes
6 |
7 | As well as setting the time events manually as in _previous_ lessons, you can use an [AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode) to [modulate the value](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode/connect(AudioParam)).
8 |
9 | The easiest way to use this is with an [OscillatorNode](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode) with its [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/frequency) set to something low that we can perceive (`< 20` Hz).
10 |
11 | Here we create a simple tremolo effect, where the `gain` will oscillate between **0** and **2** at **5 Hz**.
12 |
13 | ```js
14 | var amp = audioContext.createGain()
15 | amp.connect(audioContext.destination)
16 | oscillator.connect(amp)
17 |
18 | var lfo = audioContext.createOscillator()
19 | lfo.frequency.value = 5
20 | lfo.connect(amp.gain)
21 | ```
22 |
23 | We also need to remember to **start** and **stop** our `lfo` oscillator accordingly.
24 |
25 | ```js
26 | // sync start time
27 | oscillator.start(audioContext.currentTime)
28 | lfo.start(audioContext.currentTime)
29 |
30 | // sync end time
31 | oscillator.stop(audioContext.currentTime+2)
32 | lfo.stop(audioContext.currentTime+2)
33 | ```
34 |
35 | # Modulating Pitch
36 |
37 | If we want to add a vibrato effect (i.e. pitch modulation) to an oscillator, we can use a similar technique to above, except we'll need to amplify the output of the `lfo` before we connect it to `oscillator.detune`.
38 |
39 | This example modulates the `detune` value by **+/- 100 cents** at 2 Hz.
40 |
41 | ```js
42 | var vibrato = audioContext.createGain()
43 | vibrato.gain.value = 100
44 | vibrato.connect(oscillator.detune)
45 |
46 | var lfo = audioContext.createOscillator()
47 | lfo.connect(vibrato)
48 | lfo.frequency.value = 2
49 |
50 | lfo.start(audioContext.currentTime)
51 | lfo.stop(audioContext.currentTime+2)
52 | ```
53 |
--------------------------------------------------------------------------------
/lessons/3. Effects/13. Tremolo/lesson.md:
--------------------------------------------------------------------------------
1 | > Add a global effect so that the audio output has a `'sine'` shaped **amplitude tremolo** at a rate of `3` Hz.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Create a Tremolo Effect
6 |
7 | The simple way of creating a tremolo effect is using a GainNode with its `gain` AudioParam modulated by a low `frequency` OscillatorNode.
8 |
9 | ```js
10 | var tremolo = audioContext.createGain()
11 | tremolo.connect(audioContext.destination)
12 | tremolo.gain.value = 0 // set base value
13 |
14 | var lfo = audioContext.createOscillator()
15 | lfo.type = 'sine'
16 | lfo.frequency.value = 2
17 | lfo.connect(tremolo.gain)
18 | lfo.start(audioContext.currentTime)
19 | ```
20 |
21 | Now we just have to connect our source nodes to the `tremolo` GainNode:
22 |
23 | ```js
24 | oscillator.connect(tremolo)
25 | ```
26 |
27 | You may notice that the tremolo seems to be running at **double speed**. If you specified `2` Hz, what you are actually hearing is `4` Hz. This is because the **OscillatorNode** output value is sweeping between `-1` and `+1`. The tremolo `GainNode` is being set to -1 half the time, which is not what we want. This doesn't mute the sound, it actually inverts the phase.
28 |
29 | We'll need to override this behavior somehow.
30 |
31 | # Shaping Oscillator Output
32 |
33 | This is where the WaveShaperNode can help us. It is pretty simple to use. Create an instance using `audioContext.createWaveShaper()` and connect your oscillator to it. Then connect the WaveShaperNode to the target AudioParam.
34 |
35 | ```js
36 | var shaper = audioContext.createWaveShaper()
37 | shaper.connect(tremolo.gain)
38 | lfo.connect(shaper)
39 | ```
40 |
41 | So far, this won't actually make a difference. We need to set the `curve` property. It accepts an instance of Float32Array to map values from **`-1` to `+1`** to any arbitrary range. It will interpolate between all values you specify.
42 |
43 | ```js
44 | shaper.curve = new Float32Array([0, 8, 10])
45 | ```
46 |
47 | But for our purposes we just want to go from **`0` to `1`** instead of the default **`-1` to `+1`**:
48 |
49 | ```js
50 | shaper.curve = new Float32Array([0, 1])
51 | ```
52 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/05. Modulate filter cutoff/lesson.md:
--------------------------------------------------------------------------------
1 | > Modify the code on the right so that **each note** has a **highpass** filter that **sweeps** from `10000` Hz to `500` Hz over the `duration` of the sound (linear).
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Modulating Audio Parameters
6 |
7 | An [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam) can be set to a specific [`value`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/value) as we did in the previous lesson, but it can also be set to change over time.
8 |
9 | Here we will ramp the filter [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/frequency) from `200` Hz to `6000` Hz over 2 seconds:
10 |
11 | ```js
12 | // preset value to avoid popping on start
13 | filter.frequency.value = 200
14 |
15 | // schedule the start time
16 | filter.frequency.setValueAtTime(200, audioContext.currentTime)
17 |
18 | // ramp the value, and set end time!
19 | filter.frequency.linearRampToValueAtTime(6000, audioContext.currentTime + 2)
20 | ```
21 |
22 | # [`setValueAtTime(value, time)`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/setValueAtTime)
23 |
24 | Schedules an instant change to the value of the AudioParam at a precise time, as measured against AudioContext.currentTime.
25 |
26 | # [`linearRampToValueAtTime(value, endTime)`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/linearRampToValueAtTime)
27 |
28 | Schedules a gradual linear change in the value of the AudioParam. The change starts at the time specified for the _previous_ event, follows a **linear ramp** to the new value given in the `value` parameter, and reaches the new value at the time given in the `endTime` parameter.
29 |
30 | # [`exponentialRampToValueAtTime(value, endTime)`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/exponentialRampToValueAtTime)
31 |
32 | Schedules a gradual exponential change in the value of the AudioParam. The change starts at the time specified for the _previous_ event, follows an **exponential ramp** to the new value given in the `value` parameter, and reaches the new value at the time given in the `endTime` parameter.
33 |
--------------------------------------------------------------------------------
/lessons/3. Effects/18. Ping Pong Delay/lesson.md:
--------------------------------------------------------------------------------
1 | > Add a global **stereo ping pong delay** effect, bouncing **left to right**, a delay **time** of `3/8` and **feedback** ratio of `0.4`.
2 |
3 | # Ping pong?
4 |
5 | Ping pong delay is much like standard delay, except that each time the sound feeds back, it swaps channels. So it sounds like the echo is bouncing backwards and forwards (left and right).
6 |
7 | Making **stereo ping pong delay** is very similar to a standard mono one, except we'll need two instances of **DelayNode** and a special kind of AudioNode called the **ChannelMergerNode**.
8 |
9 | Let's create an `input` and `output` **GainNode** that we'll hang everything else off:
10 |
11 | ```js
12 | var input = audioContext.createGain()
13 | var output = audioContext.createGain()
14 |
15 | oscillator.connect(input)
16 | output.connect(audioContext.destination)
17 | ```
18 |
19 | We want to hear our audio as it is being played, as well as the delayed sound, so let's connect the `input` to the `output`.
20 |
21 | ```js
22 | input.connect(output)
23 | ```
24 |
25 | Now let's create our left and right **DelayNode** instances:
26 |
27 | ```js
28 | var leftDelay = audioContext.createDelay()
29 | var rightDelay = audioContext.createDelay()
30 |
31 | leftDelay.delayTime.value = delayTime
32 | rightDelay.delayTime.value = delayTime
33 | ```
34 |
35 | Any sound that comes out of the `leftDelay` should be sent to the `rightDelay`.
36 |
37 | ```js
38 | leftDelay.connect(rightDelay)
39 | ```
40 |
41 | Any sound that comes out of `rightDelay` should go back into the loop. Let's create a feedback **GainNode**, and connect it to the delay nodes.
42 |
43 | ```js
44 | var feedback = audioContext.createGain()
45 | feedback.connect(leftDelay)
46 | rightDelay.connect(feedback)
47 | feedback.gain.value = feedbackRatio
48 | ```
49 |
50 | Now let's connect up to our `input` to the feedback loop:
51 |
52 | ```js
53 | input.connect(feedback)
54 | ```
55 |
56 | And finally merge the two mono audio signals into a single stereo signal using the [ChannelMergerNode](https://developer.mozilla.org/en-US/docs/Web/API/ChannelMergerNode) and connect to `output`.
57 |
58 | ```js
59 | var merger = audioContext.createChannelMerger(2)
60 | leftDelay.connect(merger, 0, 0)
61 | rightDelay.connect(merger, 0, 1)
62 | merger.connect(output)
63 | ```
64 |
--------------------------------------------------------------------------------
/styles/index.mcss:
--------------------------------------------------------------------------------
1 | Index {
2 | display: flex
3 | flex-direction: column
4 | margin: 10px auto
5 | width: 500px
6 | background: #191919
7 | border: 1px solid #111
8 | border-radius: 3px
9 | box-shadow: 0 0 4px black
10 | overflow: hidden
11 | min-height: 95%
12 | max-height: 100%
13 | overflow: hidden
14 |
15 | div.info {
16 | position: relative
17 | box-shadow: 0 -10px 40px #111
18 | padding: 10px 20px
19 | border-top: 2px solid #333
20 | background-color: #2A2929
21 | font-size: 90%
22 | color: #CCC
23 | }
24 |
25 | div.lessons {
26 | flex: 1
27 | min-height: 20px
28 | background: #222
29 | padding: 15px 20px
30 | overflow-y: auto
31 |
32 | ul {
33 | margin: 0
34 | padding: 0
35 | list-style: none
36 | margin-bottom: 20px
37 |
38 | li {
39 |
40 | h1 {
41 | font-size: 100%
42 | font-weight: bold
43 | margin: 10px 0
44 | }
45 |
46 | ul {
47 | padding: 0
48 | list-style: none
49 |
50 | li {
51 | padding: 6px
52 | cursor: pointer
53 | border-radius: 3px
54 |
55 | -selected {
56 | background: svg(point) right 10px center #342F2F no-repeat;
57 | }
58 |
59 | -verified {
60 | background: svg(tick) right 10px center #232 no-repeat;
61 | color: #BAD5B8;
62 | }
63 |
64 | :hover {
65 | background-color: #555
66 | }
67 |
68 | }
69 | }
70 | }
71 | }
72 | }
73 |
74 | header {
75 | display: flex
76 | padding: 15px
77 | background: #111
78 |
79 | span.main {
80 | font-size: 190%
81 | flex: 1
82 | color: white
83 | }
84 |
85 | span {
86 | color: #AAA
87 | }
88 |
89 | }
90 |
91 | @svg tick {
92 | width: 16px
93 | height: 16px
94 | content: ""
95 |
96 | path {
97 | fill: #8F8
98 | }
99 | }
100 |
101 | @svg point {
102 | width: 16px
103 | height: 12px
104 | content: ""
105 |
106 | path {
107 | fill: #F88
108 | }
109 | }
110 | }
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/00. Browser make sound!/lesson.md:
--------------------------------------------------------------------------------
1 | > All you need to do to **pass** this first exercise is add a line of code on the right that changes the oscillator to be a `'sawtooth'` instead of the default **sine** wave then press **Play / Verify**.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Introduction
6 |
7 | The [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) provides a powerful and versatile system for controlling audio on the Web, allowing developers to choose audio sources, add effects to audio, create audio visualizations, apply spatial effects (such as panning) and much more.
8 |
9 | We will **primarily focus on using the Web Audio API for music**, but the concepts here could be applied to game sound and other creative uses.
10 |
11 | Before we can do anything we need to create an [**AudioContext**](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext). This is a global object that is available in most modern browsers on _any_ webpage.
12 |
13 | ```js
14 | var audioContext = new AudioContext()
15 | ```
16 |
17 | To generate and process sounds using the Web Audio API, you create a series of Nodes that connect together to form a signal path.
18 |
19 | 
20 |
21 | # Generate a simple audio pitch
22 |
23 | The [**OscillatorNode**](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode) is particularly useful. It can generate an **audio pitch** at **any frequency** and **all the basic wave shapes** (sine, sawtooth, square, or triangle).
24 |
25 | Use it to synthesize musical sounds, generate sound effects or just for testing things out.
26 |
27 | ```js
28 | var oscillator = audioContext.createOscillator()
29 | oscillator.start(audioContext.currentTime)
30 | oscillator.stop(audioContext.currentTime + 2) // stop after 2 seconds
31 | ```
32 |
33 | Connect it to the speakers:
34 |
35 | ```js
36 | oscillator.connect(audioContext.destination)
37 | ```
38 |
39 | # Waveform Shape
40 |
41 | You can change the shape of the generated wave by setting the [`oscillator.type`](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/type) property.
42 |
43 | By default it is set to `'sine'`.
44 |
45 | Available shapes are `'sine'`, `'triangle'`, `'sawtooth'`, and `'square'`.
46 |
47 | ```js
48 | oscillator.type = 'square'
49 | ```
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/06. Add an envelope/lesson.md:
--------------------------------------------------------------------------------
1 | > Modify the code on the right so that **each note** has an **attack** with a time constant of `0.1` and a **release** with a time constant of `0.2` (exponential decay).
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Gain Node
6 |
7 | You can use the [`GainNode`](https://developer.mozilla.org/en-US/docs/Web/API/GainNode) to change the output volume of sounds.
8 |
9 | It has a single attribute, an [AudioParam](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam) called [`gain`](https://developer.mozilla.org/en-US/docs/Web/API/GainNode/gain).
10 |
11 |
12 | ```js
13 | var amp = audioContext.createGain()
14 | amp.connect(audioContext.destination)
15 | oscillator.connect(amp)
16 |
17 | // halve the output volume
18 | amp.gain.value = 0.5
19 | ```
20 |
21 | # Scheduling attack and release
22 |
23 | Just like with `BiquadFilterNode.frequency` in the previous example, we can sweep the value of `amp.gain` over time. Though for the purposes of attack and release envelopes, it sounds a lot nicer (and is easier) to use `setTargetAtTime`.
24 |
25 | # [`audioParam.setTargetAtTime(targetValue, startTime, timeConstant)`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/setTargetAtTime)
26 |
27 | Schedules the start of a change to the value of the AudioParam. The change starts at the time specified in `startTime` and **exponentially moves** towards the value given by the target parameter. The **exponential decay rate** is defined by the `timeConstant` parameter, which is a time measured in seconds.
28 |
29 |
30 | # Attack
31 |
32 | If we want to soften the **"attack"** of the sound (the start), we can sweep the value from `0` to `1`:
33 |
34 | ```js
35 | amp.gain.value = 0
36 | amp.gain.setTargetAtTime(1, audioContext.currentTime, 0.1)
37 | ```
38 |
39 | # Release
40 |
41 | If we want to soften the **"release"** of the sound (the tail end), we can sweep the value back to 0.
42 |
43 | ```js
44 | var endTime = audioContext.currentTime + 2
45 | amp.gain.setTargetAtTime(0, endTime, 0.2)
46 | ```
47 |
48 | Keep in mind that if you are going to add a release envelope to a sound, the sound needs to **keep playing until the release sweep finishes**, otherwise it will just stop.
49 |
50 | ```js
51 | // provide enough time for the exponential falloff
52 | oscillator.stop(endTime + 2)
53 | ```
--------------------------------------------------------------------------------
/state/index.js:
--------------------------------------------------------------------------------
var Observ = require('observ')
var ObservStruct = require('observ-struct')
var ObservSet = require('../lib/observ-set')
var lessons = require('../lessons')
var Lesson = require('./lesson')
var persist = require('../lib/persist')
var pkgInfo = require('../package.json')
var AudioContext = window.AudioContext

// flat, ordered list of 'group/lesson' paths used for selection and
// prev/next navigation
var lessonOrder = Object.keys(lessons).reduce(function (result, groupName) {
  Object.keys(lessons[groupName]).forEach(function (name) {
    result.push(groupName + '/' + name)
  })
  return result
}, [])

// observable application state shared by every view
var state = ObservStruct({
  view: Observ('index'),
  workshop: Observ(),
  selectedLesson: Observ(lessonOrder[0]),
  verifiedLessons: ObservSet([]),
  lessons: Observ([]),
  version: pkgInfo.version
})

// mirror these observables to persistent storage, keyed by workshop name
// NOTE(review): state.workshop() has no value yet at this point (it is
// set later by the entry script), so the key prefix evaluates to
// 'undefined/...' here — confirm this is intended
persist(state.workshop() + '/selectedLesson', state.selectedLesson)
persist(state.workshop() + '/verifiedLessons', state.verifiedLessons)
persist(state.workshop() + '/view', state.view)

// single shared AudioContext used by all lessons
state.audioContext = new AudioContext()

// grouped lesson index used to render the index view
state.lessons.set(Object.keys(lessons).map(function (groupName) {
  return {
    name: groupName,
    lessons: Object.keys(lessons[groupName]).map(function (name) {
      return {
        title: name,
        path: groupName + '/' + name
      }
    })
  }
}))

// select a lesson by path and switch to the lesson view
state.viewLesson = function (path) {
  state.selectedLesson.set(path)
  state.view.set('lesson')
}

// build a Lesson instance from its 'group/name' path; returns undefined
// when the path does not resolve to lesson data
state.getLesson = function (path) {
  var parts = path.split('/')
  var data = lessons[parts[0]][parts[1]]
  if (data) {
    return Lesson({
      title: parts[1],
      path: path,
      verifyTime: getDuration(data['answer.js']),
      answer: data['answer.js'],
      lesson: data['lesson.md'],
      start: data['start.js'],
      workshop: state.workshop()
    })
  }
}

// advance to the next lesson, wrapping to the first at the end
state.nextLesson = function () {
  var index = lessonOrder.indexOf(state.selectedLesson())
  state.selectedLesson.set(lessonOrder[index + 1] || lessonOrder[0])
}

// go back to the previous lesson (falls back to the first lesson when
// already at the start)
state.prevLesson = function () {
  var index = lessonOrder.indexOf(state.selectedLesson())
  state.selectedLesson.set(lessonOrder[index - 1] || lessonOrder[0])
}
74 |
// Extract the playback duration (seconds) from a magic comment of the
// form `// # duration=N` in a lesson's answer source. Defaults to 2.
function getDuration (src) {
  var found = /\/\/[ ]?# duration=([0-9\.]+)/.exec(src)
  if (found) {
    return parseFloat(found[1]) || 2
  }
  return 2
}
79 |
80 | module.exports = state
81 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # This is an OPEN Open Source Project
2 |
3 | ## What?
4 |
5 | Individuals making significant and valuable contributions are given commit-access to the project to contribute as they see fit. This project is more like an open wiki than a standard guarded open source project.
6 |
7 | ## Rules
8 |
9 | There are a few basic ground-rules for contributors:
10 |
11 | - No `--force` pushes or modifying the Git history in any way.
12 | - Non-master branches ought to be used for ongoing work.
13 | - External API changes and significant modifications ought to be subject to an internal pull-request to solicit feedback from other contributors.
14 | - Internal pull-requests to solicit feedback are encouraged for any other non-trivial contribution but left to the discretion of the contributor.
15 | - For significant changes wait a full 24 hours before merging so that active contributors who are distributed throughout the world have a chance to weigh in.
16 | - Contributors should attempt to adhere to the prevailing code-style.
17 |
18 | ## Releases
19 |
20 | Declaring formal releases requires peer review.
21 |
22 | - A reviewer of a pull request should recommend a new version number (patch, minor or major).
23 | - Once your change is merged feel free to bump the version as recommended by the reviewer.
24 | - A new version number should not be cut without peer review unless done by the project maintainer.
25 |
26 | ## Want to contribute?
27 |
28 | Even though collaborators may contribute as they see fit, if you are not sure what to do, here's a suggested process:
29 |
30 | ### Cutting a new version
31 |
32 | - Get your branch merged on master
33 | - Run `npm version major` or `npm version minor` or `npm version patch`
34 | - `git push origin master --tags`
35 | - If you are a project owner, then `npm publish`
36 |
37 | ### If you want to have a bug fixed or a feature added:
38 |
39 | - Check open issues for what you want.
40 | - If there is an open issue, comment on it, otherwise open an issue describing your bug or feature with use cases.
41 | - Discussion happens on the issue about how to solve your problem.
42 | - You or a core contributor opens a pull request solving the issue with tests and documentation.
43 | - The pull requests gets reviewed and then merged.
44 | - A new release version gets cut.
45 | - (Disclaimer: Your feature might get rejected.)
46 |
47 | ### Changes to this arrangement
48 |
49 | This is an experiment and feedback is welcome! This document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change.
50 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Web Audio School
2 | ===
3 |
4 | An intro to the **Web Audio API** by a series of self-guided workshops.
5 |
6 | We will primarily focus on using the Web Audio API for music, but the concepts here could be applied to game sound and other creative uses.
7 |
8 |
9 | > This workshop was built for [CampJS V](http://v.campjs.com/)!
10 |
11 | ## Try it out here:
12 |
13 | [mmckegg.github.io/web-audio-school](http://mmckegg.github.io/web-audio-school/)
14 |
15 | ## TODO
16 |
17 | - Handle user code errors and display in editor
18 |
19 | ## Install via [npm](https://www.npmjs.com/package/web-audio-school)
20 |
21 | Global install:
22 |
23 | ```bash
24 | $ npm install web-audio-school -g
25 | ```
26 |
27 | Start the server:
28 |
29 | ```
30 | $ web-audio-school
31 | ```
32 |
33 | Now navigate to [localhost:9966](http://localhost:9966).
34 |
35 | ## Browser Support
36 |
37 | Works in latest versions of Chrome, Firefox, Safari.
38 |
39 | [Internet Explorer doesn't support the Web Audio API yet](https://status.modern.ie/webaudioapi).
40 |
41 | ## Lesson Overview
42 |
43 | ### Part I: Subtractive Synthesis
44 |
45 | 1. Play a pitched sawtooth wave
46 | 2. Play a short sequence of notes
47 | 3. Add a high-pass filter
48 | 4. Modulate filter cutoff
49 | 5. Add an envelope
50 | 6. Vibrato
51 |
52 | ### Part II: Working With Samples
53 |
54 | 7. Decode and play an audio file
55 | 8. Set in and out points
56 | 9. Looping samples
57 | 10. Set sample pitch
58 | 11. Play a sequence of pitched samples
59 |
60 | ### Part III: Effects
61 |
12. Tremolo
13. Stereo Tremolo
14. Echo / Delay
15. Overdrive (part 1)
16. Overdrive (part 2)
17. Ping Pong Delay
18. Simple Reverb

### Epilogue

19. Drop the Bass
73 |
74 | ## Coverage
75 |
76 | - AudioContext
77 | - AudioBuffer
78 | - AudioParam
79 | - AudioDestinationNode
80 | - OscillatorNode
81 | - AudioBufferSourceNode
82 | - GainNode
83 | - WaveShaperNode
84 | - StereoPannerNode
85 | - ConvolverNode
86 | - BiquadFilterNode
87 | - DelayNode
88 | - ChannelSplitterNode
89 | - ChannelMergerNode
90 |
91 | ## Not Yet Covered
92 |
93 | - AudioWorker (not yet supported by any browsers)
94 | - ScriptProcessor (deprecated)
95 | - OfflineAudioContext
96 | - DynamicsCompressorNode
97 | - AudioListener
98 | - PannerNode
99 | - AnalyserNode
100 |
101 | ## License
102 |
103 | MIT
104 |
105 | ## Contributors
106 |
107 | - [Matt McKegg](https://github.com/mmckegg)
108 | - [Jan Monschke](https://github.com/janmonschke)
109 |
110 | This is an OPEN open source project. See [CONTRIBUTING.md](https://github.com/mmckegg/web-audio-school/blob/master/CONTRIBUTING.md) for details
111 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/02. Chromatic Scale/lesson.md:
--------------------------------------------------------------------------------
1 | > Pass this exercise by modifying the code on the right so that the generated frequency is **middle C** instead of the default **middle A**.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Mapping Frequencies to Musical Notes
6 |
7 | ```
8 | -3 -1 1 4 6 9 11
9 | -4 -2 0 2 3 5 7 8 10 12
10 | .___________________________________________________________________________.
11 | : | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | :
12 | : | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | :
13 | : | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | :
14 | <-: |_| | |_| |_| | |_| |_| |_| | |_| |_| | |_| |_| |_| | |_| |_| :->
15 | : | | | | | | | | | | | | | | | | | | :
16 | : A | B | C | D | E | F | G | A | B | C | D | E | F | G | A | B | C | D | E :
17 | :___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___:
18 | ^ ^ ^ ^ ^
19 | 220 Hz 440 Hz 523.25 Hz 880 Hz 1174.65 Hz
20 | (-1 Octave) (middle A) (+1 Octave)
21 |
22 | ```
23 |
24 | Chromatic note frequencies are slightly tricky to calculate because the **frequency _doubles_ every octave (12 semitones) you go up**.
25 |
26 | Changing the [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/frequency) from `440` to `880` would go **up one octave**, or **+12 semitones**.
27 |
28 | The formula for this is `baseFrequency * Math.pow(2, noteOffset / 12)`.
29 |
30 | So if you wanted to transpose from **middle A** up **7** semitones to **E**, you would do the following:
31 |
32 | ```js
33 | oscillator.frequency.value = 440 * Math.pow(2, 7 / 12) // 659.255...
34 | ```
35 |
36 | This works going down as well. Here we transpose **down 14 semitones** to **G**:
37 |
38 | ```js
39 | oscillator.frequency.value = 440 * Math.pow(2, -14 / 12) // 195.998...
40 | ```
41 |
42 | # A slightly easier way
43 |
44 | Instances of OscillatorNode also have a [`detune`](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode/detune) [`AudioParam`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam), which allows you to specify transposes in 100ths of semitones.
45 |
46 | So using detune, if you wanted to transpose from **middle A** up **7** semitones to **E**, you would do the following:
47 |
48 | ```js
49 | oscillator.detune.value = 700 // noteOffset * 100
50 | ```
51 |
52 | We will be using the detune method for the remainder of this section.
53 |
--------------------------------------------------------------------------------
/lib/spectrograph.js:
--------------------------------------------------------------------------------
1 | // from: https://github.com/web-audio-components/spectrograph/blob/master/index.js
2 |
3 | var interpolate = require('./interpolate-color.js')
4 |
/**
 * Scrolling spectrogram renderer for a Web Audio AnalyserNode.
 *
 * @param {AnalyserNode} analyser - source of frequency data
 * @param {Object} [opts] - { canvas, speed, minHsl, maxHsl, beforeRender }
 * @throws {Error} when opts.canvas is not a usable canvas element
 */
function Spectrograph (analyser, opts) {
  if (!(this instanceof Spectrograph)) return new Spectrograph(analyser, opts)

  opts = opts || {}
  var self = this

  // Defaults (each may be overridden via opts below)
  this.analyser = analyser
  this.minHsl = [-85, 50, 10]
  this.maxHsl = [-50, 100, 100]
  this.speed = 1

  // Off-screen canvas used to scroll the previous frame sideways
  this.tempCanvas = document.createElement('canvas')
  this.fft = new Uint8Array(this.analyser.frequencyBinCount)

  var configurable = ['speed', 'minHsl', 'maxHsl']
  configurable.forEach(function (key) {
    if (opts[key] != null) {
      self[key] = opts[key]
    }
  })

  try {
    this.ctx = opts.canvas.getContext('2d')
  } catch (e) {
    throw new Error('Spectrograph must have a valid canvas element')
  }

  window.requestAnimationFrame(function loop () {
    var ctx = self.ctx
    var resized = ctx.canvas.offsetHeight !== ctx.canvas.height ||
      ctx.canvas.offsetWidth !== ctx.canvas.width

    // Keep the canvas bitmap (and the scroll buffer) in sync with CSS size
    if (resized) {
      ctx.canvas.height = ctx.canvas.offsetHeight
      ctx.canvas.width = ctx.canvas.offsetWidth
      self.tempCanvas.width = ctx.canvas.width
      self.tempCanvas.height = ctx.canvas.height
    }

    // speed === 0 means paused: skip sampling/drawing but keep looping
    if (self.speed !== 0) {
      if (opts && opts.beforeRender) {
        opts.beforeRender()
      }
      self.analyser.getByteFrequencyData(self.fft)
      process(self)
    }

    window.requestAnimationFrame(loop)
  })
}
53 |
/**
 * Draw one column of spectrogram data and scroll the image left by
 * `mod.speed` pixels.
 * @param {Spectrograph} mod
 */
function process (mod) {
  var ctx = mod.ctx
  var fft = mod.fft
  var width = ctx.canvas.width
  var height = ctx.canvas.height

  // Snapshot the current frame so it can be redrawn shifted left
  var tempCtx = mod.tempCanvas.getContext('2d')
  tempCtx.drawImage(ctx.canvas, 0, 0, width, height)

  // Only the lower half of the FFT bins are rendered
  var range = fft.length / 2

  for (var row = 0; row <= height; row++) {
    // Map screen rows onto FFT bins logarithmically
    var bin = logScale(row / height * range, fft.length / 2)
    var level = fft[bin]

    // Blend between minHsl and maxHsl; levels below 100 clamp to min
    var mix = Math.max(0, level - 100) / (255 - 100)
    ctx.fillStyle = interpolate(mod.minHsl, mod.maxHsl, mix)
    ctx.fillRect(
      width - mod.speed, height - row,
      mod.speed, 1
    )
  }

  // Scroll: paint the snapshot shifted left by `speed`, then reset transform
  ctx.translate(-mod.speed, 0)
  ctx.drawImage(mod.tempCanvas, 0, 0, width, height, 0, 0, width, height)
  ctx.setTransform(1, 0, 0, 1, 0, 0)
}
81 |
/**
 * Helpers.
 */
85 |
/**
 * Map a linear index in [0, total] onto a logarithmic curve over the same
 * range, giving low values more resolution than high ones.
 * @param {Number} index
 * @param {Number} total
 * @param {Number} [opt_base=2] - logarithm base
 * @returns {Number} rounded log-scaled index
 */
function logScale (index, total, opt_base) {
  var base = opt_base || 2
  // exponent grows linearly with index, reaching log_base(total + 1) at the top
  var maxExponent = Math.log(total + 1) / Math.log(base)
  var exponent = maxExponent * index / total
  return Math.round(Math.pow(base, exponent) - 1)
}
92 |
/**
 * Logarithm of `val` in an arbitrary `base` (change-of-base rule).
 * @param {Number} val
 * @param {Number} base
 * @returns {Number}
 */
function logBase (val, base) {
  var numerator = Math.log(val)
  var denominator = Math.log(base)
  return numerator / denominator
}
96 |
97 | module.exports = Spectrograph
98 |
--------------------------------------------------------------------------------
/lessons/1. Subtractive Synthesis/04. Add a high-pass filter/lesson.md:
--------------------------------------------------------------------------------
1 | > Modify the code on the right so that the audio is **filtered** to **remove** all frequencies **lower** than `10000` Hz.
2 |
3 | You can listen to how the audio is supposed to sound by clicking **Play Answer**.
4 |
5 | # Biquad Filter Node
6 |
7 | Use the [BiquadFilterNode](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode) to shape the audio output frequencies.
8 |
9 | ```js
10 | var filter = audioContext.createBiquadFilter()
11 | filter.connect(audioContext.destination)
12 | oscillator.connect(filter)
13 | ```
14 |
15 | The most common [types of filters](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/type) are `'lowpass'`, `'highpass'`, and `'bandpass'`.
16 |
17 |
18 | # Connections
19 |
20 | The `connect()` function acts as a patch cable from one device (the oscillator or the filter) to another.
21 |
22 | In previous lessons, we've been connecting the output of an `OscillatorNode` directly to `audioContext.destination` (which in most cases would be your speakers). But in this lesson, we will instead connect the `OscillatorNode` to the `BiquadFilterNode`, and then finally connect the `BiquadFilterNode` to the destination.
23 |
24 | We want this:
25 |
26 | ```
27 | [oscillator] -> [filter] -> [audioContext.destination]
28 | ```
29 |
30 | **Note:** In this lesson you will need to remove the existing `oscillator.connect(audioContext.destination)`, otherwise the unfiltered audio signal will play along with the filtered one.
31 |
32 | # Lowpass
33 |
34 | The lowpass filter is the default type. It allows all signals with a frequency lower than [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/frequency) to pass and attenuates all signals with higher frequency.
35 |
36 | ```js
37 | // filter out all frequencies above 500 Hz
38 | filter.type = 'lowpass'
39 | filter.frequency.value = 500
40 | ```
41 |
42 | # Highpass
43 |
44 | Works the same as the lowpass, except the other way around. All signals with a frequency higher than [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/frequency) are allowed and anything lower is attenuated.
45 |
46 | ```js
47 | // filter out all frequencies below 3000 Hz
48 | filter.type = 'highpass'
49 | filter.frequency.value = 3000
50 | ```
51 |
52 | # Bandpass
53 |
54 | Only allows frequencies that are within a certain tolerance of [`frequency`](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/frequency) specified by [`Q`](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/Q).
55 |
56 | The greater the Q value, the smaller the frequency band.
57 |
58 | ```js
59 | // filter out all frequencies that are not near 1000 Hz
60 | filter.type = 'bandpass'
61 | filter.frequency.value = 1000
62 | filter.Q.value = 1
63 | ```
64 |
65 | # [Other filter types](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode/type)
66 |
67 | - lowshelf
68 | - highshelf
69 | - peaking
70 | - notch
71 | - allpass
72 |
--------------------------------------------------------------------------------
/lessons/3. Effects/15. Simple Overdrive/lesson.md:
--------------------------------------------------------------------------------
1 | > Add a basic global **overdrive** effect by turning the gain up to `20`, and clipping the signal at **+/- 1**.
2 |
3 | # Clipping Audio
4 |
5 | Here is the output of a `'triangle'` wave from an **OscillatorNode**:
6 |
7 | ```
8 | +1 -| /\ /\ /\ /\ :
9 | | / \ / \ / \ / \ :
10 | +.5 -| / \ / \ / \ / \ :
11 | | / \ / \ / \ / \ :
12 | 0 -|- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \:
13 | |\ / \ / \ / \ / :
14 | -.5 -| \ / \ / \ / \ / :
15 | | \ / \ / \ / \ / :
16 | -1 -| \/ \/ \/ \/ :
17 | ```
18 |
19 | When we add a gain node...
20 |
21 | ```js
22 | var amp = audioContext.createGain()
23 | amp.gain.value = 10
24 | ```
25 |
26 | ...the amplitude is just multiplied by the `gain` value:
27 |
28 | ```
29 | +10 -| /\ /\ /\ /\ :
30 | ^ | / \ / \ / \ / \ :
31 | ^ _| / \ / \ / \ / \ :
32 | +1 | / \ / \ / \ / \ :
33 | 0 -|- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \:
34 | -1 _|\ / \ / \ / \ / :
35 | v | \ / \ / \ / \ / :
36 | v | \ / \ / \ / \ / :
37 | -10 -| \/ \/ \/ \/ :
38 | ```
39 |
40 | But if we then connect the GainNode to a WaveShaperNode with a standard curve...
41 |
42 | ```js
43 | var shaper = audioContext.createWaveShaper()
44 | shaper.curve = new Float32Array([-1, 1])
45 | ```
46 |
47 | ...any audio with amplitude greater than `+1` or less than `-1` is **clipped**:
48 |
49 | ```
50 | +10 -| :
51 | | :
52 | _| ______ ______ ______ ______ :
53 | +1 | / \ / \ / \ / \ :
54 | 0 -|- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \- - - - /- - - - \:
55 | -1 _|\______/ \______/ \______/ \______/ :
56 | | :
57 | | :
58 | -10 -| :
59 | ```
60 |
61 | This produces some pretty interesting **audio distortion artifacts**, something like overdrive on a **guitar amp**, but a lot harsher sounding _thanks_ to the digital precision of computers.
--------------------------------------------------------------------------------
/styles/player.mcss:
--------------------------------------------------------------------------------
1 | Player {
2 | display: flex
3 | flex-direction: column
4 | margin: 5px
5 | background: #191919
6 | border: 1px solid #111
7 | border-radius: 3px
8 | box-shadow: 0 0 4px black
9 | overflow: hidden
10 |
11 | -verified {
12 | border-color: #3B5A1E;
13 | box-shadow: 0 0 4px #00FF20;
14 |
15 | header {
16 | background: #223722;
17 | color: #8FF58F;
18 |
19 | :after {
20 | content: ': verified!'
21 | color: white
22 | font-weight: normal
23 | }
24 | }
25 | }
26 |
27 | -error {
28 | border-color: #5A3B1E;
29 | box-shadow: 0 0 4px #FF0020;
30 |
31 | header {
32 | background: #372222;
33 | color: #F58F8F;
34 |
35 | :after {
36 | content: ": code error :("
37 | color: white
38 | font-weight: normal
39 | }
40 | }
41 | }
42 |
43 | -modified {
44 | div {
45 | button.reset {
46 | display: block
47 | }
48 | }
49 | }
50 |
51 | -playing {
52 | div {
53 | button.run {
54 | display: none
55 | }
56 |
57 | button.stop {
58 | display: block
59 | }
60 | }
61 | }
62 |
63 | header {
64 | font-size: 90%
65 | font-weight: bold
66 | text-shadow: 0px 0px 2px black;
67 | background: #111
68 | color: #AAA
69 | padding: 10px
70 | }
71 |
72 | div {
73 | flex: 1
74 | min-height: 150px
75 | position: relative
76 |
77 | button {
78 | display: block
79 | width: auto
80 | opacity: 0.9
81 | background-color: #222
82 | box-shadow: inset 2px 2px 5px #444
83 | border-radius: 4px
84 | margin: 10px
85 | padding: 10px
86 | color: #AAA
87 | cursor: pointer
88 |
89 | :focus {
90 | outline-style: none;
91 | color: white;
92 | z-index: 100;
93 | }
94 |
95 |
96 | :hover {
97 | background-color: #555
98 | color: white
99 | }
100 |
101 | }
102 |
103 | button.reset {
104 | background: svg(reset) no-repeat 10px center #222
105 | padding-left: 30px
106 | display: none
107 | }
108 |
109 | button.run {
110 | background: svg(play) no-repeat 10px center #222
111 | padding-left: 30px
112 | }
113 |
114 | button.stop {
115 | display: none
116 | background: svg(stop) no-repeat 10px center #222
117 | padding-left: 30px
118 | }
119 |
120 | canvas {
121 | position: absolute
122 | width: 100%
123 | height: 100%
124 | top:0
125 | left:0
126 | display: block
127 | }
128 | }
129 |
130 | @svg reset {
131 | width: 12px
132 | height: 12px
133 | content: ""
134 |
135 | path {
136 | fill: none
137 | stroke-width: 2px
138 | stroke: #F88
139 | }
140 | }
141 |
142 | @svg play {
143 | width: 16px
144 | height: 16px
145 | content: ""
146 |
147 | path {
148 | fill: #8F8
149 | }
150 | }
151 |
152 | @svg stop {
153 | width: 12px
154 | height: 12px
155 | content: ""
156 |
157 | path {
158 | fill: #BAA
159 | }
160 | }
161 | }
--------------------------------------------------------------------------------
/lib/editor.js:
--------------------------------------------------------------------------------
1 | var ace = require('brace')
2 | require('brace/mode/javascript')
3 | require('brace/theme/ambiance')
4 | require('brace/ext/language_tools')
5 |
6 | var NO_TRANSACTION = {}
7 |
8 | var watch = require('observ/watch')
9 |
10 | module.exports = RawEditor
11 |
/**
 * Widget wrapping an Ace editor bound to an observable file.
 * Callable with or without `new`.
 *
 * @param {Object} fileObject - observable file wrapper (has .file, .set)
 * @param {Function} onSave - invoked when the user triggers a save
 */
function RawEditor (fileObject, onSave) {
  if (!(this instanceof RawEditor)) {
    return new RawEditor(fileObject, onSave)
  }

  this.onSave = onSave
  this.fileObject = fileObject

  // Tolerate a missing fileObject (e.g. no lesson selected yet)
  this.file = fileObject && fileObject.file
}
20 |
21 | RawEditor.prototype.type = 'Widget'
22 |
// Widget lifecycle: build the DOM, create the Ace editor, and wire the
// two-way sync between the editor buffer and the observable file.
// Returns the root element.
RawEditor.prototype.init = function () {
  var element = document.createElement('div')
  element.className = 'RawEditor'

  var el = document.createElement('div')

  var textEditor = this.editor = ace.edit(el)
  textEditor.onSave = this.onSave

  // expose all editors globally (debugging hook)
  window.editors = window.editors || []
  window.editors.push(textEditor)

  textEditor.setTheme('ace/theme/ambiance')
  textEditor.session.setMode('ace/mode/javascript')
  textEditor.session.setUseWorker(false) // disable Ace's background lint worker
  textEditor.session.setTabSize(2)
  textEditor.renderer.setScrollMargin(20, 100)
  textEditor.renderer.setPadding(20)
  textEditor.renderer.setShowGutter(false)
  textEditor.setOptions({
    enableBasicAutocompletion: true,
    enableLiveAutocompletion: false
  })

  // Ctrl/Cmd-S fires the widget's save callback instead of the browser default
  textEditor.commands.addCommand({
    name: 'saveFile',
    bindKey: {
      win: 'Ctrl-S',
      mac: 'Command-S',
      sender: 'editor|cli'
    },
    exec: function (env, args, request) {
      // hACKS!
      if (textEditor.onSave) {
        textEditor.onSave()
      }
    }
  })

  var currentFile = null
  var self = this

  // Sentinel-based "transactions" break the feedback loop between the editor
  // and the observable file (editor change -> file.set -> watch -> setValue)
  var currentTransaction = NO_TRANSACTION
  var currentSaveTransaction = NO_TRANSACTION

  // (Re)bind the editor to a different observable file, releasing any
  // watcher on the previously-bound file first
  textEditor.setFile = function (fileObject) {
    clearTimeout(saveTimer)

    if (self.release) {
      self.release()
      self.release = null
    }

    currentFile = fileObject

    if (fileObject) {
      self.release = watch(fileObject, update)
    }
  }
  // textEditor.setSize('100%', '100%')

  // Push editor contents into the observable file, remembering the value so
  // the resulting watch callback can recognise and ignore its own echo
  function save () {
    var value = textEditor.session.getValue()
    currentSaveTransaction = value
    currentFile.set(value)
    currentSaveTransaction = NO_TRANSACTION
  }

  // Pull external file changes into the editor, unless the change is our own
  // save echoing back or the editor already shows exactly that text
  function update () {
    var data = currentFile ? currentFile() : null
    if (data && currentSaveTransaction !== data && textEditor.session.getValue() !== data) {
      currentTransaction = data
      textEditor.session.setValue(data, -1)
      currentTransaction = NO_TRANSACTION
    }
  }

  // While the editor is focused, defer external updates; only sync from the
  // file once focus has been gone for 100ms
  var blurTimer = null
  textEditor.on('focus', function () {
    clearTimeout(blurTimer)
  })

  textEditor.on('blur', function () {
    clearTimeout(blurTimer)
    blurTimer = setTimeout(function () {
      if (!textEditor.isFocused()) {
        update()
      }
    }, 100)
  })

  // Debounced auto-save, but only for edits the user made (changes applied
  // by update() above are inside a transaction and are skipped)
  var saveTimer = null
  textEditor.on('change', function () {
    if (currentTransaction === NO_TRANSACTION) {
      clearTimeout(saveTimer)
      saveTimer = setTimeout(save, 100)
    }
  })

  textEditor.setFile(this.fileObject)

  element.appendChild(el)
  return element
}
127 |
// Widget lifecycle: called on re-render with the previous widget instance.
// Adopts the live Ace editor instead of rebuilding it, refreshes the save
// callback, and rebinds the file only when it actually changed.
RawEditor.prototype.update = function (prev, elem) {
  // carry over the live editor and file watcher from the previous instance
  this.editor = prev.editor
  this.release = prev.release

  // the save callback may be a fresh closure on every render
  this.editor.onSave = this.onSave

  var fileChanged = prev.file !== this.file
  if (fileChanged) {
    this.editor.setFile(this.fileObject)
  }

  return elem
}
138 |
// Widget lifecycle: tear down the Ace instance and stop watching the file.
RawEditor.prototype.destroy = function (elem) {
  this.editor.destroy()
  if (this.release) {
    this.release()
  }
  this.release = null
}
144 |
--------------------------------------------------------------------------------
/styles/lesson.mcss:
--------------------------------------------------------------------------------
1 | Lesson {
2 | position: relative
3 | display: flex
4 | flex-direction: column
5 | font-size: 90%
6 | margin: 5px
7 | flex: 1
8 | border: 3px solid #111
9 | border-radius: 3px
10 | box-shadow: 0 0 4px black
11 | min-height: 100px
12 |
13 | background: #222
14 | color: #FFF
15 |
16 | -verified {
17 | header {
18 | nav {
19 | button {
20 | -next {
21 |
22 | -webkit-animation-name: greenPulse;
23 | -webkit-animation-duration: 2s;
24 | -webkit-animation-iteration-count: infinite;
25 |
26 | color: white;
27 | background-color: #376C33;
28 | box-shadow: 0 0 15px rgba(103, 255, 103, 0.56);
29 |
30 | :hover {
31 | background-color: #31692C;
32 | color: white
33 | }
34 | }
35 | }
36 | }
37 | }
38 | }
39 |
40 | div {
41 | background: #222
42 | flex: 1
43 | padding: 15px
44 | overflow-y: auto
45 | color: #EEE
46 | line-height: 1.4
47 |
48 | (img) {
49 | max-width: 100%
50 | }
51 |
52 | (strong) {
53 | color: #FFF
54 | }
55 |
56 | blockquote {
57 | background: #3E5241;
58 | padding: 1px 10px;
59 | margin: 0;
60 | border: 3px solid #437743;
61 | color: #BBF2BB;
62 | font-size: 110%;
63 |
64 | (strong){
65 | color: #EFE
66 | }
67 | }
68 | }
69 |
70 | header {
71 | display: flex
72 | padding: 15px
73 | background: #111
74 |
75 | nav {
76 | margin: -5px 0
77 |
78 | button {
79 | border-width: 2px
80 | box-shadow: inset 2px 2px 5px rgba(255,255,255, 0.3)
81 | border-radius: 4px
82 | padding: 10px
83 | color: #AAA
84 | background: #222
85 | cursor: pointer
86 |
87 | :focus {
88 | outline-style: none;
89 | color: white;
90 | z-index: 100;
91 | }
92 |
93 | :hover {
94 | background-color: #555
95 | color: white
96 | }
97 |
98 | -index {
99 | background: svg(index) no-repeat 10px center #222
100 | padding-left: 25px
101 | margin-right: 4px
102 | }
103 |
104 | -next {
105 | border-top-left-radius: 0
106 | border-bottom-left-radius: 0
107 |
108 | background: svg(next) no-repeat right 10px center #222
109 | padding-right: 25px
110 | }
111 |
112 | -prev {
113 | border-top-right-radius: 0
114 | border-bottom-right-radius: 0
115 |
116 | background: svg(prev) no-repeat 10px center #222
117 | padding-left: 25px
118 | }
119 | }
120 | }
121 |
122 | h1 {
123 | flex: 1
124 | }
125 |
126 | :after {
127 | transform: translate3d(0,0,0)
128 | position: absolute
129 | display: block
130 | content: ' '
131 | height: 10px
132 | width: 100%
133 | bottom: 0
134 | left: 0
135 | background: linear-gradient(rgba(30, 30, 30, 0), rgba(30, 30, 30, 0.8))
136 | }
137 | }
138 |
139 | @svg next {
140 | width: 8px
141 | height: 12px
142 | content: ""
143 |
144 | path {
145 | fill: rgba(255,255,255,0.7)
146 | }
147 | }
148 |
149 | @svg prev {
150 | width: 8px
151 | height: 12px
152 | content: ""
153 |
154 | path {
155 | fill: rgba(255,255,255,0.7)
156 | }
157 | }
158 |
159 | @svg index {
160 | width: 10px
161 | height: 12px
162 | content: ""
163 |
164 | rect {
165 | width: 8px
166 | height: 2px
167 | fill: rgba(255,255,255,0.7)
168 | }
169 | }
170 | }
171 |
172 | @-webkit-keyframes greenPulse {
173 | from { background-color: #376C33; box-shadow: 0 0 9px #333; }
174 | 50% { background-color: #63C25C; box-shadow: 0 0 18px #63C25C; }
175 | to { background-color: #376C33; box-shadow: 0 0 9px #333; }
176 | }
--------------------------------------------------------------------------------
/views/lesson.js:
--------------------------------------------------------------------------------
1 | var watch = require('observ/watch')
2 | var EditorWidget = require('../lib/editor')
3 | var Spectrograph = require('../lib/spectrograph')
4 | var Verifier = require('../lib/verifier')
5 | var h = require('micro-css/h')(require('hyperscript'))
6 | var markdown = require('../lib/markdown')
7 | var send = require('../lib/send')
8 |
9 | module.exports = LessonView
10 |
/**
 * Builds the DOM for a single lesson: the markdown instructions, the code
 * editor, a "Your Audio" player with a live spectrograph, and a "Target
 * Audio" player for the reference answer.
 *
 * @param {Object} state - app state (audioContext, view, verifiedLessons, nav)
 * @param {Object} lesson - lesson observables (file, answer, title, path, ...)
 * @returns {Element} root element, augmented with a .destroy() teardown hook
 */
function LessonView (state, lesson) {
  var audio = state.audioContext
  var editor = EditorWidget(lesson.file, verify)

  // Keep the verifier fed with the user's code, the answer and the duration
  var verifier = Verifier(audio)
  watch(lesson.file, verifier.set)
  watch(lesson.verifyTime, verifier.setDuration)
  watch(lesson.answer, verifier.setAnswer)

  var canvas = h('canvas')
  var answerCanvas = h('canvas')
  // after a failed verify, audio keeps playing (in red) until it goes silent
  var stopOnNoSignal = false

  var spectrograph = Spectrograph(verifier.getAnalyser(), {
    canvas: canvas,
    beforeRender: checkMatch,
    speed: 0,
    minHsl: [-50, 0, 10]
  })

  var answerSpectrograph = Spectrograph(verifier.getAnswerAnalyser(), {
    canvas: answerCanvas,
    speed: 0,
    minHsl: [200, 0, 10]
  })

  // Recolor the spectrograph every frame to reflect match state:
  // red while draining a failed run, bright on match, grey otherwise
  function checkMatch () {
    if (stopOnNoSignal) {
      spectrograph.maxHsl = [0, 100, 50]
      spectrograph.minHsl = [0, 40, 10]

      if (!verifier.checkSignal()) {
        player.classList.remove('-playing')
        stopOnNoSignal = false
        spectrograph.speed = 0
      }

    } else if (verifier.checkMatch()) {
      spectrograph.maxHsl = [100, 100, 100]
      spectrograph.minHsl = [200, 0, 10]
    } else {
      spectrograph.maxHsl = [50, 0, 50]
      spectrograph.minHsl = [-50, 0, 10]
    }
  }

  var resetButton = h('button.reset', { onclick: lesson.reset }, ['Reset Code'])

  var player = h('Player', [
    h('header', [ 'Your Audio' ]),
    h('div', [
      canvas,
      h('button.run', { onclick: verify }, ['Play / Verify']),
      h('button.stop', { onclick: verifier.stop }, 'Stop'),
      resetButton
    ])
  ])

  var lessonElement = h('Lesson', [
    h('header', [
      h('h1', lesson.title()),
      h('nav', [
        h('button -index', { onclick: send(state.view.set, 'index') }, 'Index'),
        h('button -prev', { onclick: state.prevLesson }, 'Prev'),
        h('button -next', { onclick: state.nextLesson }, 'Next')
      ])
    ]),
    markdownElement(lesson.lesson())
  ])

  // Toggle '-verified' styling whenever this lesson's pass state changes
  watch(state.verifiedLessons, function () {
    if (state.verifiedLessons.has(lesson.path())) {
      player.classList.add('-verified')
      lessonElement.classList.add('-verified')
    } else {
      player.classList.remove('-verified')
      lessonElement.classList.remove('-verified')
    }
  })

  // '-modified' reveals the "Reset Code" button (see player.mcss)
  watch(lesson.modified, function (modified) {
    if (modified) {
      player.classList.add('-modified')
    } else {
      player.classList.remove('-modified')
    }
  })

  var answerPlayer = h('Player', [
    h('header', [ 'Target Audio' ]),
    h('div', [
      answerCanvas,
      h('button.run', { onclick: playAnswer }, 'Play Answer'),
      h('button.stop', { onclick: verifier.stop }, 'Stop')
    ])
  ])

  var result = h('Main', [
    h('div.side', [
      lessonElement,
      answerPlayer
    ]),
    h('div.editor', [
      editor.init(),
      player
    ])
  ])

  // stop any playing/verifying audio when this view is torn down
  result.destroy = function () {
    verifier.stop()
  }

  return result

  // scoped

  // Render markdown lesson text into a container element
  function markdownElement (md) {
    var el = h('div')
    el.innerHTML = markdown.render(md)
    return el
  }

  // Play the reference answer, animating the target spectrograph until done
  function playAnswer () {
    verifier.playAnswer(function () {
      answerSpectrograph.speed = 0
      answerPlayer.classList.remove('-playing')
    })

    answerPlayer.classList.add('-playing')
    answerSpectrograph.speed = 3
    answerSpectrograph.maxHsl = [200, 100, 100]
    answerSpectrograph.minHsl = [150, 0, 10]
  }

  // Run the user's code against the answer and record pass/fail
  function verify () {
    player.classList.remove('-error')

    verifier.verify(function (err, pass) {
      player.classList.remove('-playing')
      spectrograph.speed = 0
      answerSpectrograph.speed = 0

      if (err) {
        // user code threw: flag the player and log details to the console
        player.classList.add('-error')
        if (err instanceof Error) {
          console.log(err.message + '\n' + err.stack)
        } else {
          console.log(err)
        }
      } else if (pass) {
        state.verifiedLessons.add(lesson.path())
      } else {
        state.verifiedLessons.remove(lesson.path())

        // failed but still audible: keep playing in red until silence
        if (verifier.checkSignal()) {
          spectrograph.speed = 3
          stopOnNoSignal = true
          player.classList.add('-playing')
        }
      }

    })

    player.classList.add('-playing')
    stopOnNoSignal = false
    spectrograph.speed = 3
    answerSpectrograph.speed = 3
    answerSpectrograph.maxHsl = [250, 10, 50]
    answerSpectrograph.minHsl = [200, 0, 10]
  }
}
182 |
--------------------------------------------------------------------------------
/lib/verifier.js:
--------------------------------------------------------------------------------
1 | var AudioMatch = require('./audio-match')
2 |
/**
 * Lesson verifier: runs user code and answer code against the same
 * AudioContext, feeds both outputs through analysers into AudioMatch,
 * and reports whether they produced matching audio.
 */
module.exports = function (audioContext) {
  var duration = 2        // seconds of audio compared per verification run
  var runAnswer = null    // compiled answer source (set via setAnswer)
  var run = null          // compiled user source (set via set)
  var fails = 0           // mismatch count for the current run

  var audioMatch = AudioMatch(audioContext)

  var lastAnswerOutput = null
  var lastOutput = null
  var verifyTimer = null
  var verifyInterval = null
  var verifyCb = null
  var verifyAnswerCb = null

  var analyser = audioContext.createAnalyser()
  var answerAnalyser = audioContext.createAnalyser()

  // keep feeding the analysers a (silent) stream ~1s after output stops
  var toAnalyser = signalEndDelay(analyser, 1)
  var toAnswerAnalyser = signalEndDelay(answerAnalyser, 1)

  analyser.connect(audioMatch.inputA)
  answerAnalyser.connect(audioMatch.inputB)

  analyser.smoothingTimeConstant = 0.01
  answerAnalyser.smoothingTimeConstant = 0.01

  // let's not deafen everyone...
  var output = audioContext.createDynamicsCompressor()
  output.connect(audioContext.destination)

  var playback = audioContext.createGain()
  playback.connect(output)
  playback.gain.value = 0.6

  return { // eslint-disable-line accessor-pairs
    getAnalyser: function () {
      return analyser
    },

    getAnswerAnalyser: function () {
      return answerAnalyser
    },

    checkMatch: function () {
      return audioMatch.checkMatch()
    },

    checkSignal: function () {
      return audioMatch.checkSignal()
    },

    setDuration: function (seconds) {
      duration = seconds
    },

    // compile the answer source; it receives our sandboxed AudioContext
    setAnswer: function (src) {
      runAnswer = new Function('AudioContext', src) // eslint-disable-line no-new-func
    },

    // compile the user's source; on a parse failure the previous good
    // version of `run` stays active (user is likely mid-edit)
    set: function (src) {
      try {
        run = new Function('AudioContext', src) // eslint-disable-line no-new-func
      } catch (ex) {
        // syntax error
      }
    },

    // Play only the answer code's audio; cb fires when the run ends
    playAnswer: function (cb) {
      stop()

      var answerOutput = audioContext.createGain()
      lastAnswerOutput = answerOutput

      answerOutput.connect(playback)
      answerOutput.connect(toAnswerAnalyser)

      runAnswer(wrapAudioContext(audioContext, answerOutput))

      verifyAnswerCb = function () {
        verifyAnswerCb = null
        cb && cb()
      }

      verifyTimer = setTimeout(verifyAnswerCb, (duration + 0.1) * 1000)
    },

    // Run user code and answer code together (user audio audible, answer
    // muted into its analyser) and poll AudioMatch for mismatches.
    // cb(err) on user-code exception, otherwise cb(null, pass).
    verify: function (cb) {
      stop()

      var output = audioContext.createGain()
      var answerOutput = audioContext.createGain()
      fails = 0

      output.connect(playback)
      output.connect(toAnalyser)

      answerOutput.connect(toAnswerAnalyser)

      lastOutput = output
      lastAnswerOutput = answerOutput

      audioMatch.reset()

      // sync audio contexts
      var startTime = audioContext.currentTime + 0.1

      try {
        run(wrapAudioContext(audioContext, output, startTime))
      } catch (ex) {
        process.nextTick(function () {
          cb && cb(ex)
        })
        return false
      }

      runAnswer(wrapAudioContext(audioContext, answerOutput, startTime))

      verifyCb = function () {
        clearInterval(verifyInterval)
        cb && cb(null, fails < 2) // allow a single transient mismatch
        verifyCb = null
      }

      verifyInterval = setInterval(function () {
        if (!audioMatch.checkMatch()) {
          console.log('fail', audioContext.currentTime)
          fails += 1
        }
      }, 60)

      verifyTimer = setTimeout(verifyCb, (duration + 0.2) * 1000)
    },

    stop: stop
  }

  // scoped

  // Disconnect any running outputs and settle pending callbacks.
  function stop () {
    if (lastOutput) {
      lastOutput.disconnect()
      lastOutput = null
    }

    if (lastAnswerOutput) {
      lastAnswerOutput.disconnect()
      lastAnswerOutput = null
    }

    // force the interrupted run to report failure (fails < 2 becomes false)
    fails += 100

    verifyCb && verifyCb()
    verifyAnswerCb && verifyAnswerCb()

    clearInterval(verifyInterval)
    clearTimeout(verifyTimer)
  }
}
162 |
/**
 * Build a fake `AudioContext` constructor that sandboxes lesson code: it
 * proxies node-creation methods of the real context but substitutes the
 * supplied `output` node for `destination` (so the verifier can meter and
 * route the lesson's audio), and pins `currentTime` to no earlier than
 * `startTime` so user code and answer code schedule against the same clock.
 *
 * @param {AudioContext} audioContext - the real, shared context
 * @param {AudioNode} output - node standing in for `destination`
 * @param {Number} [startTime] - earliest `currentTime` to report
 * @returns {Function} constructor handed to lesson code as `AudioContext`
 */
function wrapAudioContext (audioContext, output, startTime) {
  return function AudioContext () {
    return {
      createDelay: audioContext.createDelay.bind(audioContext),
      createBufferSource: audioContext.createBufferSource.bind(audioContext),
      createStereoPanner: audioContext.createStereoPanner.bind(audioContext),
      createWaveShaper: audioContext.createWaveShaper.bind(audioContext),
      decodeAudioData: audioContext.decodeAudioData.bind(audioContext),
      createOscillator: audioContext.createOscillator.bind(audioContext),
      createChannelMerger: audioContext.createChannelMerger.bind(audioContext),
      // ChannelSplitterNode is listed in the README coverage, but was not
      // exposed here; lesson code calling it would crash in the sandbox
      createChannelSplitter: audioContext.createChannelSplitter.bind(audioContext),
      createBiquadFilter: audioContext.createBiquadFilter.bind(audioContext),
      createGain: audioContext.createGain.bind(audioContext),
      createConvolver: audioContext.createConvolver.bind(audioContext),
      // allow building raw AudioBuffers (e.g. impulse responses for reverb)
      createBuffer: audioContext.createBuffer.bind(audioContext),
      get sampleRate () {
        return audioContext.sampleRate
      },
      get currentTime () {
        // for sync between audio contexts
        return Math.max(startTime || 0, audioContext.currentTime)
      },
      destination: output
    }
  }
}
184 |
/**
 * Pass-through node that also feeds `target` a silent, delayed copy of the
 * signal. The delayed branch is fully muted by an all-zero WaveShaper
 * curve, so it adds no audible output — presumably it keeps `target`
 * (an analyser) receiving a stream for `delayTime` seconds after the
 * source stops. TODO confirm intent with callers in the verifier.
 *
 * @param {AudioNode} target - node to feed (e.g. an AnalyserNode)
 * @param {Number} delayTime - trailing time in seconds
 * @returns {GainNode} input node for callers to connect into
 */
function signalEndDelay (target, delayTime) {
  var context = target.context

  var input = context.createGain()

  var tail = context.createDelay(delayTime)
  tail.delayTime.value = delayTime

  // a WaveShaper whose curve is all zeros maps every sample to silence
  var silencer = context.createWaveShaper()
  silencer.curve = new Float32Array([0, 0])

  // muted tail path: input -> delay -> silencer -> target
  input.connect(tail)
  tail.connect(silencer)
  silencer.connect(target)

  // direct path: input -> target
  input.connect(target)

  return input
}
202 |
--------------------------------------------------------------------------------