├── assets
├── audio
│ └── ambience.mp3
└── js
│ ├── canvas.js
│ └── direct-monitor.js
├── index.html
└── README.md
/assets/audio/ambience.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/house/web-audio-talk/master/assets/audio/ambience.mp3
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
--------------------------------------------------------------------------------
/assets/js/canvas.js:
--------------------------------------------------------------------------------
// Grab the visualisation canvas and its 2D drawing context
var canvas = document.querySelector('#c');
var canvasContext = canvas.getContext("2d");

// Size the canvas to fill its wrapper element
var canvasWidth = document.querySelector('.wrapper').clientWidth;
var canvasHeight = document.querySelector('.wrapper').clientHeight;

canvas.setAttribute('width', canvasWidth);
canvas.setAttribute('height', canvasHeight);

// Will hold the requestAnimationFrame handle for the active draw loop
var drawVisual;
// Paints the night-sky gradient over the whole canvas and refreshes the
// shared WIDTH/HEIGHT globals that the other drawing code in this file reads.
function drawBackground() {

  WIDTH = canvas.width;
  HEIGHT = canvas.height;

  // Top-to-bottom sky gradient
  var sky = canvasContext.createLinearGradient(0, 0, 0, canvas.height);
  sky.addColorStop(0, 'rgb(32,43,53)');
  sky.addColorStop(1, 'rgb(59,83,103)');

  canvasContext.fillStyle = sky;
  canvasContext.fillRect(0, 0, WIDTH, HEIGHT);

}
25 |
// Draws a foggy gradient over the scene to soften the background mountains.
// Uses canvas.width/canvas.height directly instead of the WIDTH/HEIGHT
// globals: in this file those are never declared and only come into
// existence once drawBackground() assigns them, so calling redrawFog()
// first would throw a ReferenceError. (The direct-monitor.js version of
// this function already reads the canvas dimensions directly.)
function redrawFog() {
  var fogGradient = canvasContext.createLinearGradient(0, 0, 0, canvas.height);
  fogGradient.addColorStop(0, 'rgba(32,43,53, .9)');
  fogGradient.addColorStop(1, 'rgba(59,83,103, .0)');
  canvasContext.fillStyle = fogGradient;
  canvasContext.fillRect(0, 0, canvas.width, canvas.height);
}
33 |
// Sets up a per-frame draw loop for the (mostly commented-out) mountain demo.
// NOTE(review): this file looks like an older duplicate of the drawing code
// in direct-monitor.js; `analyser`, `bufferLength` and `dataArray` are not
// defined anywhere in this file — confirm where they are meant to come from
// before re-enabling the commented calls below.
function visualise() {

  function draw(drawType, xOff, foreground, yHeight) {

    console.log('drawing');

    // Re-schedule ourselves every frame. The wrapper re-passes the original
    // arguments; a bare `requestAnimationFrame(draw)` would invoke
    // draw(timestamp), clobbering drawType with a number from frame 2 onward
    // so the 'mountain' branch would never match again.
    drawVisual = requestAnimationFrame(function () {
      draw(drawType, xOff, foreground, yHeight);
    });

    analyser.getByteTimeDomainData(dataArray);

    for (var i = 0; i < bufferLength; i++) {
      var v = dataArray[i] / 128.0;
      var y = v * HEIGHT/2;
      console.log("V : " + v + "Y: " + y);
    }

    // set up baselines for foreground and background drawing (terrain only)
    var bgBaseline = canvas.height - 300;
    var fgBaseline = canvas.height;

    // Declared locally so we don't leak implicit globals.
    var drawingBaseline;
    var fill;

    if (foreground) {
      drawingBaseline = fgBaseline;
      fill = 'rgb(24,37,46)';
    }
    else {
      drawingBaseline = bgBaseline;
      fill = 'rgb(24,37,46)';
    }

    if (drawType === 'mountain') {

      var mountain = new Path2D();
      // start x = mountain middle offset
      // start y = mountain height
      mountain.moveTo(xOff, drawingBaseline - yHeight);

      // draw line to background baseline and rightmost triangle point
      mountain.lineTo(xOff + (yHeight / 2), drawingBaseline);

      // draw bottom of mountain
      mountain.lineTo(xOff - (yHeight / 2), drawingBaseline);

      console.log(mountain);

      // draw the mountain yo
      canvasContext.fillStyle = fill;
      canvasContext.fill(mountain);

      redrawFog();

    }
    if (drawType === 'hill') {
      // TODO: hill drawing was never implemented here.
    }
  }

  // drawBackground();
  // draw('mountain', 200, 0, 400);
  // draw('mountain', 800, 0, 200);
  // draw('mountain', 0, 0, 600);
  // draw('mountain', 1200, 0, 400);

}

drawBackground();
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cosmic noise demo thingy
2 |
3 | This is a demo (with _real bad_ JavaScript) that I presented at Hybrid Conf. It takes audio input and turns it into stars and mountains and shit.
4 |
5 | ## Input and audio
6 | The input gets a stereo delay added to it for #super #cool #space #ambience when you’re making noise. It uses direct input by default and pipes that through a few delay and panner nodes to achieve this. The demo is live-input by default which means anything you play is output after the delay/panning signal chain. Use headphones if you don’t want crazy feedback.
7 |
8 | The code can be tweaked to use an audio file as an input if you want to go the pre-recorded route but in the name of randomness and experimentation, try it with direct input for the best results.
9 |
10 | The background ambience is basically a super long A-minor chord and is _chill as fuck_. The visualisation doesn’t take the audio file into account, it’s just there so it works better when you’re stoned off your face. Subsequently, you can replace it with an audio file of your choosing and not have it interfere with the visualisation results.
11 |
12 | ## Visualisation
13 | The actual visualisation depends on a few factors from the audio input. It uses an `AnalyserNode` that the default `AudioContext` is connected to. This can reveal lots of dope information about the incoming audio signal. This information is used differently depending on what part of the ‘scene’ is being created.
14 |
15 | All visualisations are just 2D Canvas shapes with various effects. The stars are circles with shadows, the mountains are triangular paths, with background mountains having a glow for dat ethereal ambience. The foreground and background ‘hills’ are just bezier curves and the trees are combinations of triangles that end up looking like those shitty Christmas cards you made in primary school.
16 |
17 | Colours are currently locked in but you can totally make this ‘themeable’ (I plan to do so soon) to create any combination of natural/totally fucked up environments.
18 |
19 | ### Stars
20 | The stars depend mostly on signal amplitude. Play loud/drawn-out sounds and more stars will appear, play sporadically and less will appear. Play nothing and you’ll have a shit sky with no stars and a big fuckin mooon.
21 |
22 | ### Background mountains
23 | These are based on frequency and amplitude, with a bit of randomisation to stop you getting shitty, homogeneous, ‘jagged-teeth’ like mountains. Higher frequencies will draw more mountains on the right of the scene, while lower frequencies will draw them towards the left. The amplitude of those frequencies will determine the peak of the mountains. There’s some really dumb code to try to isolate peak frequencies away from any harmonics/overtones but it is far from a good example of frequency-detection (also, that’s not the point of this, overtones should make little shitty mountains pop up anyway).
24 |
25 | ### Foreground mountains
26 | Pretty much exactly the same as the background ones except there’s a few different multipliers to make sure the peaks stay slightly lower than the background mountains and they don’t grow as ‘wide’.
27 |
28 | ### Background trees
29 | These are based around a horizontal grid, and again use non-intelligent frequency detection. Instead of plotting based on every frequency in the `AnalyserNode`’s bin; they’re limited to a customisable horizontal grid. This means you can ‘stack’ triangles on top of each other instead of having a load of shitty triangles scattered everywhere and shitty floating trees.
30 |
31 | ### Background hills
32 | This is the dumbest part of the scene and something that needs improvement. There’s a lot of randomisation, but basically the frequency determines where the bezier curve ‘peaks’ and it slopes off to the bottom after that. It uses ‘randomised symmetry’ which basically means the left–right frequency ‘grid’ is halved and a random integer dictates if the hill is drawn left/right or right/left. It results in a kinda-but-not symmetrical layout for hills but also means there’s little variation in terrain.
33 |
34 | ### Foreground trees
35 | The trees themselves are drawn randomly across a grid, the audio has no impact on where the trees are placed but the amplitude has a slight effect on the height of the tree, which is also randomised. The ‘branches’ for each tree are based on how intensely you play in a 2.5s window while each tree is being drawn. The left/right ‘growth’ of the branches are randomised so the audio input only affects the horizontal spread of each branch.
36 |
37 | ### Foreground terrain
38 | Exactly like the background terrain but with a lower peak for the curves.
39 |
40 | ### Ambience and celestials
41 | There’s some really shitty shooting stars and comets and also a random fucking planet that zoom past (in front of the moon). These are done with simple `setInterval`s because they’re not as accurate as other JS timing functions and that’s what I want. They don’t have to be there and they’re the only CSS/HTML that’s used outside of the body and canvas tags etc. but I kinda like them and also fuck you this is my world I’ll setInterval wherever I want.
42 |
43 | ### Transitions between visualise sections
44 | The transitions currently use `setTimeout` to change the `drawing` variable. This is far from ideal but a quick and dirty solution to switching through each section. This should be rewritten to use `requestAnimationFrame` but for now I can live with it.
45 |
46 | ## The code
47 | The code is not great at all.
48 |
49 | I’ve tried to comment it as best as I can; a decent understanding of Canvas and the Web Audio API would be useful but there’s still some idiosyncrasies from how I’ve pieced it together. The comments should help but I should also rewrite it when I get better at JS. Pls don’t hate me.
50 |
51 | ## Running the thing
52 | This all runs locally, the best way I’ve found to do it is using iojs’ simple http-server.
53 |
54 | * `npm install -g http-server`
55 | * `http-server`
56 |
57 | Then go to `0.0.0.0:8000` (or whatever you set the port to be) and you should be able to run it fine.
58 |
59 | You’ll get the whole
60 |
61 | > 0.0.0.0 Wants to use your microphone
62 |
63 | Message, just click `Allow` and melt some faces.
64 |
65 | ## Press Enter to go full-screen
66 | Right now, because I demoed it at full-screen, the background won’t get drawn until it’s full-screen and it’ll all look fucking terrible. So press Enter to go full screen and it should be okay. I’ll fix this soon. Maybe.
67 |
68 | ## License and stuff
69 | Do what you want with this code and the music I’ve written for the demo except sell it like a fucking shitlord. If you make some dope shit based off it PLEASE PLEASE share it with me. Visualising audio like this is super interesting to me and I’d absolutely love someone more creative than me to jump on and experiment. If you like it, hit me up on [Twitter](http://twitter.com/scott_riley) and we can say nice things to each other xoxo
70 |
71 |
--------------------------------------------------------------------------------
/assets/js/direct-monitor.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | var fullScreen = document.querySelector('.fullScreen');
4 |
5 | // Toggle full-screen when the user hits the enter key
6 | document.addEventListener("keydown", function(e) {
7 | if (e.keyCode == 13) {
8 | toggleFullScreen();
9 | }
10 | }, false);
11 |
12 | // Fucking vendor prefixes fml
// Fucking vendor prefixes fml
// Enters full-screen if nothing currently is (trying the standard API first,
// then each vendor-prefixed variant), otherwise exits. Afterwards it
// re-draws the background on a 1s delay so the canvas can follow the new
// viewport size.
function toggleFullScreen() {
  var doc = document;
  var docEl = document.documentElement;

  var alreadyFullScreen =
    doc.fullscreenElement ||           // standard
    doc.mozFullScreenElement ||        // Firefox
    doc.webkitFullscreenElement ||     // WebKit
    doc.msFullscreenElement;           // IE/Edge

  if (!alreadyFullScreen) {
    if (docEl.requestFullscreen) {
      docEl.requestFullscreen();
    } else if (docEl.msRequestFullscreen) {
      docEl.msRequestFullscreen();
    } else if (docEl.mozRequestFullScreen) {
      docEl.mozRequestFullScreen();
    } else if (docEl.webkitRequestFullscreen) {
      docEl.webkitRequestFullscreen(Element.ALLOW_KEYBOARD_INPUT);
    }
  } else {
    if (doc.exitFullscreen) {
      doc.exitFullscreen();
    } else if (doc.msExitFullscreen) {
      doc.msExitFullscreen();
    } else if (doc.mozCancelFullScreen) {
      doc.mozCancelFullScreen();
    } else if (doc.webkitExitFullscreen) {
      doc.webkitExitFullscreen();
    }
  }

  setTimeout(function() {
    drawBackground();
  }, 1000);
}
40 |
// LOTS AND LOTS OF INITIALISING AND SHIT

// This is our One And Only™ audio context (webkit fallback for old Safari)
var audioContext = new (window.AudioContext || window.webkitAudioContext)();

// Global analyser node that exposes all sorts of information on the audio stream
var analyser = audioContext.createAnalyser();

// Canvas context for all our visualisation needs, sized to fill the wrapper
var canvas = document.querySelector('#c');
var canvasContext = canvas.getContext("2d");
var canvasWidth = document.querySelector('.wrapper').clientWidth;
var canvasHeight = document.querySelector('.wrapper').clientHeight;
canvas.setAttribute('width', canvasWidth);
canvas.setAttribute('height', canvasHeight);

// Visualisation variables that update over time
var drawVisual;              // requestAnimationFrame handle for the draw loop
var previousTime = 0;        // last timestamp used for time-based drawing
var treeX = 50;              // x position for the next foreground tree
var lastTreeX = 300;         // x position of the most recently drawn tree
var lastTreeHeight = 0;      // height of the most recently drawn tree
var timeBetweenBranches = 0; // timestamp of the most recently drawn branch

// So our visualise function always knows what to draw; updated as time passes
var drawing = "stars";
67 |
// Utility for generating a random int because Math.random is fucking hilariously shit
/**
 * Returns a random integer in the inclusive range [min, max].
 * @param {number} min - lower bound (inclusive)
 * @param {number} max - upper bound (inclusive)
 * @returns {number}
 */
function getRandomInt(min, max) {
  var span = max - min + 1;
  return min + Math.floor(Math.random() * span);
}
72 |
// Shared canvas dimensions, refreshed by drawBackground()/visualise().
// (I probs shouldn’t even use these; will fix)
var WIDTH;
var HEIGHT;
76 |
77 | // Drawing our background gradient to show the sky
// Drawing our background gradient to show the sky.
// Re-measures the wrapper on every call so the canvas tracks viewport
// changes (e.g. entering/leaving full-screen), then paints the gradient.
function drawBackground() {

  // Re-size the canvas to the wrapper's current dimensions
  var wrapper = document.querySelector('.wrapper');
  var canvasWidth = wrapper.clientWidth;
  var canvasHeight = wrapper.clientHeight;

  canvas.setAttribute('width', canvasWidth);
  canvas.setAttribute('height', canvasHeight);

  // Refresh the shared dimension globals used elsewhere in this file
  WIDTH = canvas.width;
  HEIGHT = canvas.height;

  // Vertical night-sky gradient
  var sky = canvasContext.createLinearGradient(0, 0, 0, canvas.height);
  sky.addColorStop(0, 'rgb(33,44,57)');
  sky.addColorStop(.7, 'rgb(58,82,102)');

  canvasContext.fillStyle = sky;
  canvasContext.fillRect(0, 0, WIDTH, HEIGHT);

}
97 |
98 | // Draw a foggy gradient to make the background mountains more subtle
99 | // Also draw a moon because WHY NOT???
// Draw a foggy gradient to make the background mountains more subtle
// Also draw a moon because WHY NOT???
function redrawFog() {
  // Fog: strong at the top of the canvas, fading to nothing at the bottom
  var fog = canvasContext.createLinearGradient(0, 0, 0, canvas.height);
  fog.addColorStop(0, 'rgba(33,44,57, .9)');
  fog.addColorStop(1, 'rgba(59,83,103, .0)');
  canvasContext.fillStyle = fog;
  canvasContext.fillRect(0, 0, canvas.width, canvas.height);

  // Moon: a plain filled circle centred near the top-left corner,
  // drawn with the shadow disabled so it stays crisp over the fog
  var radius = 180;
  canvasContext.beginPath();
  canvasContext.arc(20, 20, radius, 0, 2 * Math.PI, false);
  canvasContext.fillStyle = 'rgba(245,230,230, 1)';
  canvasContext.shadowColor = 'transparent';
  canvasContext.fill();
}
115 |
116 | // This big fucker handles all the visualisation and drawing
117 | function visualise() {
// Drives the whole scene. Sets up the analyser, then starts a
// requestAnimationFrame loop (`draw`) that renders a different layer of the
// scene depending on the module-level `drawing` state string:
// "stars", "mountainBg", "mountain", "treesBg", "treesFg", "terrainBg", "terrainFg".
// Mutates the module-level WIDTH/HEIGHT, previousTime, treeX/lastTreeX,
// lastTreeHeight and timeBetweenBranches variables as it runs.
118 |
119 | WIDTH = canvas.width;
120 | HEIGHT = canvas.height;
121 |
122 | // Setting up our initial analyser values
123 | analyser.fftSize = 64; // Fast-fourier Transform size; tells us how much fidelity we’ll be working with
124 | var bufferLength = analyser.frequencyBinCount; // This is almost always half of the FFT length, and is how many ‘bins’ of frequency data we have
125 | var dataArray = new Uint8Array(bufferLength); // Fuck Uint
126 |
127 | function draw() {
128 |
129 | // Re-call the draw function every frame so we don’t have to worry about setInterval’s shocking performance
130 | // This means that draw is run every frame, which means we’re pretty close to real-time manipulation
131 | drawVisual = requestAnimationFrame(draw);
132 |
133 | // Reset the previousTime every ten seconds, used so we know how much time has elapsed for time-dependent visualisation (used sparingly)
134 | if (previousTime == 0) {
135 | previousTime = audioContext.currentTime;
136 | }
137 | else if ( (audioContext.currentTime - previousTime) > 10) {
138 | previousTime = audioContext.currentTime;
139 | }
140 |
141 | // Populate our dataArray with the frequency bins from our analyser.
142 | // This is tricky to explain. Basically a ‘bin’ contains data about frequencies
143 | // The index of the array represents a frequency ‘bin’ e.g. dataArray[0] is our first bin, dataArray[12] is our 13th
144 | // The frequencies contained in each bin are determined by our fftSize. The higher the FFT size, the more granularly split our frequencies are
145 | // Ergo, a higher FFT size = more frequency bins, with smaller ranges
146 | // The value of an index in the dataArray represents the volume of those frequencies
147 | // As an example dataArray[0] with a value of 255 means there’s a load of bass frequencies, dataArray[0] with a value of 0 means there’s no bass frequencies
148 | // The value sensitivity of each ‘bin’ is determined by a min/max dB setting on the analyser; this isn’t too important right now, though
149 | // This is the longest comment I’ve ever written
150 | // I’ve basically just blogged
151 | // Skype me if you don’t get this
152 | // Kiss me if you love me
153 | analyser.getByteFrequencyData(dataArray);
154 |
155 | // Set up a horizontal grid for placing elements based on frequency energy
156 | // This is basically just a horizontal grid, split into columns, so we can plot shit left/right based on how loud our frequency bins are
157 | // The grid is calculated based on our fftSize, so every ‘bin’ is a column
158 | var colWidth = (canvas.width / analyser.fftSize);
159 | var yHeight;
160 | var x = 0;
161 | var drawingBaseline;
162 | var fill;
163 |
164 | // ifstatementsthatshouldbeswitches.tumblr.com/fuck-you-dad-i-do-what-i-want
165 | if (drawing === "mountainBg") {
166 | var bgBaseline = canvas.height - (canvas.height/4.3);
167 | var fgBaseline = canvas.height;
168 | colWidth = (canvas.width / 24);
169 |
170 | // The multiplier for the peak of the mountain, randomised.
171 | // Basing this on canvas height, RESPONSIVE FUCKIN MOUNTAINS
172 | var multiplier = getRandomInt(canvas.height * .019, canvas.height * .027);
173 |
174 | drawingBaseline = fgBaseline;
175 | fill = 'rgb(55, 75, 94)';
176 | canvasContext.shadowColor = 'rgba(180,110,110, .4)';
177 | canvasContext.shadowBlur = 400;
178 |
179 | // loop through the frequency array
180 | for(var i = 0; i < bufferLength; i++) {
181 |
182 | // For each frequency band, determine the loudest frequency
183 | // This gives us the ‘peak’ of the sound and determines height/y pos
// NOTE(review): at i === 0 this reads dataArray[-1] (undefined); the comparison
// is then false, so yHeight can remain undefined until a later bin sets it.
// The same pattern recurs in the other branches below — confirm intended.
184 | if(dataArray[i] < dataArray[i-1]) {
185 | yHeight = dataArray[i] * multiplier;
186 | }
187 |
188 | // For every frequency, determine if it’s loud enough to qualify as ‘drawable’
189 | // if not, we move to a different column for our drawing origin
// NOTE(review): the comments and branches look inverted — the ‘else’ (quiet,
// <= 30) branch is the one that draws, while loud bins only advance x.
// Confirm whether the > 30 test should be flipped.
190 | if(dataArray[i] > 30) {
191 | x += colWidth * i;
192 | }
193 | else {
194 | // It’s loud enough to be visualised, let’s draw some shit
195 | var mountain = new Path2D();
196 |
197 | // start x = mountain middle offset
198 | // start y = mountain height
199 | mountain.moveTo(x, drawingBaseline - yHeight);
200 |
201 | // draw line to background baseline and rightmost triangle point
202 | mountain.lineTo(x + (yHeight * 1.4), drawingBaseline);
203 |
204 | // draw bottom of mountain
205 | mountain.lineTo(x - (yHeight * 1.4), drawingBaseline);
206 |
207 | // draw the mountain yo
208 | canvasContext.fillStyle = fill;
209 | canvasContext.fill(mountain);
210 | }
211 |
212 | }
213 | }
214 |
215 | else if (drawing === "mountain") {
216 | // This is almost identical to the above
217 | var bgBaseline = canvas.height - (canvas.height/4.3);
218 | var fgBaseline = canvas.height;
219 | colWidth = (canvas.width / 24);
220 |
221 | var multiplier = getRandomInt(canvas.height * .014, canvas.height * .022);
222 |
223 | // different fill style for our foreground mountains
224 | drawingBaseline = fgBaseline;
225 | fill = 'rgb(26,36,44)';
226 |
227 | // loop through the frequency array (backwards to determine the highest frequency)
228 | for(var i = 0; i < bufferLength; i++) {
229 |
230 | // For each frequency band, determine the loudest frequency
231 | // This gives us the ‘peak’ of the sound and determines height/y pos
232 | if(dataArray[i] < dataArray[i-1]) {
233 | yHeight = dataArray[i] * multiplier;
234 | }
235 |
236 | // For every frequency, determine if it’s loud enough to qualify as ‘drawable’
237 | // if not, we move to a different column for our drawing origin
238 | if(dataArray[i] > 30) {
239 | x += colWidth * i;
240 | }
241 | else {
242 | var mountain = new Path2D();
243 | // start x = mountain middle offset
244 | // start y = mountain height
245 | mountain.moveTo(x, drawingBaseline - yHeight);
246 |
247 | // draw line to background baseline and rightmost triangle point
248 | mountain.lineTo(x + (yHeight / 1.2), drawingBaseline);
249 |
250 | // draw bottom of mountain
251 | mountain.lineTo(x - (yHeight / 1.2), drawingBaseline);
252 |
253 | // draw the mountain yo
254 | canvasContext.fillStyle = fill;
255 | canvasContext.fill(mountain);
256 | }
257 |
258 | }
259 | }
260 |
261 | else if (drawing === "treesBg") {
262 | // Lets draw some fucking trees
263 | var bgBaseline = canvas.height - (canvas.height/5.3);
264 | var fgBaseline = canvas.height;
265 | colWidth = (canvas.width / 36);
266 |
267 | // We’re going to somewhat randomise the y position of each triangle we draw, that way we can ‘stack’ triangles to make trees
268 | var multiplier = getRandomInt(2,3);
269 | drawingBaseline = bgBaseline - getRandomInt(canvas.height * .015, canvas.height * .14);
270 |
271 | // Nice green fill for dat christmas tree vibe
272 | fill = 'rgb(53,85,82)';
273 |
274 | // loop through the frequency array (backwards to determine the highest frequency)
275 | for(var i = 0; i < bufferLength; i++) {
276 |
277 | // For each frequency band, determine the loudest frequency
278 | // This gives us the ‘peak’ of the sound and determines height/y pos
279 | if(dataArray[i] < dataArray[i-1]) {
280 | yHeight = dataArray[i] * multiplier + (multiplier + getRandomInt(canvas.height * .012, canvas.height * .06));
281 | }
282 |
283 | // For every frequency, determine if it’s loud enough to qualify as the max
284 | if(dataArray[i] > 30) {
285 | x += colWidth * i;
286 | }
287 | else {
288 | var tree = new Path2D();
289 | // start x = tree middle offset
290 | // start y = tree height
291 | tree.moveTo(x, drawingBaseline - yHeight);
292 |
293 | // draw line to background baseline and rightmost triangle point
294 | tree.lineTo(x + (yHeight / 2.8), drawingBaseline);
295 |
296 | // draw bottom of tree
297 | tree.lineTo(x - (yHeight / 3.8), drawingBaseline);
298 |
299 | // draw the tree yo
300 | canvasContext.fillStyle = fill;
301 | canvasContext.fill(tree);
302 |
303 | }
304 |
305 | }
306 | }
307 |
308 | else if (drawing === "treesFg") {
309 | // Lets draw some shitty black Tim Burton wannabe-trees
310 | var bgBaseline = (canvas.height/4.3);
311 | var fgBaseline = canvas.height;
312 |
313 | // 24 potential places to draw a tree
314 | var colCount = 24
315 | colWidth = (canvas.width / colCount);
316 |
317 | // Randomising the Y position so the trees aren’t all in one row
318 | var multiplier = getRandomInt(2,colCount);
319 | drawingBaseline = fgBaseline + getRandomInt(canvas.height * .015, canvas.height * .23);
320 | fill = 'rgb(29,26,32)';
321 |
322 | // We want to draw a tree every 2.5s
323 | if ( (audioContext.currentTime - previousTime) > 2.5) {
324 | // 2.5s has passed since the drawing of the last tree
325 |
326 | // Randomise the tree height
327 | var treeHeight = getRandomInt(canvas.height * .3, canvas.height * .46);
328 |
329 | //draw the tree
330 | var tree = new Path2D();
331 |
332 | // There’s lots of multiplies based on the canvas height here
333 | // Pls don’t hate me
334 | tree.moveTo(treeX, canvas.height - (canvas.height * .15));
335 | tree.lineTo(treeX + 35, canvas.height - treeHeight);
336 | tree.lineTo(treeX + 70, canvas.height - (canvas.height * .15));
337 | lastTreeX = treeX;
338 | treeX = colWidth * getRandomInt(2,colCount - 1);
339 | canvasContext.fillStyle = fill;
340 | canvasContext.fill(tree);
341 |
342 | // Reset previousTime so a new tree can grow
343 | previousTime = audioContext.currentTime;
344 |
345 | lastTreeHeight = treeHeight;
346 | if(timeBetweenBranches == 0) {
347 | // We’re ready to draw some branches for the first tree
348 | timeBetweenBranches = audioContext.currentTime;
349 | }
350 |
351 | }
352 |
353 | else if ( (audioContext.currentTime - timeBetweenBranches) > .5 && lastTreeHeight > 0) {
354 | // If we’ve already drawn a tree AND .5s has passed since we last drew a branch, we can draw another one
355 | var maxStrength = 0;
356 |
357 | // Loop through our frequencies and determine how ‘wide’ our branch should be
358 | for(var i = 0; i < bufferLength; i++) {
359 | if(dataArray[i] < dataArray[i-1] && dataArray[i] > 10) {
360 | // 4 is arbitrary, fear not the 4
361 | maxStrength = dataArray[i] * 4;
// NOTE(review): values in (80, 100] pass through unclamped while values above
// 100 are clamped down to 80 — confirm the mismatched thresholds are intentional.
362 | if(maxStrength > 100) {
363 | // Setting a threshold so we don’t get super long branches that ruin everyone’s life
364 | maxStrength = 80;
365 | }
366 | }
367 | }
368 |
369 | // Start drawing the branches
370 | // This is a bit weird, but basically we want to make sure that our branches are drawn within the top/bottom of the tree
371 | // Here we’re basically making sure that the lowest branch is drawn at 15% of the tree’s height or higher
372 | // and that the highest branch is drawn at 45% of the tree’s height or lower
373 | var branchY = canvas.height - lastTreeHeight + (lastTreeHeight * (getRandomInt(15, 45) / 100) );
374 | var branch = new Path2D();
375 |
376 | // Getting our point in the middle of the tree to draw our branch
377 | branch.moveTo(lastTreeX + 30, branchY);
378 | // Lining upwards to draw the ‘height’ of our branch
379 | branch.lineTo(lastTreeX + 30, branchY - 10);
380 |
381 | // Randomly draw a left/right–pointing branch
382 | if ( getRandomInt(1,2) == 1) {
383 | branch.lineTo(lastTreeX + (maxStrength + 15), branchY - 5);
384 | }
385 | else {
386 | branch.lineTo(lastTreeX - (maxStrength - 15), branchY - 5);
387 | }
388 |
389 | // Fill dat shit and update our timing variables to move on to the next branch
390 | canvasContext.fillStyle = fill;
391 | canvasContext.fill(branch);
392 | timeBetweenBranches = audioContext.currentTime;
393 |
394 | }
395 |
396 | }
397 |
398 | else if (drawing === "stars") {
399 | // The simplest visualisation
400 | var starRadius = 2;
401 | for(var i = 0; i < bufferLength; i++) {
402 | if(dataArray[i] > 100) {
403 | // Setting the stars opacity based on frequency energy
// NOTE(review): dataArray[i] * 0.005 exceeds 1.0 for bins above 200;
// presumably the rgba alpha just saturates at fully opaque — confirm.
404 | var opacity = dataArray[i] * 0.001 * 5;
405 | canvasContext.beginPath();
406 | // Drawing little circles for our stars; randomised on the canvas
407 | canvasContext.arc(getRandomInt(0, canvas.width), getRandomInt(0, canvas.height), starRadius, 0, 2 * Math.PI, false);
408 | canvasContext.fillStyle = 'rgba(255,255,255, ' + opacity + ')';
409 | canvasContext.shadowColor = 'rgba(255,255,255, ' + (opacity - .2) + ')';
410 | canvasContext.shadowBlur = 4;
411 | canvasContext.shadowOffsetX = 0;
412 | canvasContext.shadowOffsetY = 0;
413 | canvasContext.fill();
414 | }
415 | }
416 | }
417 |
418 | else if (drawing === "terrainBg") {
419 | // Background hills
420 | // This code is bad
421 | // I am so sorry
422 | if (previousTime == 0) {
423 | previousTime = audioContext.currentTime;
424 | }
425 | else if ( (audioContext.currentTime - previousTime) > .5) {
426 | // We’re going to draw a curve every .5s so our terrain doesn’t get super weird
427 | previousTime = audioContext.currentTime;
428 | for(var i = 0; i < bufferLength; i++) {
429 | // for every frequency in our bin
430 | if(dataArray[i] > 100) {
431 | // if it’s loud enough to fuck with our terrain, start the drawing path
432 | var terrain = new Path2D();
433 | // 24 columns to fuck shit up on
434 | colWidth = (canvas.width / 24);
435 | // Set the height of our terrain based on the amplitude of the current frequency bin and some weird randomising
436 | var terrainHeight = dataArray[i];
437 | var terrainOffset = terrainHeight * (getRandomInt(canvas.height * .0077, canvas.height * .023) / 10);
438 |
439 | // Set the X/Y coordinates for our control point of our quadratic curves
440 | // This determines how harsh/steep our curve is
441 | var terrainControlY = canvas.height - (terrainHeight * (getRandomInt(4,16) / 10));
442 | var terrainControlX = canvas.width/2 + i * getRandomInt(3,15);
443 |
444 | // Randomise the direction a curve is drawn from
445 | if(getRandomInt(1,2) == 1) {
446 | // drawing left–right
447 | terrain.moveTo(0, canvas.height);
448 | terrain.lineTo(0, canvas.height - terrainOffset);
449 | terrain.quadraticCurveTo(terrainControlX, terrainControlY, canvas.width, canvas.height);
450 | terrain.lineTo(0, canvas.height);
451 | canvasContext.fillStyle = 'rgb(42,59,74)';
452 | canvasContext.fill(terrain);
453 | }
454 | else {
455 | // drawing right–left
456 | terrain.moveTo(canvas.width, canvas.height);
457 | terrain.lineTo(canvas.width, canvas.height - terrainOffset);
458 | terrain.quadraticCurveTo(terrainControlX, terrainControlY, 0, canvas.height);
459 | terrain.lineTo(canvas.width, canvas.height);
460 | canvasContext.fillStyle = 'rgb(42,59,74)';
461 | canvasContext.fill(terrain);
462 | }
463 |
464 | }
465 | }
466 | }
467 | }
468 |
469 | else if (drawing === "terrainFg") {
470 | // Just as bad as above; just smaller multipliers
471 | if (previousTime == 0) {
472 | previousTime = audioContext.currentTime;
473 | }
474 | else if ( (audioContext.currentTime - previousTime) > .5) {
475 | previousTime = audioContext.currentTime;
476 | for(var i = 0; i < bufferLength; i++) {
477 | if(dataArray[i] > 100) {
478 | var terrain = new Path2D();
479 | colWidth = (canvas.width / 24);
480 | var terrainHeight = dataArray[i];
481 | var terrainOffset = terrainHeight * (getRandomInt(canvas.height * .0038, canvas.height * .012) / 10);
482 | var terrainControlY = canvas.height + (terrainHeight * (getRandomInt(canvas.height * .0015, canvas.height * .003) / 10));
483 | var terrainControlX = canvas.width + i * getRandomInt(3,150);
484 | if(getRandomInt(1,2) == 1) {
485 | terrain.moveTo(0, canvas.height);
486 | terrain.lineTo(0, canvas.height - terrainOffset);
487 | terrain.quadraticCurveTo(terrainControlX, terrainControlY, canvas.width, canvas.height);
488 | terrain.lineTo(0, canvas.height);
489 | canvasContext.fillStyle = 'rgb(26,36,44)';
490 | canvasContext.fill(terrain);
491 | }
492 | else {
493 | terrain.moveTo(canvas.width, canvas.height);
494 | terrain.lineTo(canvas.width, canvas.height - terrainOffset);
495 | terrain.quadraticCurveTo(terrainControlX * -1, terrainControlY, 0, canvas.height);
496 | terrain.lineTo(canvas.width, canvas.height);
497 | canvasContext.fillStyle = 'rgb(26,36,44)';
498 | canvasContext.fill(terrain);
499 | }
500 |
501 | }
502 | }
503 | }
504 | }
505 |
506 | };
507 |
508 | draw();
509 |
510 | }
511 |
function gotStream(stream) {
    // Runs once the user grants microphone access.
    // Starts the ambient backing track, schedules the decorative DOM
    // elements and visualisation phase changes, wires the mic input
    // through a stereo "ping-pong" delay, and starts the draw loop.
    //
    // @param {MediaStream} stream - the user's microphone stream.

    // Ambient background track that plays under the mic input.
    var audioFile = new Audio('assets/audio/ambience.mp3');
    audioFile.play();

    // Periodically append a decorative element to the wrapper.
    // CSS classes t1-t5 choose a randomised vertical position.
    function spawnDrifter(baseClass, intervalMs) {
        setInterval(function() {
            var el = document.createElement('div');
            el.className = baseClass + ' t' + getRandomInt(1, 5);
            document.querySelector('.wrapper').appendChild(el);
        }, intervalMs);
    }

    spawnDrifter('shooting-star', 5000);
    spawnDrifter('comet', 12000);
    spawnDrifter('planet', 14000);

    // Switch the active visualisation layer on a fixed timeline.
    // `drawing` is a module-level flag read by the draw loop; `extra`
    // is an optional one-shot transition callback.
    function switchScene(sceneName, delayMs, extra) {
        setTimeout(function() {
            canvasContext.shadowColor = "transparent";
            drawing = sceneName;
            if (extra) {
                extra();
            }
        }, delayMs);
    }

    switchScene("mountainBg", 10000, drawAmbience);
    switchScene("mountain", 15000);
    switchScene("treesBg", 23000, redrawFog);
    switchScene("terrainBg", 30000);
    switchScene("treesFg", 38000);
    switchScene("terrainFg", 60500);

    // --- Audio graph ---------------------------------------------------
    // mic source -> gain (boost) -> analyser -> dry out + two delay lines

    // Audio source from the user's microphone stream.
    var mediaStreamSource = audioContext.createMediaStreamSource(stream);

    // Slight boost so the mic is audible over the background sound.
    var gain = audioContext.createGain();
    gain.gain.value = 1.3;
    mediaStreamSource.connect(gain);

    // The analyser (created earlier in this file) lets the visualiser
    // read frequency data from the boosted signal.
    gain.connect(analyser);

    // Two delay lines, one per side; the left delay is much shorter.
    var delayLeft = audioContext.createDelay();
    delayLeft.delayTime.value = 0.16;
    var delayRight = audioContext.createDelay();
    delayRight.delayTime.value = 0.28;

    // Panner nodes spread the delays 80% left and 80% right.
    var pannerLeft = audioContext.createStereoPanner();
    pannerLeft.pan.value = -.8;
    var pannerRight = audioContext.createStereoPanner();
    pannerRight.pan.value = .8;

    // Per-side feedback gains; the left tail lasts much longer.
    var feedbackLeft = audioContext.createGain();
    feedbackLeft.gain.value = .84;
    var feedbackRight = audioContext.createGain();
    feedbackRight.gain.value = .24;

    // Low-pass filters warm up each repeat of the delay.
    // (Low-pass is the default BiquadFilterNode type.)
    var lowPassLeft = audioContext.createBiquadFilter();
    lowPassLeft.frequency.value = 900;
    var lowPassRight = audioContext.createBiquadFilter();
    lowPassRight.frequency.value = 700;

    // Feedback loops: delay -> feedback gain -> low-pass -> back into delay.
    // FIX: the filters were previously dead ends (fed from the feedback
    // gains but never connected back into the loop), so the documented
    // "warming" filtering never happened; the loop now runs through them.
    delayLeft.connect(feedbackLeft);
    feedbackLeft.connect(lowPassLeft);
    lowPassLeft.connect(delayLeft);
    delayRight.connect(feedbackRight);
    feedbackRight.connect(lowPassRight);
    lowPassRight.connect(delayRight);

    // Wet paths: the analysed signal feeds both delays, each delay is
    // panned to its side, and each panner goes to the output.
    analyser.connect(delayLeft);
    analyser.connect(delayRight);
    delayLeft.connect(pannerLeft);
    delayRight.connect(pannerRight);
    pannerLeft.connect(audioContext.destination);
    pannerRight.connect(audioContext.destination);

    // Dry path: without this line only the delays would be audible.
    analyser.connect(audioContext.destination);

    // Finally, start the canvas visualisation loop.
    visualise();

}
652 |
// Request microphone access; the browser shows its permission prompt.
// Prefer the standards-track, Promise-based navigator.mediaDevices API
// (the prefixed navigator.getUserMedia is deprecated and missing from
// modern browsers); fall back to the legacy callback form if needed.
// On success, gotStream(stream) wires up the audio graph and visuals.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(gotStream)
        .catch(function() { console.log("fucked up"); });
} else {
    // Legacy/prefixed fallback for older browsers.
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
    navigator.getUserMedia( { audio: true}, gotStream, function() { console.log("fucked up"); });
}
--------------------------------------------------------------------------------