├── .gitmodules
├── LICENSE.txt
├── README.md
├── chapter1
│   └── activation_functions.html
├── chapter3
│   └── tfjs_xor_batching.js
├── chapter5
│   ├── 0_webglFirstRendering
│   │   ├── index.html
│   │   └── script.js
│   ├── 1_mandelBrot
│   │   ├── index.html
│   │   └── script.js
│   ├── 2_renderToTexture
│   │   ├── index.html
│   │   ├── scriptCPU.js
│   │   └── scriptGPU.js
│   ├── 3_RTTfloat
│   │   ├── IDL_Rainbow.png
│   │   ├── index.html
│   │   ├── initialState.png
│   │   └── script.js
│   ├── 4_WGLMatrix
│   │   ├── WGLMatrix.js
│   │   ├── index.html
│   │   └── script.js
│   ├── 5_MNIST
│   │   ├── WGLMatrix.js
│   │   ├── data
│   │   │   ├── mnist_batch_0.png
│   │   │   ├── mnist_batch_1.png
│   │   │   ├── mnist_batch_10.png
│   │   │   ├── mnist_batch_11.png
│   │   │   ├── mnist_batch_12.png
│   │   │   ├── mnist_batch_13.png
│   │   │   ├── mnist_batch_14.png
│   │   │   ├── mnist_batch_15.png
│   │   │   ├── mnist_batch_16.png
│   │   │   ├── mnist_batch_17.png
│   │   │   ├── mnist_batch_18.png
│   │   │   ├── mnist_batch_19.png
│   │   │   ├── mnist_batch_2.png
│   │   │   ├── mnist_batch_20.png
│   │   │   ├── mnist_batch_3.png
│   │   │   ├── mnist_batch_4.png
│   │   │   ├── mnist_batch_5.png
│   │   │   ├── mnist_batch_6.png
│   │   │   ├── mnist_batch_7.png
│   │   │   ├── mnist_batch_8.png
│   │   │   ├── mnist_batch_9.png
│   │   │   └── mnist_labels.js
│   │   ├── index.html
│   │   ├── mnist_loader.js
│   │   ├── network.js
│   │   └── script.js
│   ├── 6_MNISTimproved
│   │   ├── WGLMatrix.js
│   │   ├── data
│   │   │   ├── mnist_batch_0.png
│   │   │   ├── mnist_batch_1.png
│   │   │   ├── mnist_batch_10.png
│   │   │   ├── mnist_batch_11.png
│   │   │   ├── mnist_batch_12.png
│   │   │   ├── mnist_batch_13.png
│   │   │   ├── mnist_batch_14.png
│   │   │   ├── mnist_batch_15.png
│   │   │   ├── mnist_batch_16.png
│   │   │   ├── mnist_batch_17.png
│   │   │   ├── mnist_batch_18.png
│   │   │   ├── mnist_batch_19.png
│   │   │   ├── mnist_batch_2.png
│   │   │   ├── mnist_batch_20.png
│   │   │   ├── mnist_batch_3.png
│   │   │   ├── mnist_batch_4.png
│   │   │   ├── mnist_batch_5.png
│   │   │   ├── mnist_batch_6.png
│   │   │   ├── mnist_batch_7.png
│   │   │   ├── mnist_batch_8.png
│   │   │   ├── mnist_batch_9.png
│   │   │   └── mnist_labels.js
│   │   ├── index.html
│   │   ├── mnist_loader.js
│   │   ├── network.js
│   │   └── script.js
│   └── README.md
├── chapter6
│   ├── 1_images
│   │   ├── loading.html
│   │   ├── segmentation.html
│   │   └── shapes.html
│   ├── 2_video
│   │   ├── capturing.html
│   │   ├── streaming.html
│   │   └── video_classification_dljs.html
│   ├── 3_audio
│   │   ├── decoding.html
│   │   └── recording.html
│   ├── 4_frameworks
│   │   ├── kerasjs.html
│   │   ├── tfjs.html
│   │   └── webdnn.html
│   ├── README.md
│   └── data
│       ├── Large-dog-barks - LICENSE.txt
│       ├── Large-dog-barks.mp3
│       ├── bike.jpg
│       ├── bike_object.png
│       ├── cat.jpeg
│       └── rand.bin
├── chapter7
│   ├── .gitignore
│   ├── 1_binary_data
│   │   ├── data.py
│   │   ├── data
│   │   │   ├── Large-dog-barks - LICENSE.txt
│   │   │   ├── Large-dog-barks.mp3
│   │   │   ├── bike.jpg
│   │   │   ├── bike_object.png
│   │   │   ├── cat.jpeg
│   │   │   └── rand.bin
│   │   ├── image_data.html
│   │   ├── lib
│   │   │   └── protobuf.js
│   │   ├── proto
│   │   │   ├── caffe.json
│   │   │   └── tf.json
│   │   ├── protobuf_caffe.html
│   │   └── protobuf_tensorflow.html
│   ├── 2_drawing_charts
│   │   ├── chart_types_1.html
│   │   ├── chart_types_2.html
│   │   ├── lib
│   │   │   └── chart.min.js
│   │   ├── line_chart.html
│   │   ├── update_chart_1.html
│   │   └── update_chart_2.html
│   ├── 3_drawing_sketches
│   │   └── index.html
│   ├── 4_audio_spectogram
│   │   ├── index.html
│   │   ├── lib
│   │   │   ├── chart.min.js
│   │   │   └── dsp.js
│   │   └── worker.js
│   └── 5_face_tracking
│       ├── faceFilter.html
│       ├── face_detector.html
│       ├── lib
│       │   ├── face.min.js
│       │   └── tracking.min.js
│       └── tracking_js.html
└── cover.jpg
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "chapter8/tfjs-lstm-text-generation"]
2 | path = chapter8/tfjs-lstm-text-generation
3 | url = git@github.com:reiinakano/tfjs-lstm-text-generation.git
4 | [submodule "chapter8/tfjs-autoencoder"]
5 | path = chapter8/tfjs-autoencoder
6 | url = git@github.com:reiinakano/tfjs-autoencoder.git
7 | [submodule "chapter8/tfjs-rock-paper-scissors"]
8 | path = chapter8/tfjs-rock-paper-scissors
9 | url = git@github.com:reiinakano/tfjs-rock-paper-scissors
10 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2018 Bleeding Edge Press
2 |
3 | Permission is hereby granted, free of charge, to any person
4 | obtaining a copy of this software and associated documentation
5 | files (the "Software"), to deal in the Software without
6 | restriction, including without limitation the rights to use,
7 | copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the
9 | Software is furnished to do so, subject to the following
10 | conditions:
11 |
12 | The above copyright notice and this permission notice shall be
13 | included in all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep learning in the browser
2 |
3 |
4 |
5 |
6 |
7 |
8 | Official repository of the book [Deep learning in the browser](https://bleedingedgepress.com/deep-learning-browser/), released in August 2018 and published by [Bleeding Edge Press](https://bleedingedgepress.com). Here you will find all of the source code for the demos in the book.
9 |
10 | Content of this repository:
11 | * [chapter 1: Introduction to deep learning - activation functions](/chapter1)
12 | * [chapter 3: Deep learning frameworks for JavaScript - Tensorflow.js](/chapter3)
13 | * [chapter 5: GPU acceleration with WebGL](/chapter5)
14 | * [chapter 6: Extracting data from the browser](/chapter6)
15 | * [chapter 7: Recipes for advanced data manipulation](/chapter7)
16 | * [chapter 8: Building applications with TensorFlow.js](/chapter8)
17 |
18 |
19 | ## Getting Started
20 |
21 | Clone the repo and all submodules.
22 |
23 | ```sh
24 | $ git clone git@github.com:backstopmedia/deep-learning-browser.git
25 | $ cd deep-learning-browser
26 | $ git submodule update --init --recursive
27 | ```
28 |
29 | You can serve the code for each chapter using a simple static web server.
30 |
31 | ```sh
32 | $ npm install http-server -g
33 | $ http-server chapter6 --cors -p 8081
34 | ```
35 |
36 | Navigate to [localhost:8081](http://localhost:8081) to run the code from the selected chapter.
37 |
38 |
39 | ## License
40 | All content in this repository is released under the *MIT License* (see [/LICENSE.txt](/LICENSE.txt)), except:
41 | * Third party libraries (e.g. in `/lib` subdirectory)
42 | * Third party data (e.g. in `/data` subdirectory)
43 |
--------------------------------------------------------------------------------
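If you prefer not to install `http-server` globally, the chapters can also be served with a few lines of Node.js. This is a minimal sketch (not part of the repository) that serves one chapter directory on port 8081 with the same permissive CORS header the README enables via `--cors`:

```js
// serve.js — minimal static server sketch (hypothetical helper, not in this repo)
// Usage: node serve.js chapter6
const http = require('http');
const fs = require('fs');
const path = require('path');

const root = path.resolve(process.argv[2] || 'chapter6');
const types = { '.html': 'text/html', '.js': 'text/javascript',
                '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg',
                '.mp3': 'audio/mpeg', '.bin': 'application/octet-stream' };

http.createServer((req, res) => {
  // strip the query string and map the URL onto the chapter directory
  const urlPath = decodeURIComponent(req.url.split('?')[0]);
  const relPath = urlPath.endsWith('/') ? urlPath + 'index.html' : urlPath;
  const filePath = path.join(root, relPath);

  fs.readFile(filePath, (err, data) => {
    if (err) { res.writeHead(404); res.end('Not found'); return; }
    res.writeHead(200, {
      'Content-Type': types[path.extname(filePath)] || 'application/octet-stream',
      'Access-Control-Allow-Origin': '*' // same effect as http-server --cors
    });
    res.end(data);
  });
}).listen(8081, () => console.log('Serving ' + root + ' on http://localhost:8081'));
```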
/chapter1/activation_functions.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
11 |
12 |
--------------------------------------------------------------------------------
/chapter3/tfjs_xor_batching.js:
--------------------------------------------------------------------------------
1 | const hiddenNumNeurons = 20;
2 | const hidden2NumNeurons = 5;
3 |
4 | const learningRate = 0.01;
5 | const num_iterations = 100;
6 | const batch_size = 20;
7 |
8 | const weights = tf.variable(tf.randomNormal([2, hiddenNumNeurons]));
9 | const biases = tf.variable(tf.zeros([hiddenNumNeurons]));
10 | const weights2 = tf.variable(tf.randomNormal([hiddenNumNeurons, hidden2NumNeurons]));
11 | const biases2 = tf.variable(tf.zeros([hidden2NumNeurons]));
12 | const outWeights = tf.variable(tf.randomNormal([hidden2NumNeurons, 1]));
13 | const outBias = tf.variable(tf.zeros([1]));
14 |
15 | const optimizer = tf.train.adam(learningRate);
16 |
17 | const epsilon = tf.scalar(1e-7);
18 | const one = tf.scalar(1);
19 |
20 | /*
21 | * Given an input, have our model output a prediction
22 | */
23 | function predict(input) {
24 | return tf.tidy(() => {
25 |
26 | const hidden = input.matMul(weights).add(biases).relu();
27 | const hidden2 = hidden.matMul(weights2).add(biases2).relu();
28 | const out = hidden2.matMul(outWeights).add(outBias).sigmoid().as1D();
29 |
30 | return out;
31 | });
32 | }
33 |
34 | /*
35 | * Calculate the loss of our model's prediction vs the actual label
36 | */
37 | function loss(prediction, actual) {
38 | // Having a good error metric is key for training a machine learning model
39 | return tf.tidy(() => {
40 | return tf.add(
41 | actual.mul(prediction.add(epsilon).log()),
42 | one.sub(actual).mul(one.sub(prediction).add(epsilon).log()))
43 | .mean()
44 | .neg().asScalar();
45 | });
46 | }
47 |
48 | /*
49 | * This function trains our model asynchronously
50 | */
51 | async function train(numIterations, done) {
52 |
53 | for (let iter = 0; iter < numIterations; iter++) {
54 |
55 | let xs, ys, cost;
56 | [xs, ys] = getNRandomSamples(batch_size);
57 |
58 | cost = tf.tidy(() => {
59 | cost = optimizer.minimize(() => {
60 | const pred = predict(tf.tensor2d(xs));
61 | const predLoss = loss(pred, tf.tensor1d(ys));
62 |
63 | return predLoss;
64 | }, true);
65 |
66 | return cost;
67 | })
68 |
69 | if (iter % 10 == 0) {
70 | await cost.data().then((data) => console.log(`Iteration: ${iter} Loss: ${data}`));
71 | }
72 |
73 | await tf.nextFrame();
74 | }
75 |
76 | done();
77 | }
78 |
79 | /*
80 | * This function calculates the accuracy of our model
81 | */
82 | function test(xs, ys) {
83 | tf.tidy(() => {
84 | const predictedYs = xs.map((x) => Math.round(predict(tf.tensor2d(x, [1, 2])).dataSync()));
85 |
86 | var predicted = 0;
87 | for (let i = 0; i < xs.length; i++) {
88 | if (ys[i] == predictedYs[i]) {
89 | predicted++;
90 | }
91 | }
92 | console.log(`Num correctly predicted: ${predicted} out of ${xs.length}`);
93 | console.log(`Accuracy: ${predicted/xs.length}`);
94 | })
95 | }
96 |
97 | /*
98 | * This function returns a random sample and its corresponding label
99 | */
100 | function getRandomSample() {
101 | let x;
102 | x = [Math.random()*2-1, Math.random()*2-1];
103 | let y;
104 | if (x[0] > 0 && x[1] > 0 || x[0] < 0 && x[1] < 0) {
105 | y = 0;
106 | } else {
107 | y = 1;
108 | }
109 | return [x, y];
110 | }
111 |
112 | /*
113 | * This function returns n random samples
114 | */
115 | function getNRandomSamples(n) {
116 | let xs = [];
117 | let ys = [];
118 | for (let iter = 0; iter < n; iter++) {
119 | let x, y;
120 | [x, y] = getRandomSample();
121 | xs.push(x);
122 | ys.push(y);
123 | }
124 | return [xs, ys];
125 | }
126 |
127 | let testX, testY;
128 | [testX, testY] = getNRandomSamples(100);
129 |
130 | // Test before training
131 | console.log(`Before training: `);
132 | test(testX, testY);
133 |
134 | console.log('=============');
135 | console.log(`Training for ${num_iterations} iterations...`);
136 |
137 | // Train, then test right after
138 | train(num_iterations, () => {
139 | console.log('=============');
140 | console.log(
141 | `After training:`)
142 | test(testX, testY);
143 | });
144 |
145 |
--------------------------------------------------------------------------------
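The `getRandomSample` helper above labels a point by the sign pattern of its two coordinates: same signs give class 0, opposite signs give class 1, which is the XOR problem the network is trained on. A standalone sanity check of that labeling rule (plain JavaScript, no TensorFlow.js required) might look like this:

```js
// Sketch: verify the XOR-style labeling used by getRandomSample in tfjs_xor_batching.js
function xorLabel(x) {
  // class 0 when both coordinates share a sign, class 1 otherwise
  return (x[0] > 0 && x[1] > 0) || (x[0] < 0 && x[1] < 0) ? 0 : 1;
}

const samples = [
  [ 0.5,  0.5], // same signs -> 0
  [-0.5, -0.5], // same signs -> 0
  [ 0.5, -0.5], // opposite signs -> 1
  [-0.5,  0.5]  // opposite signs -> 1
];

samples.forEach((s) => console.log(s, '->', xorLabel(s)));
```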
/chapter5/0_webglFirstRendering/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | WebGL first rendering
5 |
6 |
7 |
23 |
24 |
25 |
26 |
WebGL first rendering
27 |
28 | We draw 2 triangles to fill the viewport. They are colored by their coordinate values (RED along the horizontal axis, GREEN along the vertical axis).
29 |
30 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/chapter5/0_webglFirstRendering/script.js:
--------------------------------------------------------------------------------
1 | /**
2 | *
3 | * This software is released under MIT licence :
4 | *
5 | * Copyright (c) 2018 Xavier Bourry ( xavier@jeeliz.com )
6 | *
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy
8 | * of this software and associated documentation files (the "Software"), to deal
9 | * in the Software without restriction, including without limitation the rights
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | * copies of the Software, and to permit persons to whom the Software is
12 | * furnished to do so, subject to the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be included in all
15 | * copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | * SOFTWARE.
24 | */
25 |
26 | function main(){
27 |
28 | // CREATE WEBGL CONTEXT :
29 | var myCanvas=document.getElementById('myWebGLCanvas');
30 | var GL;
31 | try {
32 | GL=myCanvas.getContext('webgl', {antialias: false, depth: false});
33 | } catch(e) {
34 | alert('You are not WebGL compatible :(');
35 | }
36 |
37 |
38 | // CREATE THE VERTEX BUFFER OBJECTS :
39 | //declare vertices and indices of a quad :
40 | var quadVertices = new Float32Array([
41 | -1, -1, //bottom left corner -> indice 0
42 | -1, 1, //top left corner -> indice 1
43 | 1, 1, //top right corner -> indice 2
44 | 1, -1 //bottom right corner -> indice 3
45 | ]);
46 | var quadIndices = new Uint16Array([
47 | 0,1,2, //first triangle if made with points of indices 0,1,2
48 | 0,2,3 //second triangle
49 | ]);
50 |
51 | //send vertices to the GPU :
52 | var quadVerticesVBO= GL.createBuffer();
53 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
54 | GL.bufferData(GL.ARRAY_BUFFER, quadVertices, GL.STATIC_DRAW);
55 |
56 | //send indices to the GPU :
57 | var quadIndicesVBO= GL.createBuffer();
58 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
59 | GL.bufferData(GL.ELEMENT_ARRAY_BUFFER, quadIndices, GL.STATIC_DRAW);
60 |
61 |
62 | //CREATE THE SHADER PROGRAM :
63 | //declare shader sources as string
64 | var shaderVertexSource="attribute vec2 position;\n"
65 | +"void main(void){\n"
66 | +"gl_Position=vec4(position, 0., 1.);\n"
67 | +"}";
68 | var shaderFragmentSource="precision highp float;\n"
69 | +"uniform vec2 resolution;\n"
70 | +"void main(void){\n"
71 | +"vec2 pixelPosition=gl_FragCoord.xy/resolution;\n"
72 | +"gl_FragColor=vec4(pixelPosition, 0.,1.);\n"
73 | +"}";
74 |
75 | //helper function to compile a shader
76 | function compile_shader(source, type, typeString) {
77 | var shader = GL.createShader(type);
78 | GL.shaderSource(shader, source);
79 | GL.compileShader(shader);
80 | if (!GL.getShaderParameter(shader, GL.COMPILE_STATUS)) {
81 | alert("ERROR IN "+typeString+ " SHADER : " + GL.getShaderInfoLog(shader));
82 | return false;
83 | }
84 | return shader;
85 | };
86 | //compile both shader separately
87 | var shaderVertex=compile_shader(shaderVertexSource, GL.VERTEX_SHADER, "VERTEX");
88 | var shaderFragment=compile_shader(shaderFragmentSource, GL.FRAGMENT_SHADER, "FRAGMENT");
89 |
90 | var shaderProgram=GL.createProgram();
91 | GL.attachShader(shaderProgram, shaderVertex);
92 | GL.attachShader(shaderProgram, shaderFragment);
93 |
94 | //start the linking stage :
95 | GL.linkProgram(shaderProgram);
96 |
97 | //link attributes :
98 | var _positionAttributePointer = GL.getAttribLocation(shaderProgram, "position");
99 | GL.enableVertexAttribArray(_positionAttributePointer);
100 |
101 | //link uniforms :
102 | var _resolutionUniform = GL.getUniformLocation(shaderProgram, "resolution");
103 |
104 |
105 | //RENDERING TIME !
106 | //bind VBOs
107 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
108 | GL.vertexAttribPointer(_positionAttributePointer, 2, GL.FLOAT, false, 8,0);
109 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
110 |
111 | //rendering :
112 | GL.useProgram(shaderProgram);
113 | //update GLSL "resolution" value in the fragment shader :
114 | GL.viewport(0,0,myCanvas.width, myCanvas.height);
115 | //update GLSL "resolution" value in the fragment shader :
116 | GL.uniform2f(_resolutionUniform, myCanvas.width, myCanvas.height);
117 | //trigger the rendering :
118 | GL.drawElements(GL.TRIANGLES, 6, GL.UNSIGNED_SHORT, 0);
119 | GL.flush();
120 |
121 | } //end main()
122 |
--------------------------------------------------------------------------------
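The `compile_shader` helper above reports compilation errors, but the script does not check the result of the linking stage. A small addition (a sketch, not in the original file) that verifies `linkProgram` succeeded could look like this:

```js
// Sketch: check the link status of a WebGL program, mirroring compile_shader's error reporting
function check_linkStatus(GL, shaderProgram, name) {
  if (!GL.getProgramParameter(shaderProgram, GL.LINK_STATUS)) {
    alert('ERROR WHEN LINKING ' + name + ' : ' + GL.getProgramInfoLog(shaderProgram));
    return false;
  }
  return true;
}

// usage, right after GL.linkProgram(shaderProgram) in script.js :
// check_linkStatus(GL, shaderProgram, 'FIRST RENDERING');
```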
/chapter5/1_mandelBrot/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Mandelbrot fractal
5 |
6 |
7 |
8 |
9 |
25 |
26 |
27 |
28 |
Mandelbrot fractal
29 |
30 | By changing a few lines from the previous example, we draw this beautiful Mandelbrot fractal. All pixels are computed in parallel, which is particularly interesting when the per-pixel computation is not trivial, as in this example. It illustrates the power of GPU acceleration.
31 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/chapter5/1_mandelBrot/script.js:
--------------------------------------------------------------------------------
1 | /**
2 | *
3 | * This software is released under MIT licence :
4 | *
5 | * Copyright (c) 2018 Xavier Bourry ( xavier@jeeliz.com )
6 | *
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy
8 | * of this software and associated documentation files (the "Software"), to deal
9 | * in the Software without restriction, including without limitation the rights
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | * copies of the Software, and to permit persons to whom the Software is
12 | * furnished to do so, subject to the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be included in all
15 | * copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | * SOFTWARE.
24 | */
25 |
26 | function main(){
27 |
28 | // CREATE WEBGL CONTEXT :
29 | var myCanvas=document.getElementById('myWebGLCanvas');
30 | var GL;
31 | try {
32 | GL=myCanvas.getContext('webgl', {antialias: false, depth: false});
33 | } catch(e) {
34 | alert('You are not WebGL compatible :(');
35 | }
36 |
37 |
38 | // CREATE THE VERTEX BUFFER OBJECTS :
39 | //declare vertices and indices of a quad :
40 | var quadVertices = new Float32Array([
41 | -1, -1, //bottom left corner -> indice 0
42 | -1, 1, //top left corner -> indice 1
43 | 1, 1, //top right corner -> indice 2
44 | 1, -1 //bottom right corner -> indice 3
45 | ]);
46 | var quadIndices = new Uint16Array([
47 | 0,1,2, //first triangle if made with points of indices 0,1,2
48 | 0,2,3 //second triangle
49 | ]);
50 |
51 | //send vertices to the GPU :
52 | var quadVerticesVBO= GL.createBuffer();
53 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
54 | GL.bufferData(GL.ARRAY_BUFFER, quadVertices, GL.STATIC_DRAW);
55 |
56 | //send indices to the GPU :
57 | var quadIndicesVBO= GL.createBuffer();
58 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
59 | GL.bufferData(GL.ELEMENT_ARRAY_BUFFER, quadIndices, GL.STATIC_DRAW);
60 |
61 |
62 | //CREATE THE SHADER PROGRAM :
63 | //declare shader sources as string
64 | var shaderVertexSource="attribute vec2 position;\n"
65 | +"void main(void){\n"
66 | +"gl_Position=vec4(position, 0., 1.);\n"
67 | +"}";
68 | var shaderFragmentSource="precision highp float;\n"
69 | +"uniform vec2 resolution;\n"
70 | +"void main(void){\n"
71 | +"vec2 pixelPosition=gl_FragCoord.xy/resolution;\n"
72 | +"vec2 pixelPositionCentered=1.3*(pixelPosition*2.-vec2(1.55,1.));\n"
73 | +"vec2 z = pixelPositionCentered, newZ;\n"
74 | +"float j=0.;\n"
75 | +"for(int i=0; i<=200; i+=1) {\n"
76 | +" newZ = pixelPositionCentered+vec2(z.x * z.x - z.y * z.y, 2. * z.y * z.x);\n"
77 | +" if(length(newZ) > 2.) break;\n"
78 | +" z=newZ; j+=1.;\n"
79 | +"}\n"
80 | +"vec3 color=step(j, 199.)*vec3(j/20., j*j/4000., 0.);\n"
81 | +"gl_FragColor = vec4(color,1.);\n"
82 | +"}";
83 |
84 | //helper function to compile a shader
85 | function compile_shader(source, type, typeString) {
86 | var shader = GL.createShader(type);
87 | GL.shaderSource(shader, source);
88 | GL.compileShader(shader);
89 | if (!GL.getShaderParameter(shader, GL.COMPILE_STATUS)) {
90 | alert("ERROR IN "+typeString+ " SHADER : " + GL.getShaderInfoLog(shader));
91 | return false;
92 | }
93 | return shader;
94 | };
95 | //compile both shader separately
96 | var shaderVertex=compile_shader(shaderVertexSource, GL.VERTEX_SHADER, "VERTEX");
97 | var shaderFragment=compile_shader(shaderFragmentSource, GL.FRAGMENT_SHADER, "FRAGMENT");
98 |
99 | var shaderProgram=GL.createProgram();
100 | GL.attachShader(shaderProgram, shaderVertex);
101 | GL.attachShader(shaderProgram, shaderFragment);
102 |
103 | //start the linking stage :
104 | GL.linkProgram(shaderProgram);
105 |
106 | //link attributes :
107 | var _positionAttributePointer = GL.getAttribLocation(shaderProgram, "position");
108 | GL.enableVertexAttribArray(_positionAttributePointer);
109 |
110 | //link uniforms :
111 | var _resolutionUniform = GL.getUniformLocation(shaderProgram, "resolution");
112 |
113 |
114 | //RENDERING TIME !
115 | //bind VBOs
116 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
117 | GL.vertexAttribPointer(_positionAttributePointer, 2, GL.FLOAT, false, 8,0);
118 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
119 |
120 | //rendering :
121 | GL.useProgram(shaderProgram);
122 | //update GLSL "resolution" value in the fragment shader :
123 | GL.viewport(0,0,myCanvas.width, myCanvas.height);
124 | //update GLSL "resolution" value in the fragment shader :
125 | GL.uniform2f(_resolutionUniform, myCanvas.width, myCanvas.height);
126 | //trigger the rendering :
127 | GL.drawElements(GL.TRIANGLES, 6, GL.UNSIGNED_SHORT, 0);
128 | GL.flush();
129 |
130 | } //end main()
131 |
--------------------------------------------------------------------------------
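The fragment shader above iterates the Mandelbrot map directly on the centered pixel coordinate. Written out, the shader line `newZ = pixelPositionCentered + vec2(z.x*z.x - z.y*z.y, 2.*z.y*z.x)` is simply the complex square plus the constant c:

```latex
% Escape-time iteration implemented by the shader, with z = x + iy and c the centered pixel coordinate
z_{n+1} = z_n^2 + c, \qquad (x + iy)^2 = (x^2 - y^2) + i\,(2xy)
% a pixel is colored according to the first n for which |z_n| > 2 (capped here at 200 iterations)
```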
/chapter5/2_renderToTexture/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Render to Texture : Conway game of life
5 |
6 |
7 |
8 |
9 |
10 |
26 |
27 |
28 |
29 |
Conway game of life
30 |
31 | We simulate Conway's Game of Life for 2000 iterations. Regular shapes emerge from random initial values. We use render-to-texture to update the texture storing the cell states at each iteration. This is a discrete simulation (each cell is either alive or dead), so we do not need floating-point textures.
32 |
37 | Open the web console to view the benchmarks.
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
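Conway's Game of Life updates every cell from its eight neighbours: a live cell survives with two or three live neighbours, and a dead cell becomes alive with exactly three. A compact reference implementation of one update step, in the same spirit as the CPU version that follows (a sketch under the same clamped-edge convention, not the repository's code):

```js
// Sketch: one Game of Life step on a size x size grid stored as a flat Uint8Array (1 = alive)
function lifeStep(src, dst, size) {
  const get = (x, y) => {
    // clamp to the edges, like the GL.CLAMP_TO_EDGE behaviour reproduced by the CPU script
    const xc = Math.min(Math.max(x, 0), size - 1);
    const yc = Math.min(Math.max(y, 0), size - 1);
    return src[yc * size + xc];
  };
  for (let y = 0; y < size; ++y) {
    for (let x = 0; x < size; ++x) {
      let n = 0;
      for (let dy = -1; dy <= 1; ++dy)
        for (let dx = -1; dx <= 1; ++dx)
          if (dx !== 0 || dy !== 0) n += get(x + dx, y + dy);
      const alive = src[y * size + x] === 1;
      dst[y * size + x] = (n === 3 || (alive && n === 2)) ? 1 : 0;
    }
  }
}
```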
/chapter5/2_renderToTexture/scriptCPU.js:
--------------------------------------------------------------------------------
1 | function main_CPU(){
2 | //do the conway game of life on GPU. use SETTINGS of scriptGPU
3 |
4 | var data0=new Uint8Array(SETTINGS.simuSize*SETTINGS.simuSize);
5 | var data1=new Uint8Array(SETTINGS.simuSize*SETTINGS.simuSize);
6 |
7 | //randomly init cells live or dead
8 | for (var i=0; i<SETTINGS.simuSize*SETTINGS.simuSize; ++i){
9 | data0[i]=(Math.random()>0.5)?1:0;
10 | data1[i]=data0[i];
11 | }
12 |
13 | var datas=[data0, data1];
14 |
15 | function getVal(dataArr, x, y){ //reproduce the GL.CLAMP_TO_EDGE behaviour
16 | var xClamped=Math.min(Math.max(x, 0), SETTINGS.simuSize-1);
17 | var yClamped=Math.min(Math.max(y, 0), SETTINGS.simuSize-1);
18 | return dataArr[yClamped*SETTINGS.simuSize+xClamped];
19 | }
20 |
21 | var x, y, dataInput, dataOutput,nNeighbors;
22 |
23 | var tStart=performance.now();
24 | //simulation loop :
25 | for (i=0; i<SETTINGS.nIterations; ++i){
--------------------------------------------------------------------------------
/chapter5/2_renderToTexture/scriptGPU.js:
--------------------------------------------------------------------------------
47 | -1, -1, //bottom left corner -> indice 0
48 | -1, 1, //top left corner -> indice 1
49 | 1, 1, //top right corner -> indice 2
50 | 1, -1 //bottom right corner -> indice 3
51 | ]);
52 | var quadIndices = new Uint16Array([
53 | 0,1,2, //first triangle if made with points of indices 0,1,2
54 | 0,2,3 //second triangle
55 | ]);
56 |
57 | //send vertices to the GPU :
58 | var quadVerticesVBO= GL.createBuffer();
59 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
60 | GL.bufferData(GL.ARRAY_BUFFER, quadVertices, GL.STATIC_DRAW);
61 |
62 | //send indices to the GPU :
63 | var quadIndicesVBO= GL.createBuffer();
64 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
65 | GL.bufferData(GL.ELEMENT_ARRAY_BUFFER, quadIndices, GL.STATIC_DRAW);
66 |
67 |
68 | //CREATE THE RENDERING SHADER PROGRAM :
69 | //declare shader sources as string
70 | var shaderVertexSource="attribute vec2 position;\n"
71 | +"void main(void){\n"
72 | +"gl_Position=vec4(position, 0., 1.);\n"
73 | +"}";
74 | var shaderFragmentSourceRendering="precision highp float;\n"
75 | +"uniform vec2 resolution;\n"
76 | +"uniform sampler2D samplerTexture;\n"
77 | +"void main(void){\n"
78 | +"vec2 uv=gl_FragCoord.xy/resolution;\n" //texture UV coordinates
79 | +"vec4 color=texture2D(samplerTexture, uv);\n" //fetch texture color
80 | +"gl_FragColor=vec4(color.r*vec3(1.,1.,0.) ,1.);\n"
81 | +"}";
82 |
83 | //helper function to compile a shader
84 | function compile_shader(source, type, typeString) {
85 | var shader = GL.createShader(type);
86 | GL.shaderSource(shader, source);
87 | GL.compileShader(shader);
88 | if (!GL.getShaderParameter(shader, GL.COMPILE_STATUS)) {
89 | alert("ERROR IN "+typeString+ " SHADER : " + GL.getShaderInfoLog(shader));
90 | return false;
91 | }
92 | return shader;
93 | };
94 |
95 | //helper function to build the shader program :
96 | function build_shaderProgram(shaderVertexSource, shaderFragmentSource, name) {
97 | //compile both shader separately
98 | var shaderVertex=compile_shader(shaderVertexSource, GL.VERTEX_SHADER, "VERTEX "+name);
99 | var shaderFragment=compile_shader(shaderFragmentSource, GL.FRAGMENT_SHADER, "FRAGMENT "+name);
100 |
101 | var shaderProgram=GL.createProgram();
102 | GL.attachShader(shaderProgram, shaderVertex);
103 | GL.attachShader(shaderProgram, shaderFragment);
104 |
105 | //start the linking stage :
106 | GL.linkProgram(shaderProgram);
107 | return shaderProgram;
108 | }
109 |
110 | //build rendering shader program :
111 | var shaderProgramRendering = build_shaderProgram(shaderVertexSource, shaderFragmentSourceRendering, 'RENDERING');
112 | //link attributes :
113 | var _positionAttributePointer = GL.getAttribLocation(shaderProgramRendering, 'position');
114 | GL.enableVertexAttribArray(_positionAttributePointer);
115 | //link uniforms :
116 | var _resolutionRenderingUniform = GL.getUniformLocation(shaderProgramRendering, 'resolution');
117 | var _samplerTextureRenderingUniform = GL.getUniformLocation(shaderProgramRendering, 'samplerTexture');
118 |
119 |
120 | //BIND VBOs
121 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
122 | GL.vertexAttribPointer(_positionAttributePointer, 2, GL.FLOAT, false, 8,0);
123 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
124 |
125 |
126 | //RENDER TO TEXTURE INITIALIZATION :
127 | //initialize and bind the FBO
128 | var rttFbo=GL.createFramebuffer();
129 | GL.bindFramebuffer(GL.FRAMEBUFFER, rttFbo);
130 |
131 | //instantiate the textures :
132 | //helper function to create a texture
133 | function create_rttTexture(width, height, data){
134 | var texture=GL.createTexture();
135 | GL.bindTexture(GL.TEXTURE_2D, texture);
136 | //texture filtering : always pick the nearest pixel from the texture UV coordinates :
137 | GL.texParameteri(GL.TEXTURE_2D, GL.TEXTURE_MAG_FILTER, GL.NEAREST);
138 | GL.texParameteri(GL.TEXTURE_2D, GL.TEXTURE_MIN_FILTER, GL.NEAREST);
139 |
140 | //does not repeat texture along axis
141 | //(otherwise may throw errors if dimensions of the texture are not power of 2) :
142 | GL.texParameteri( GL.TEXTURE_2D, GL.TEXTURE_WRAP_S, GL.CLAMP_TO_EDGE );
143 | GL.texParameteri( GL.TEXTURE_2D, GL.TEXTURE_WRAP_T, GL.CLAMP_TO_EDGE );
144 | GL.texImage2D(GL.TEXTURE_2D, 0, GL.RGBA, width, height, 0, GL.RGBA, GL.UNSIGNED_BYTE, data);
145 | return texture;
146 | }
147 | var data0=new Uint8Array(SETTINGS.simuSize*SETTINGS.simuSize*4);
148 | //randomly init cells live or dead
149 | for (var i=0; i<SETTINGS.simuSize*SETTINGS.simuSize*4; i+=4){
150 | data0[i]=(Math.random()>0.5)?255:0;
151 | }
152 | //uncomment this code chunk to get the illustration of the book :
153 | //init a square of SETTINGS.simuSize/4 wide at the center of the texture with 1
154 | /* data0=new Uint8Array(SETTINGS.simuSize*SETTINGS.simuSize*4); //reset all values to 0
155 | var sMin=Math.round(SETTINGS.simuSize/2-SETTINGS.simuSize/4);
156 | var sMax=Math.round(SETTINGS.simuSize/2+SETTINGS.simuSize/4);
157 | for (var y=sMin; y<sMax; ++y){
194 | +" if (nNeighbors>=4.0){\n"
195 | +" cellState=0.0;\n" //die
196 | +"};\n"
197 | +"gl_FragColor=vec4(cellState, 0., 0.,1.);\n"
198 | +"}";
199 | //build rendering shader program :
200 | var shaderProgramComputing = build_shaderProgram(shaderVertexSource, shaderFragmentSourceComputing, 'COMPUTING');
201 | //the rendering vertex shader is the same so attributes have the same name, number and dimension than the rendering shader program
202 | //so we do not need to link and enable them again
203 | //link uniforms :
204 | var _resolutionComputingUniform = GL.getUniformLocation(shaderProgramComputing, "resolution");
205 | var _samplerTextureComputingUniform = GL.getUniformLocation(shaderProgramComputing, 'samplerTexture')
206 |
207 |
208 | //COMPUTING TIME !
209 | GL.useProgram(shaderProgramComputing);
210 | GL.viewport(0,0,SETTINGS.simuSize,SETTINGS.simuSize);
211 | GL.uniform2f(_resolutionComputingUniform, SETTINGS.simuSize, SETTINGS.simuSize);
212 | GL.uniform1i(_samplerTextureComputingUniform, 0);
213 | //next bound texture will be bound on sampler 0 :
214 | GL.activeTexture(GL.TEXTURE0);
215 |
216 | //computing loop
217 | var tStart=performance.now();
218 | for (var i=0; i<SETTINGS.nIterations; ++i){
--------------------------------------------------------------------------------
/chapter5/3_RTTfloat/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Render on floating point texture : heat simulation
5 |
6 |
7 |
8 |
9 |
21 |
22 |
23 |
24 |
Heat equation simulation
25 |
26 | We start from the previous example (Conway's Game of Life) to implement a continuous simulation. We simulate thermal diffusion in a steel block using the heat equation in 2D. Unlike the Game of Life, the state is represented by floating-point values (the heat and the heat gradients), so we need to render to a floating-point texture.
27 |
28 |
29 |
30 | The total simulated area is a square 2.56 meters wide. The initial state is a square area 1.28 meters wide at 100°C; the surrounding steel is at 0°C. The simulation lasts 3000 seconds. We display the heat using the IDL_Rainbow color map, calibrated from 0°C (black) to 100°C (red). This is the color map :
31 |
32 |
33 |
34 |
35 | On the left : initial state, on the right : result of the simulation
36 |
37 |
38 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
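The physical constants of the simulation live in the computing fragment shader of the script below. For reference, the continuous model and the explicit finite-difference update that such a shader typically implements are sketched here (with α the thermal diffusivity, Δx the grid step and Δt the time step; the exact constants used by the demo are not restated):

```latex
% 2D heat (diffusion) equation
\frac{\partial T}{\partial t} \;=\; \alpha \left( \frac{\partial^2 T}{\partial x^2} + \frac{\partial^2 T}{\partial y^2} \right)
% explicit finite-difference update on the simulation grid
T_{i,j}^{\,n+1} \;=\; T_{i,j}^{\,n} \;+\; \frac{\alpha\,\Delta t}{\Delta x^{2}}
\left( T_{i+1,j}^{\,n} + T_{i-1,j}^{\,n} + T_{i,j+1}^{\,n} + T_{i,j-1}^{\,n} - 4\,T_{i,j}^{\,n} \right)
```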
/chapter5/3_RTTfloat/initialState.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/3_RTTfloat/initialState.png
--------------------------------------------------------------------------------
/chapter5/3_RTTfloat/script.js:
--------------------------------------------------------------------------------
1 | /**
2 | *
3 | * This software is released under MIT licence :
4 | *
5 | * Copyright (c) 2018 Xavier Bourry ( xavier@jeeliz.com )
6 | *
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy
8 | * of this software and associated documentation files (the "Software"), to deal
9 | * in the Software without restriction, including without limitation the rights
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | * copies of the Software, and to permit persons to whom the Software is
12 | * furnished to do so, subject to the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be included in all
15 | * copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | * SOFTWARE.
24 | */
25 |
26 |
27 | /*
28 | 2D Thermal simulation of a square of iron heated at 100°C
29 | The square is 2.56 meters wide
30 | The total simulated area is 2.56 meters wide
31 | The duration of the simulation is 3000 seconds
32 | */
33 |
34 | //parameters. Physical parameters are in the computing fragment shader
35 | var SETTINGS={
36 | simuSize: 256,
37 | nIterations: 3000 //time step is 1 second, so the total duration of the simulation is nIterations seconds
38 | };
39 |
40 | //global variables
41 | var GL, FLOATPIXELTYPE, ISWEBGL2;
42 |
43 | //test if it is possible to do RTT with FLOAT/HALF FLOAT textures :
44 | function test_canRTT(internalFormat, pixelType){
45 | var testFbo=GL.createFramebuffer();
46 | GL.bindFramebuffer(GL.FRAMEBUFFER, testFbo);
47 |
48 | var testTexture=GL.createTexture();
49 | GL.bindTexture(GL.TEXTURE_2D, testTexture);
50 | GL.texImage2D(GL.TEXTURE_2D, 0, internalFormat, 1, 1, 0, GL.RGBA, pixelType, null);
51 |
52 | GL.framebufferTexture2D(GL.FRAMEBUFFER, GL.COLOR_ATTACHMENT0, GL.TEXTURE_2D, testTexture, 0);
53 | var fbStatus=GL.checkFramebufferStatus(GL.FRAMEBUFFER);
54 |
55 | return(fbStatus===GL.FRAMEBUFFER_COMPLETE);
56 | }
57 |
58 | function enable_webGL12extensions(){
59 | GL.getExtension('EXT_color_buffer_float');
60 | GL.getExtension('WEBGL_color_buffer_float');
61 | GL.getExtension('OES_color_buffer_float');
62 | }
63 |
64 | function get_webgl1extensions(){
65 | if (GL.getExtension('OES_texture_float') && test_canRTT(GL.RGBA, GL.FLOAT)){
66 | FLOATPIXELTYPE=GL.FLOAT;
67 | return true;
68 | }
69 | if (GL.getExtension('OES_texture_half_float') && test_canRTT(GL.RGBA, GL.HALF_FLOAT)){
70 | FLOATPIXELTYPE=GL.HALF_FLOAT;
71 | return true;
72 | }
73 | return false;
74 | }
75 |
76 | //helper function to compile a shader
77 | function compile_shader(source, type, typeString) {
78 | var shader = GL.createShader(type);
79 | GL.shaderSource(shader, source);
80 | GL.compileShader(shader);
81 | if (!GL.getShaderParameter(shader, GL.COMPILE_STATUS)) {
82 | alert("ERROR IN "+typeString+ " SHADER : " + GL.getShaderInfoLog(shader));
83 | return false;
84 | }
85 | return shader;
86 | };
87 |
88 | //helper function to build the shader program :
89 | function build_shaderProgram(shaderVertexSource, shaderFragmentSource, name) {
90 | //compile both shader separately
91 | var shaderVertex=compile_shader(shaderVertexSource, GL.VERTEX_SHADER, "VERTEX "+name);
92 | var shaderFragment=compile_shader(shaderFragmentSource, GL.FRAGMENT_SHADER, "FRAGMENT "+name);
93 |
94 | var shaderProgram=GL.createProgram();
95 | GL.attachShader(shaderProgram, shaderVertex);
96 | GL.attachShader(shaderProgram, shaderFragment);
97 |
98 | //start the linking stage :
99 | GL.linkProgram(shaderProgram);
100 | return shaderProgram;
101 | }
102 |
103 | function create_andBindVBOs(positionAttributePointer){
104 | // CREATE THE VERTEX BUFFER OBJECTS :
105 | //declare vertices and indices of a quad :
106 | var quadVertices = new Float32Array([
107 | -1, -1, //bottom left corner -> indice 0
108 | -1, 1, //top left corner -> indice 1
109 | 1, 1, //top right corner -> indice 2
110 | 1, -1 //bottom right corner -> indice 3
111 | ]);
112 | var quadIndices = new Uint16Array([
113 | 0,1,2, //first triangle if made with points of indices 0,1,2
114 | 0,2,3 //second triangle
115 | ]);
116 |
117 | //send vertices to the GPU :
118 | var quadVerticesVBO= GL.createBuffer();
119 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
120 | GL.bufferData(GL.ARRAY_BUFFER, quadVertices, GL.STATIC_DRAW);
121 |
122 | //send indices to the GPU :
123 | var quadIndicesVBO= GL.createBuffer();
124 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
125 | GL.bufferData(GL.ELEMENT_ARRAY_BUFFER, quadIndices, GL.STATIC_DRAW);
126 |
127 | //BIND VBOs
128 | GL.bindBuffer(GL.ARRAY_BUFFER, quadVerticesVBO);
129 | GL.vertexAttribPointer(positionAttributePointer, 2, GL.FLOAT, false, 8,0);
130 | GL.bindBuffer(GL.ELEMENT_ARRAY_BUFFER, quadIndicesVBO);
131 | }
132 |
133 | //helper function to create a texture
134 | function create_rttTexture(width, height, data){
135 | var texture=GL.createTexture();
136 | GL.bindTexture(GL.TEXTURE_2D, texture);
137 | //texture filtering : always pick the nearest pixel from the texture UV coordinates :
138 | GL.texParameteri(GL.TEXTURE_2D, GL.TEXTURE_MAG_FILTER, GL.NEAREST);
139 | GL.texParameteri(GL.TEXTURE_2D, GL.TEXTURE_MIN_FILTER, GL.NEAREST);
140 |
141 | //does not repeat texture along axis
142 | //(otherwise may throw errors if dimensions of the texture are not power of 2) :
143 | GL.texParameteri( GL.TEXTURE_2D, GL.TEXTURE_WRAP_S, GL.CLAMP_TO_EDGE );
144 | GL.texParameteri( GL.TEXTURE_2D, GL.TEXTURE_WRAP_T, GL.CLAMP_TO_EDGE );
145 |
146 | if (FLOATPIXELTYPE===GL.FLOAT){ //32 bit precision
147 | GL.texImage2D(GL.TEXTURE_2D, 0, (ISWEBGL2)?GL.RGBA32F:GL.RGBA, width, height, 0, GL.RGBA, FLOATPIXELTYPE, data);
148 | } else { //16 bits precision
149 | GL.texImage2D(GL.TEXTURE_2D, 0, (ISWEBGL2)?GL.RGBA16F:GL.RGBA, width, height, 0, GL.RGBA, FLOATPIXELTYPE, convert_arrayToUInt16Array(data));
150 | }
151 | return texture;
152 | }
153 |
154 | //convert a float value to Float16 encoding
155 | //ref : https://esdiscuss.org/topic/float16array
156 | function convert_floatToInt16(val){
157 | var floatView = new Float32Array(1);
158 | var int32View = new Int32Array(floatView.buffer);
159 |
160 | floatView[0] = val;
161 | var x = int32View[0];
162 |
163 | var bits = (x >> 16) & 0x8000; /* Get the sign */
164 | var m = (x >> 12) & 0x07ff; /* Keep one extra bit for rounding */
165 | var e = (x >> 23) & 0xff; /* Using int is faster here */
166 |
167 | /* If zero, or denormal, or exponent underflows too much for a denormal
168 | * half, return signed zero. */
169 | if (e < 103) {
170 | return bits;
171 | }
172 |
173 | /* If NaN, return NaN. If Inf or exponent overflow, return Inf. */
174 | if (e > 142) {
175 | bits |= 0x7c00;
176 | /* If exponent was 0xff and one mantissa bit was set, it means NaN,
177 | * not Inf, so make sure we set one mantissa bit too. */
178 | bits |= ((e == 255) ? 0 : 1) && (x & 0x007fffff);
179 | return bits;
180 | }
181 |
182 | /* If exponent underflows but not too much, return a denormal */
183 | if (e < 113) {
184 | m |= 0x0800;
185 | /* Extra rounding may overflow and set mantissa to 0 and exponent
186 | * to 1, which is OK. */
187 | bits |= (m >> (114 - e)) + ((m >> (113 - e)) & 1);
188 | return bits;
189 | }
190 |
191 | bits |= ((e - 112) << 10) | (m >> 1);
192 | /* Extra rounding. An overflow will set mantissa to 0 and increment
193 | * the exponent, which is OK. */
194 | bits += m & 1;
195 | return bits;
196 | }; //end convert_floatToInt16()
197 |
198 | //convert an array with float values or a Float32Array
199 | //to an Uint16array with 16bits encoded float
200 | //(see https://en.wikipedia.org/wiki/Half-precision_floating-point_format for the encoding)
201 | function convert_arrayToUInt16Array(arr){
202 | var arr16=new Uint16Array(arr.length);
203 | arr.forEach(function(val, ind) {
204 | arr16[ind]=convert_floatToInt16(val);
205 | });
206 | return arr16;
207 | };
208 |
209 | //entry point :
210 | function main(){
211 |
212 | // CREATE WEBGL CONTEXT :
213 | var myCanvas=document.getElementById('myWebGLCanvas');
214 | var webglOptions={antialias: false, depth: false};
215 |
216 | try {
217 | ISWEBGL2=true;
218 | GL=myCanvas.getContext('webgl2', webglOptions);
219 | enable_webGL12extensions();
220 | if (test_canRTT(GL.RGBA32F, GL.FLOAT)){
221 | FLOATPIXELTYPE=GL.FLOAT;
222 | } else {
223 | FLOATPIXELTYPE=GL.HALF_FLOAT;
224 | }
225 | } catch(e) { //no webgl2. try webgl1 ?
226 |
227 | console.log('WARNING : You are not compatible with WebGL2');
228 | try {
229 | ISWEBGL2=false;
230 | GL=myCanvas.getContext('webgl', webglOptions);
231 | enable_webGL12extensions();
232 | if (!get_webgl1extensions()){
233 | alert('WebGL2 is not here and cannot found right WebGL1 extensions');
234 | return;
235 | }
236 | } catch(e) { //no webgl at all =(
237 | alert('error : you are not compatible with WebGL at all');
238 | return;
239 | }
240 |
241 | }
242 |
243 | //disable dithering :
244 | GL.disable(GL.DITHER);
245 |
246 | //CREATE THE RENDERING SHADER PROGRAM :
247 | //declare shader sources as string
248 | var shaderVertexSource="attribute vec2 position;\n"
249 | +"void main(void){\n"
250 | +"gl_Position=vec4(position, 0., 1.);\n"
251 | +"}";
252 |
253 | //rendering shader juste apply a colorMap to display the heat scalar value
254 | //the colormap function comes from https://github.com/kbinani/glsl-colormap
255 | //it encodes the IDL_Rainbow colorMap
256 | var shaderFragmentSourceRendering="precision lowp float;\n"
257 | +"uniform vec2 resolution;\n"
258 | +"uniform sampler2D samplerTexture;\n"
259 |
260 | //begin code from https://github.com/kbinani/glsl-colormap
261 | +"float colormap_red(float x) {\n"
262 | +" if (x < 100.0) {\n"
263 | +" return (-9.55123422981038E-02 * x + 5.86981763554179E+00) * x - 3.13964093701986E+00;\n"
264 | +" } else {\n"
265 | +" return 5.25591836734694E+00 * x - 8.32322857142857E+02;\n"
266 | +" }\n"
267 | +"}\n"
268 | +"float colormap_green(float x) {\n"
269 | +" if (x < 150.0) {\n"
270 | +" return 5.24448979591837E+00 * x - 3.20842448979592E+02;\n"
271 | +" } else {\n"
272 | +" return -5.25673469387755E+00 * x + 1.34195877551020E+03;\n"
273 | +" }\n"
274 | +"}\n"
275 | +"float colormap_blue(float x) {\n"
276 | +" if (x < 80.0) {\n"
277 | +" return 4.59774436090226E+00 * x - 2.26315789473684E+00;\n"
278 | +" } else {\n"
279 | +" return -5.25112244897959E+00 * x + 8.30385102040816E+02;\n"
280 | +" }\n"
281 | +"}\n"
282 | +"vec4 colormap(float x) {\n"
283 | +" float t = x * 255.0;\n"
284 | +" float r = clamp(colormap_red(t) / 255.0, 0.0, 1.0);\n"
285 | +" float g = clamp(colormap_green(t) / 255.0, 0.0, 1.0);\n"
286 | +" float b = clamp(colormap_blue(t) / 255.0, 0.0, 1.0);\n"
287 | +" return vec4(r, g, b, 1.0);\n"
288 | +"}\n"
289 | //end code from https://github.com/kbinani/glsl-colormap
290 |
291 | +"void main(void){\n"
292 | +"vec2 uv=gl_FragCoord.xy/resolution;\n" //texture UV coordinates
293 | +"vec4 color=texture2D(samplerTexture, uv);\n" //fetch texture color. heat is stored in red channel
294 | +"gl_FragColor=colormap(color.r/100.);\n"
295 | +"}";
296 |
297 | //build rendering shader program :
298 | var shaderProgramRendering = build_shaderProgram(shaderVertexSource, shaderFragmentSourceRendering, 'RENDERING');
299 | //link attributes :
300 | var _positionAttributePointer = GL.getAttribLocation(shaderProgramRendering, 'position');
301 | GL.enableVertexAttribArray(_positionAttributePointer);
302 | create_andBindVBOs(_positionAttributePointer);
303 |
304 | //link uniforms :
305 | var _resolutionRenderingUniform = GL.getUniformLocation(shaderProgramRendering, 'resolution');
306 | var _samplerTextureRenderingUniform = GL.getUniformLocation(shaderProgramRendering, 'samplerTexture');
307 |
308 |
309 | //RENDER TO TEXTURE INITIALIZATION :
310 | //initialize and bind the FBO
311 | var rttFbo=GL.createFramebuffer();
312 | GL.bindFramebuffer(GL.FRAMEBUFFER, rttFbo);
313 |
314 | //instantiate the textures :
315 | var data0=new Float32Array(SETTINGS.simuSize*SETTINGS.simuSize*4);
316 | //set all temperatures values to 0°C
317 | for (var i=0; i<SETTINGS.simuSize*SETTINGS.simuSize*4; i+=4){
--------------------------------------------------------------------------------
/chapter5/4_WGLMatrix/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | WGLMatrix : Linear algebra library with WebGL
5 |
6 |
7 |
8 |
9 |
10 |
27 |
28 |
29 |
30 |
WGLMatrix
31 |
32 | Test the mythical WGLMatrix library :
33 |
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/chapter5/4_WGLMatrix/script.js:
--------------------------------------------------------------------------------
1 | /*
2 | Test our WebGL linear algebra lib WGLMatrix
3 | */
4 |
5 | //entry point
6 | function main(){
7 | log('Please take a look at the source code of script.js');
8 | log('Matrix are logged flattened');
9 |
10 | //FIRST TEST SERIE
11 | log('\n==== FIRST TEST ====');
12 | var a=new WGLMatrix.Matrix(4,4,[1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16]);
13 | var b=new WGLMatrix.Matrix(4,4,[0.2,0.6,0.7,0.8, 0.9,0.1,10,1, 1,2,3,3, -1,-10,0,0]);
14 | var r=new WGLMatrix.MatrixZero(4,4);
15 |
16 | logMatrix('TEST READ MATRIX : A =', a.read()[0]);
17 | logMatrix('TEST READ MATRIX 2 (GREEN CHANNEL) : A =', a.read()[1]);
18 | logMatrix('TEST READ MATRIX : B =', b.read()[0]);
19 |
20 | a.add(b,r);
21 | logMatrix('TEST ADD MATRICES : A+B =', r.read()[0]);
22 |
23 | a.multiplyScalar(0.1, r);
24 | logMatrix('TEST MULTIPLYSCALAR : A*0.1 =', r.read()[0]);
25 |
26 | WGLMatrix.addFunction('y=cos(x);', 'COS');
27 | a.apply('COS', r);
28 | logMatrix('TEST APPLY : cos(A) =', r.read()[0]);
29 |
30 |
31 |
32 | //SECOND TEST
33 | log('\n==== SECOND TEST ====');
34 | var m=new WGLMatrix.Matrix(3,3,[0,1,2, 3,4,5, 6,7,8]);
35 | var v=new WGLMatrix.Matrix(3,1,[1,2,3]);
36 | var w=new WGLMatrix.MatrixZero(3,1);
37 |
38 | logMatrix('TEST READ MATRIX : M =', m.read()[0]);
39 | logMatrix('TEST READ VECTOR : V =', v.read()[0]);
40 |
41 | m.multiply(v, w); //do matrix operation M*V and put the result to W
42 | logMatrix('TEST MULTIPLY MATRICES : M*V =', w.read()[0]);
43 | log('( expected value : [8,26,44] )');
44 |
45 |
46 |
47 | //THIRD TEST
48 | log('\n==== THIRD TEST ====');
49 | var a=new WGLMatrix.Matrix(2,4,[1,3,5,7, 2,4,6,8]);
50 | var b=new WGLMatrix.Matrix(4,3,[1,8,9, 2,7,10, 3,6,11, 4,5,12]);
51 | var r=new WGLMatrix.MatrixZero(2,3);
52 |
53 | logMatrix('TEST READ MATRIX : A =', a.read()[0]);
54 | logMatrix('TEST READ MATRIX : B =', b.read()[0]);
55 |
56 | a.multiply(b, r); //do matrix operation A*B and put the result to R
57 | logMatrix('TEST MULTIPLY MATRICES : A*B =', r.read()[0]);
58 | log('( expected value : [50,94,178, 60,120,220] )');
59 | } //end main()
60 |
61 | function log(msg){
62 | document.getElementById('logArea').value+=msg+'\n';
63 | }
64 |
65 | function logMatrix(msg, matrixArray){
66 | var m=msg+'['+matrixArray.toString()+']';
67 | log(m);
68 | }
--------------------------------------------------------------------------------
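The expected values printed by the WGLMatrix tests can be double-checked with a few lines of plain JavaScript. This row-major reference matrix multiply (independent of WGLMatrix) reproduces [50,94,178, 60,120,220] for A(2x4)·B(4x3) and [8,26,44] for M(3x3)·V(3x1):

```js
// Sketch: row-major matrix product used to double-check the WGLMatrix test expectations
function matMul(a, rowsA, colsA, b, colsB) {
  const r = new Array(rowsA * colsB).fill(0);
  for (let i = 0; i < rowsA; ++i)
    for (let k = 0; k < colsA; ++k)
      for (let j = 0; j < colsB; ++j)
        r[i * colsB + j] += a[i * colsA + k] * b[k * colsB + j];
  return r;
}

console.log(matMul([1,3,5,7, 2,4,6,8], 2, 4, [1,8,9, 2,7,10, 3,6,11, 4,5,12], 3));
// -> [50, 94, 178, 60, 120, 220]
console.log(matMul([0,1,2, 3,4,5, 6,7,8], 3, 3, [1,2,3], 1));
// -> [8, 26, 44]
```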
/chapter5/5_MNIST/data/mnist_batch_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_0.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_1.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_10.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_11.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_12.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_13.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_14.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_15.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_16.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_17.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_18.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_19.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_2.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_20.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_3.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_4.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_5.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_6.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_7.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_8.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/data/mnist_batch_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/backstopmedia/deep-learning-browser/eff30520e1859653b1d05b5b7ea827cefa26bb1b/chapter5/5_MNIST/data/mnist_batch_9.png
--------------------------------------------------------------------------------
/chapter5/5_MNIST/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | MNIST learning demo
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
57 |
58 |
59 |
60 |
WebGL MNIST learning
61 |
62 | We have transcoded a simple neural network that learns the MNIST dataset from Python to WebGL. We use our WGLMatrix library for all linear algebra computations.
63 |
64 |
65 |
66 | The full training may take a few tens of minutes.
67 |
68 |
69 |
70 |
Controls :
71 |
The whole dataset may be too large for your graphics card's memory. If it overflows, the WebGL context is lost. You then need to refresh this page and try again with a reduced version of the dataset.
72 |
113 |
114 |
115 |
116 |
--------------------------------------------------------------------------------
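The loader that follows (mnist_loader.js) draws each 784-pixel-wide batch image onto a hidden canvas and reads it back one row at a time with `getImageData`, where every row encodes one 28x28 digit. A minimal sketch of turning one such RGBA row into a normalized input vector, assuming the grey level is stored in the red channel (an assumption, since part of the loader's inner loop was lost in extraction):

```js
// Sketch: convert one ImageData row (784 pixels, RGBA) into a Float32Array input in [0, 1]
function rowToInputVector(imageData) {
  const input = new Float32Array(784);
  for (let i = 0; i < 784; ++i) {
    input[i] = imageData.data[i * 4] / 255; // red channel assumed to hold the grey level
  }
  return input;
}

// usage with the loader's working canvas context:
// const iData = ctx.getImageData(0, line, 784, 1);
// const x = rowToInputVector(iData);
```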
/chapter5/5_MNIST/mnist_loader.js:
--------------------------------------------------------------------------------
1 | /*
2 | Javascript equivalent of https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/src/mnist_loader.py
3 | except we load original MNIST data instead of python wrapped MNIST data
4 |
5 | We do not load validation data (not used for the first example)
6 | */
7 |
8 | //closure
9 | var mnist_loader=(function(){
10 | var _data={
11 | training_data: [],
12 | test_data: []
13 | };
14 | var _dataCounter=0;
15 |
16 | //working canvas and ctx
17 | var _canvas=document.createElement('canvas');
18 | _canvas.setAttribute('width', 784);
19 | _canvas.setAttribute('height', 3000);
20 | var _ctx=_canvas.getContext('2d');
21 |
22 | function load_imgData(dataSetdivideFactor, img){
23 | //draw the image on the canvas to be able to read pixel values
24 | _ctx.drawImage(img, 0,0);
25 |
26 | for (var line=0, targetData; line<3000; line++){
27 |
28 | //where to put the tuple [X,Y] :
29 | if (_dataCounter>=60000/dataSetdivideFactor){
30 | return false; //stop loading
31 | } else if (_dataCounter<50000/dataSetdivideFactor){ //first fill training data
32 | targetData=_data.training_data;
33 | } else { //then fill test data
34 | targetData=_data.test_data;
35 | }
36 |
37 | //compute input vector
38 | var iData=_ctx.getImageData(0, line, 784, 1);
39 | var learningInputVector=new Uint8Array(784);
40 | for (var i=0; i<learningInputVector.length; ++i){
--------------------------------------------------------------------------------
/chapter5/5_MNIST/network.js:
--------------------------------------------------------------------------------
15 | for (i = a.length - 1; i > 0; i--) {
16 | j = Math.floor(Math.random() * (i + 1));
17 | x = a[i];
18 | a[i] = a[j];
19 | a[j] = x;
20 | }
21 | }
22 | // get index of the max value in an array :
23 | function argmax(arr) {
24 | for (var i = 1, max = arr[0], maxIndex=0; i < arr.length; ++i) {
25 | if (arr[i] > max) {
26 | maxIndex = i;
27 | max = arr[i];
28 | }
29 | }
30 | return maxIndex;
31 | }
32 |
33 |
34 | var that={ //public methods and objects
35 | //Network constructor :
36 | Network: function(sizes){
37 | /*The list ``sizes`` contains the number of neurons in the
38 | respective layers of the network. For example, if the list
39 | was [2, 3, 1] then it would be a three-layer network, with the
40 | first layer containing 2 neurons, the second layer 3 neurons,
41 | and the third layer 1 neuron. The biases and weights for the
42 | network are initialized randomly, using a Gaussian
43 | distribution with mean 0, and variance 1. Note that the first
44 | layer is assumed to be an input layer, and by convention we
45 | won't set any biases for those neurons, since biases are only
46 | ever used in computing the outputs from later layers.*/
47 | var self=this, i;
48 |
49 | self.num_layers = sizes.length;
50 | self.sizes=sizes;
51 | self._nConnections = sizes.length-1;
52 |
53 | for (i=0, self.biases=[], self.weights=[],
54 | self._weightsTransposed=[], self._weightsUpdated=[], self._biasesUpdated=[],
55 | self._delta_biases=[], self._delta_weights=[],
56 | self._z=[], self._y=[],
57 | self._nabla_b=[], self._nabla_w=[], self._delta_nabla_b=[], self._delta_nabla_w=[],
58 | self._nabla_bUpdated=[], self._nabla_wUpdated=[],
59 | self._actFuncPrimeZ=[], self._preDelta=[], self._delta=[];
60 | i<self._nConnections; ++i){
107 | //... => y=Infinite/Infinite = NaN
108 | WGLMatrix.addFunction('vec4 xMaj=max(x, -15.*ONE); vec4 a=(ONE+exp(-xMaj)); y=exp(-xMaj)/(a*a);', 'ACTIVATIONPRIME'); //sigmoidPrime
109 |
110 | //public dynamic methods :
111 | self.feedforward=function(a){
112 | //Return the output of the network if ``a`` is input.
113 | for (var i=0, inp=a; i<self._nConnections; ++i){
--------------------------------------------------------------------------------
/chapter5/6_MNISTimproved/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | MNIST learning improved
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
57 |
58 |
59 |
60 |
WebGL MNIST learning improved
61 |
62 | We have improved the previous MNIST implementation to make it faster than the Python/NumPy one. We still use our WGLMatrix library for all linear algebra computations.
63 |
64 |
65 |
66 | The full training may take a few tens of minutes.
67 |
68 |
69 |
70 |
Controls :
71 |
The whole dataset may be too large for your graphics card's memory. If it overflows, the WebGL context is lost. You then need to refresh this page and try again with a reduced version of the dataset.
72 |
113 |
114 |
115 |
116 |
--------------------------------------------------------------------------------
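Both network.js files guard the activation against floating-point overflow by clamping the input before the exponential (the `xMaj=max(x, -15.*ONE)` trick visible in the ACTIVATIONPRIME definition): for very negative x, exp(-x) overflows to Infinity and the derivative exp(-x)/(1+exp(-x))² evaluates to Infinity/Infinity = NaN on the GPU. The same idea in plain JavaScript, as a sketch:

```js
// Sketch: sigmoid and its derivative with the same clamping trick used in the WGLMatrix shaders
function sigmoid(x) {
  const xMaj = Math.max(x, -15);
  return 1 / (1 + Math.exp(-xMaj));
}

function sigmoidPrime(x) {
  const xMaj = Math.max(x, -15);     // without the clamp, exp(-x) overflows to Infinity...
  const a = 1 + Math.exp(-xMaj);
  return Math.exp(-xMaj) / (a * a);  // ...and Infinity / Infinity would give NaN
}

console.log(sigmoid(-1000), sigmoidPrime(-1000)); // tiny finite values instead of NaN
```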
/chapter5/6_MNISTimproved/mnist_loader.js:
--------------------------------------------------------------------------------
1 | /*
2 | Javascript equivalent of https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/src/mnist_loader.py
3 | except we load original MNIST data instead of python wrapped MNIST data
4 |
5 | We do not load validation data (not used for the first example)
6 | */
7 |
8 | "use strict";
9 |
10 | //closure
11 | var mnist_loader=(function(){
12 | var _data={
13 | training_data: [],
14 | test_data: []
15 | };
16 | var _dataCounter=0;
17 |
18 | //working canvas and ctx
19 | var _canvas=document.createElement('canvas');
20 | _canvas.setAttribute('width', 784);
21 | _canvas.setAttribute('height', 3000);
22 | var _ctx=_canvas.getContext('2d');
23 |
24 | function load_imgData(dataSetdivideFactor, img){
25 | //draw the image on the canvas to be able to read pixel values
26 | _ctx.drawImage(img, 0,0);
27 |
28 | for (var line=0, targetData; line<3000; line++){
29 |
30 | //where to put the tuple [X,Y] :
31 | if (_dataCounter>=60000/dataSetdivideFactor){
32 | return false; //stop loading
33 | } else if (_dataCounter<50000/dataSetdivideFactor){ //first fill training data
34 | targetData=_data.training_data;
35 | } else { //then fill test data
36 | targetData=_data.test_data;
37 | }
38 |
39 | //compute input vector
40 | var iData=_ctx.getImageData(0, line, 784, 1);
41 | var learningInputVector=new Uint8Array(784);
42 | for (var i=0; i<learningInputVector.length; ++i){
--------------------------------------------------------------------------------
/chapter5/6_MNISTimproved/network.js:
--------------------------------------------------------------------------------
17 | for (i = a.length - 1; i > 0; i--) {
18 | j = Math.floor(Math.random() * (i + 1));
19 | x = a[i];
20 | a[i] = a[j];
21 | a[j] = x;
22 | }
23 | }
24 | // get index of the max value in an array :
25 | function argmax(arr) {
26 | for (var i = 1, max = arr[0], maxIndex=0; i < arr.length; ++i) {
27 | if (arr[i] > max) {
28 | maxIndex = i;
29 | max = arr[i];
30 | }
31 | }
32 | return maxIndex;
33 | }
34 |
35 |
36 | var that={ //public methods and objects
37 | //Network constructor :
38 | Network: function(sizes){
39 | /*The list ``sizes`` contains the number of neurons in the
40 | respective layers of the network. For example, if the list
41 | was [2, 3, 1] then it would be a three-layer network, with the
42 | first layer containing 2 neurons, the second layer 3 neurons,
43 | and the third layer 1 neuron. The biases and weights for the
44 | network are initialized randomly, using a Gaussian
45 | distribution with mean 0, and variance 1. Note that the first
46 | layer is assumed to be an input layer, and by convention we
47 | won't set any biases for those neurons, since biases are only
48 | ever used in computing the outputs from later layers.*/
49 | var self=this, i;
50 |
51 | self.num_layers = sizes.length;
52 | self.sizes=sizes;
53 | self.inputSize=sizes[0];
54 | self.outputSize=sizes[sizes.length-1];
55 |
56 | self._nConnections = sizes.length-1;
57 |
58 | for (i=0, self.biases=[], self.weights=[],
59 | self._weightsTransposed=[], self._weightsUpdated=[], self._biasesUpdated=[],
60 | self._delta_biases=[], self._delta_weights=[],
61 | self._z=[], self._y=[],
62 | self._nabla_b=[], self._nabla_w=[], self._delta_nabla_b=[], self._delta_nabla_w=[],
63 | self._nabla_bUpdated=[], self._nabla_wUpdated=[],
64 | self._actFuncPrimeZ=[], self._preDelta=[], self._delta=[];
65 | i<self._nConnections; ++i){
111 | //... => y=Infinite/Infinite = NaN
112 | WGLMatrix.addFunction('vec4 xMaj=max(x, -15.*ONE); vec4 a=(ONE+exp(-xMaj)); y=exp(-xMaj)/(a*a);', 'ACTIVATIONPRIME'); //sigmoidPrime
113 |
114 | //public dynamic methods :
115 | self.feedforward=function(a){
116 | //Return the output of the network if ``a`` is input.
117 | for (var i=0, inp=a; i<self._nConnections; ++i){
--------------------------------------------------------------------------------