├── LICENSE.md
├── README.md
├── example.js
├── neural-network-241117.js
├── neural-network.js
└── screenshots
├── automatic_dropping_doesn't do so much.PNG
├── screenshot1.PNG
└── screenshot2.PNG
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 François Andrieux
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # An up-to-date implementation of a Neural Network in JavaScript
2 |
3 | > This library provides neural network models that can be trained with
4 | > various optimizers and visualized, with training running in its own thread.
5 | > You can see an example of what it does here:
6 | > **Article:** https://franpapers.com/en/2017-neural-network-implementation-in-javascript-by-an-example/
7 | > **Video:** https://www.youtube.com/watch?v=tIdTulicm9M
8 |
9 | Summary:
10 |
11 | 1. [Introduction](https://github.com/Spriteware/neural-network#1--introduction)
12 | 2. [Basic example](https://github.com/Spriteware/neural-network#2--basic-example)
13 | 3. [Training/validation/test dataset example](https://github.com/Spriteware/neural-network#3--trainingvalidationtest-dataset-example)
14 | 4. [What's next](https://github.com/Spriteware/neural-network#4--whats-next)
15 |
16 | ## 1 • Introduction
17 |
18 | The main idea was to build something that **helps to visualize the network and its evolution** through backpropagation. Here's an example:
19 |
20 | 
21 |
22 | **SVG** is used to provide a clean visualization, and a **simple Web Worker** handles the training part (to avoid blocking the UI thread).
23 |
24 | This library was not meant for "distribution" purposes, so it may have a thousand bugs and may not work the way you want. **Fork it!**
25 |
26 | ## 2 • Basic example
27 | It's simple; there are just way too many comments.
28 | ```javascript
29 |
30 |
31 | ////////////// Neural network initialization ////////////
32 |
33 |
34 | var _params = {
35 |
36 | // Since there is a Web Worker inside, we need to provide the URI of the script itself.
37 | // This is used by our Web Worker to import the whole library into itself.
38 | // You can provide a remote or local URL
39 | libURI: "http://localhost/neural-network.js",
40 |
41 | // The learning rate factor is really important. Try a few values to find the right one.
42 | // The more layers and neurons you have, the smaller it needs to be
43 | lr: 0.05,
44 |
45 | // The topology of our neural network. The library can handle a large number of neurons; it will just be slow.
46 | // Ex: 4 input neurons, 1 hidden layer (3 neurons) and 2 output neurons
47 | layers: [4, 3, 2],
48 | // Be creative: (but remember to watch the console and to adapt your learning rate)
49 | // layers: [2, 6, 6, 6, 6, 6, 6, 6, 6, 6, 2],
50 | // layers: [2, 150, 2]
51 |
52 | // Activation function used for the hidden layers. Input and output neurons have a linear activation function
53 | // If not specified, "linear" is the default value. activationParams are just factors that impact some activation functions (e.g. PReLU)
54 | // Currently possible values:
55 | // linear, sigmoid, tanh, relu, prelu
56 | activation: "linear",
57 | activationParams: {alpha: 0.1}, // since the linear activation doesn't depend on any parameter, this property is useless here
58 |
59 | // Optimizer used. The library currently implements Momentum, Nesterov Accelerated Gradient, AdaGrad, AdaDelta and Adam
60 | // Best results are found with 'nag' and 'adam'
61 | optimizer: "nag", // momentum, nag, adagrad, adadelta or adam
62 | optimizerParams: { alpha: 0.7, beta1: 0.9, beta2: 0.99 }, // alpha for nag and adadelta, betas for adam
63 | };
64 |
65 | // Initialize the neural network
66 | var brain = new Network(_params);
67 |
68 | // NeuralNetwork.createVisualization() returns the whole SVG visualization as a DOM element
69 | // As an example, you can directly append it to your current document
70 | document.body.appendChild( brain.createVisualization() );
71 |
72 | // If you provide a training dataset, you can pass it to the Neural Net and train it with it
73 | if (typeof _imported_training_set !== 'undefined')
74 | {
75 | // 'epochs' is how many times you want to feed the same training dataset to the network
76 | var _epochs = 500;
77 |
78 | // NeuralNetwork.train accepts your parsed datasets and returns a DOM object for visualizing the training.
79 | // The raw data (before parsing) looks like a CSV file.
80 | // You can also visit the examples on franpapers.com to see what a dataset looks like
81 |
82 | var training_set = typeof _imported_training_set !== "undefined" ? Utils.static.parseTrainingData(_imported_training_set) : undefined;
83 | var validation_set = typeof _imported_validation_set !== "undefined" ? Utils.static.parseTrainingData(_imported_validation_set) : undefined;
84 | var test_set = typeof _imported_test_set !== "undefined" ? Utils.static.parseTrainingData(_imported_test_set) : undefined;
85 |
86 | // Launch training
87 | var graph = brain.train({
88 | training_set: training_set,
89 | validation_set: validation_set,
90 | test_set: test_set,
91 |
92 | epochs: _epochs,
93 | dropout: false, // Automatic dropout
94 | shuffle: true, // Shuffle the training set at each epoch
95 | visualize: true // If false, it doesn't return the DOM element
96 | });
97 |
98 | // Add the current training visualization to your document
99 | document.body.appendChild(graph);
100 | }
101 |
102 |
103 | ////////////// Neural Network Core ////////////
104 |
105 |
106 | // Be careful to have normalized inputs and targets, otherwise you will see the errors jump around
107 | // Here is an example of inputs/targets values
108 | var inputs = [0.3, 0.5, 0.1, 0.9];
109 | var targets = [0, 1];
110 |
111 | // NeuralNetwork.feed() returns the output layer
112 | var output_neurons = brain.feed(inputs);
113 | brain.backpropagate(targets);
114 |
115 | // And that's it! Of course, if you already trained your NN,
116 | // you don't have to call NeuralNetwork.backpropagate() anymore
117 |
118 |
119 | ```
120 |
121 | ## 3 • Training/validation/test dataset example
122 | The datasets look like a CSV file, but with a few differences. Every `inputs/targets` pair is separated by a `;`. `inputs` and `targets` are separated by a `:`. Individual `inputs` or `targets` values are separated by a space. Here is an example:
123 | ```
124 | input1 input2 input3 input4 : target1 target2 ;
125 | input1 input2 input3 input4 : target1 target2 ;
126 | ```
127 |
128 | Usually I save everything into a JS variable and include it as a normal script in my page. This is why you can see the "_imported_training_set" variable in the code above.
129 | ```javascript
130 | var _imported_training_set = "0 0.004012032080192407 0 0.004012032080192407 : 6.123233995736767e-17 -1;\
131 | 0.003901877324468339 0.0009301870879891982 0.00034437034884393647 -0.0009193694393909713 : -0.9726239602750568 -0.23238466364815152;\
132 | 0.003968835720993713 0.0005847808693595358 0.00006695839652537394 -0.0003454062186296625 : -0.9892652288476347 -0.14613112944556783;\
133 | 0.00380007257485393 0.0012799354131641794 -0.00016876314613978316 0.0006951545438046436 : -0.9473190861218562 0.32029135028790773;\
134 | ";
135 | ```
136 |
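     | Back in your page, that string can be parsed into the array of `{inputs, targets}` objects the library works with. A minimal sketch, using the library's own `Utils.static.parseTrainingData`:
     | ```javascript
     | // Parse the raw string into [{inputs: [...], targets: [...]}, ...]
     | var training_set = Utils.static.parseTrainingData(_imported_training_set);
     |
     | console.log(training_set.length);     // 4 with the sample above
     | console.log(training_set[0].inputs);  // [0, 0.004012032080192407, 0, 0.004012032080192407]
     | console.log(training_set[0].targets); // [6.123233995736767e-17, -1]
     | ```
     |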
137 | ### Helpers for creating a training dataset
138 | There are a few helper functions if you want to generate your dataset while you train (or not) your neural network. They live under `Utils.static.xxx` and you can call them from your script or your console. A usage sketch follows the code block below.
139 | ```javascript
140 |
141 | // Add inputs and associated targets to your training dataset.
142 | // Both parameters are arrays. This incrementally builds the dataset as a string
143 | Utils.static.addIntoTraining(inputs, targets);
144 |
145 | // Get the created dataset as a string
146 | var training_data_imported = Utils.static.getTrainingData();
147 |
148 | // Or, if you want to export it directly into the document (there's a 'document.body.appendChild' inside)
149 | Utils.static.exportTrainingData();
150 |
151 | // Clear the current dataset
152 | Utils.static.clearTrainingData();
153 | ```
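     |
     | As a usage sketch (the loop body below is illustrative; plug in your own inputs and ground truth):
     | ```javascript
     | // Record samples on the fly, then dump the whole dataset as a string
     | for (var i = 0; i < 100; i++) {
     |     var inputs = [Math.random(), Math.random(), Math.random(), Math.random()];
     |     var targets = [0, 1]; // your ground truth here
     |     Utils.static.addIntoTraining(inputs, targets);
     | }
     |
     | Utils.static.exportTrainingData(); // appends a <textarea> containing the dataset to the document
     | ```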
154 |
155 | ## 4 • What's next?
156 |
157 | And that's it.
158 | You can check my other GitHub repo "[Spriteware/machinelearning](https://github.com/Spriteware/machinelearning)" to see more implementation examples.
159 |
160 | If you like this work, don't hesitate to send me a friendly message on twitter [@Spriteware](https://twitter.com/Spriteware).
161 | You can also visit my blog: [franpapers.com](https://franpapers.com)
162 |
163 |
--------------------------------------------------------------------------------
/example.js:
--------------------------------------------------------------------------------
1 |
2 | ////////////// Neural network initialization ////////////
3 |
4 |
5 | var _params = {
6 |
7 | // Since there is a Web Worker inside, we need to provide the URI of the script itself.
8 | // This is used by our Web Worker to import the whole library into itself.
9 | // You can provide a remote or local URL
10 | libURI: "http://localhost/neural-network.js",
11 |
12 | // The learning rate factor is really important. Try a few values to find the right one.
13 | // The more layers and neurons you have, the smaller it needs to be
14 | // Currently, momentum is not working. Specify it as 0
15 | lr: 0.005,
16 | momentum: 0,
17 |
18 | // The topology of our neural network. Ex: 4 input neurons, 1 hidden layer (3 neurons) and 2 output neurons
19 | layers: [4, 3, 2],
20 | // Be creative: (but remember to watch the console and to adapt your learning rate)
21 | // layers: [2, 6, 6, 6, 6, 6, 6, 6, 6, 6, 2],
22 | // layers: [5, 4, 3, 2, 1],
23 | // layers: [2, 40, 2]
24 |
25 | // Activation function used for the hidden layers. Input and output neurons have a linear activation function
26 | // If not specified, "linear" is the default value. activationParams are just factors that impact some activation functions (e.g. PReLU)
27 | // Currently possible values:
28 | // linear, sigmoid, tanh, relu, prelu
29 | activation: "linear",
30 | activationParams: {alpha: 0.1}
31 | };
32 |
33 | // Initialize the neural network
34 | var brain = new Network(_params);
35 |
36 | // NeuralNetwork.createVisualization() returns the whole SVG visualization as a DOM element
37 | // As an example, you can directly append it to your current document
38 | document.body.appendChild( brain.createVisualization() );
39 |
40 | // If you provide a training dataset, you can pass it to the Neural Net and train it with it
41 | if (typeof training_data_imported !== 'undefined')
42 | {
43 | // 'epochs' is how many times you want to feed the same training dataset to the network
44 | var _epochs = 500;
45 |
46 | // NeuralNetwork.train accepts a string which contains your whole dataset, and returns a DOM object for visualizing the training.
47 | // Please see Utils.static.xxxx to learn how to produce a dataset, and how datasets are formatted.
48 | // You can also visit the examples on franpapers.com to see what a dataset looks like
49 | // It accepts something that looks like a CSV file.
50 | // You can deactivate visualization if you don't need it
51 |
52 | var training_visu = brain.train({
53 | data: training_data_imported,
54 | epochs: _epochs,
55 | visualize: true
56 | });
57 |
58 | // Add the current training visualization to your document
59 | document.body.appendChild(training_visu);
60 | }
61 |
62 |
63 | ////////////// Neural network core ////////////
64 |
65 |
66 | // Be careful to have normalized inputs and targets, otherwise you will see the errors jump around
67 | // Here is an example of inputs/targets values
68 | var inputs = [0.3, 0.5, 0.1, 0.9];
69 | var targets = [0, 1];
70 |
71 | // NeuralNetwork.feed() returns the output layer
72 | var output_neurons = brain.feed(inputs);
73 | brain.backpropagate(targets);
74 |
75 | // And that's it! Of course, if you already trained your NN,
76 | // maybe you don't have to call NeuralNetwork.backpropagate() anymore
77 |
78 |
--------------------------------------------------------------------------------
/neural-network-241117.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | const _AVAILABLE_OPTIMIZERS = ["momentum", "nag", "adagrad", "adadelta", "adam"];
4 | const _WEIGHT_RANDOM_COEFF = 1; // must be 1 if we want to keep a normal distribution centered on 0
5 | const _BIAIS_RANDOM_COEFF = 0.0; // usually, can be 0 or 0.1. See http://cs231n.github.io/neural-networks-2/
6 | const _DROPOUT_PROBABILITY = 0.5; // usually a good value also
7 | const _EPSILON = 1e-8;
8 |
9 | const _TRAINING_GATHER_ALL_THRESHOLD = 100000;
10 | const _TRAINING_DROPOUT_EPOCHS_THRESHOLD = 200;
11 | const _TRAINING_DROPOUT_MEAN_THRESHOLD = 0.001;
12 |
13 | const _DEFAULT_TRAINING_BACKPROPAGATE = true;
14 | const _DEFAULT_TRAINING_DROPOUT = false;
15 | const _DEFAULT_TRAINING_SHUFFLE = true;
16 |
17 | const _WORKER_TRAINING_PENDING = 0;
18 | const _WORKER_TRAINING_OVER = 1;
19 |
20 | const _ERROR_VALUE_TOO_HIGH = 100000;
21 | const _WEIGHT_VALUE_TOO_HIGH = 10000;
22 |
23 | const _CANVAS_GRAPH_DEFAULT_WIDTH = 600;
24 | const _CANVAS_GRAPH_DEFAULT_HEIGHT = 100;
25 | const _CANVAS_GRAPH_WINDOW_FACTOR = 1 / 0.9;
26 | const _CANVAS_GRAPH_SMOOTH_FACTOR = 1 / 20;
27 | const _CANVAS_GRAPH_SEPARATE_EPOCHS_THRESHOLD = 20;
28 |
29 | const _SVG_STROKE_WIDTH = 4;
30 | const _SVG_CIRCLE_RADIUS = 15;
31 | const _SVG_CIRCLE_COLOR_DEFAULT = "#ffe5e5";
32 | const _SVG_CIRCLE_COLOR_DROPPED = "#c7c7c7";
33 | const _SVG_MAX_WEIGHTS_DISPLAY_TEXT = 4;
34 |
35 | const _COLOR_ASPHALT = "rgb(52, 73, 94)";
36 | const _COLOR_PURPLE = "rgb(142, 68, 173)";
37 | const _COLOR_BLUE = "rgb(52, 152, 219)";
38 | const _COLOR_GREEN = "rgb(26, 188, 156)";
39 |
40 | /////////////////////////////// Utils - various functions
41 |
42 | var Utils = {
static: {}, // yes, it's just sugar so it looks good in the console
44 | trainingData: "",
45 | trainingSize: 0,
46 | trainingMaxSize: 10000
47 | };
48 |
49 | Utils.static.tooltipOn = function(tooltip, event, object) {
50 |
51 | tooltip.object = object;
52 | tooltip.setAttribute("class", "");
53 | tooltip.style.left = (event.pageX+10) + "px";
54 | tooltip.style.top = (event.pageY+5) + "px";
55 |
56 | Utils.static.tooltipUpdate(tooltip, object); // tooltipUpdate expects (tooltip, object)
57 | };
58 |
59 | Utils.static.tooltipUpdate = function(tooltip, object) {
60 |
61 | if (typeof object !== "object") {
62 | tooltip.object = object;
63 | return;
64 | }
65 |
66 | var buffer = "";
67 |
68 | for (var key in object)
69 | if (object.hasOwnProperty(key) && key !== "object")
70 | buffer += key + ": " + object[key] + "<br />";
71 |
72 | tooltip.innerHTML = buffer;
73 | };
74 |
75 | Utils.static.tooltipOff = function(tooltip) {
76 |
77 | tooltip.object = undefined;
78 | tooltip.setAttribute("class", "off");
79 | };
80 |
81 | ////////////
82 |
83 | Utils.static.setTrainingSize = function(size) {
84 |
85 | Utils.trainingMaxSize = size;
86 | };
87 |
88 | Utils.static.addIntoTraining = function(inputs, targets) {
89 |
90 | // Build training data (as string) for future exportation
91 | if (Utils.trainingSize <= Utils.trainingMaxSize) {
92 | Utils.trainingData += inputs.join(" ") + " : " + targets.join(" ") + ";\\\n";
93 | Utils.trainingSize++;
94 | return true;
95 | }
96 |
97 | return false;
98 | };
99 |
100 | Utils.static.exportTrainingData = function() {
101 |
102 | console.info("Saving training data...", "Reading 'training_data'");
103 |
104 | var output = document.createElement("textarea");
105 | output.innerHTML = "var imported_training_set = \"" + Utils.trainingData + "\";";
106 | document.body.appendChild( output );
107 |
108 | return "Export completed for " + Utils.trainingSize + " entries.";
109 | };
110 |
111 | Utils.static.getTrainingData = function() {
112 |
113 | return Utils.trainingData;
114 | };
115 |
116 | Utils.static.clearTrainingData = function() {
117 |
118 | Utils.trainingData = "";
119 | };
120 |
121 | Utils.static.parseTrainingData = function(raw) {
122 |
123 | // Parse training data
124 | var i, l, entry, splitted = raw.split(";");
125 | var training_data = [], training_size;
126 |
127 | for (i = 0, l = splitted.length; i < l; i++)
128 | {
129 | entry = splitted[i].trim().split(":");
130 | if (entry.length !== 2)
131 | break;
132 |
133 | training_data.push({
134 | inputs: entry[0].trim().split(" ").map(parseFloat),
135 | targets: entry[1].trim().split(" ").map(parseFloat)
136 | });
137 | }
138 |
139 | return training_data;
140 | };
141 |
142 | ////////////////////////////////// Neural Network core
143 |
144 | function Neuron(id, layer, biais) {
145 |
146 | this.id = id;
147 | this.layer = layer;
148 | this.biais = biais || 0;
149 | this.biaisMomentum = 0;
150 | this.biaisGradient = 0;
151 | this.dropped = false;
152 |
153 | this.agregation = undefined;
154 | this.output = undefined;
155 | this.error = undefined;
156 |
157 | this.network = undefined; // link to its network, indispensable for special activation & derivation
158 | this.activation = undefined;
159 | this.derivative = undefined;
160 |
161 | // Input/output weights as cache (because Network.getWeight method is repeated a lot in feed and backprop, it takes time)
162 | this.inputWeightsIndex = undefined;
163 | this.outputWeightsIndex = undefined;
164 | }
165 |
166 | function Network(params) {
167 |
168 | // Required variables: lr, layers
169 | this.params = params;
170 |
171 | this.lr = undefined; // Learning rate
172 | this.layers = undefined;
173 | this.optimizer = undefined; // must be included in _AVAILABLE_OPTIMIZERS
174 | this.optimizerParams = undefined; // example: momentum rate will be {alpha: X}
175 | this.activation = undefined; // activation function for hidden layer
176 | this.activationParams = undefined;
177 |
178 | this.neurons = undefined;
179 | this.weights = undefined;
180 | this.momentums = undefined; // momentum coefficients at t-1
181 | this.gradients = undefined; // gradients squared for Adagrad
182 | this.output = undefined; // current output array
183 |
184 | // Caching variables:
185 | this.layersSum = undefined;
186 | this.layersMul = undefined;
187 | this.nbLayers = undefined;
188 | this.nbNeurons = undefined;
189 | this.nbWeights = undefined;
190 |
191 | // Stats-purpose:
192 | this.iterations = 0;
193 | this.maxWeight = 0;
194 | this.outputError = 0;
195 | this.globalError = 0;
196 | this.avgWeightsPerNeuron = 0;
197 |
198 | // Visualization:
199 | this.svgVisualization = false;
200 | this.DOM = {
201 | svg: undefined,
202 | tooltip: undefined,
203 |
204 | neuronsCircles: undefined,
205 | weightTexts: undefined,
206 | inputTexts: undefined,
207 | outputTexts: undefined,
208 | weightCurves: undefined
209 | };
210 |
211 | // Necessary for avoiding problems with Cross Origin (Web Worker)
212 | this.libURI = undefined;
213 |
214 | this.loadParams(params);
215 | this.initialize();
216 | }
217 |
218 | Network.prototype.loadParams = function(params) {
219 |
220 | for (var key in params)
221 | if (this.hasOwnProperty(key) && this[key] === undefined)
222 | this[key] = params[key];
223 |
224 | console.log("Loaded params", this);
225 | };
226 |
227 | Network.prototype.exportParams = function() {
228 |
229 | // Ensure to update params if they were modified on live
230 | for (var key in this.params)
231 | if (this.hasOwnProperty(key) && this[key] !== undefined)
232 | this.params[key] = this[key];
233 |
234 | return this.params;
235 | };
236 |
237 | Network.prototype.exportWeights = function() {
238 | return this.weights;
239 | };
240 |
241 | Network.prototype.importWeights = function(values) {
242 |
243 | this.weights = values;
244 | this.momentums.fill(0);
245 | this.gradients.fill(0);
246 | this.iterations = 0;
247 | };
248 |
249 | Network.prototype.exportBiais = function() {
250 |
251 | // We ensure to make a copy and not a reference here
252 | var values = Array(this.nbNeurons);
253 |
254 | for (var i = 0; i < this.nbNeurons; i++)
255 | values[i] = this.neurons[i].biais;
256 |
257 | return values;
258 | };
259 |
260 | Network.prototype.importBiais = function(values) {
261 |
262 | for (var i = 0; i < this.nbNeurons; i++) {
263 | this.neurons[i].biais = values[i];
264 | this.neurons[i].biaisMomentum = 0;
265 | this.neurons[i].biaisGradient = 0;
266 | }
267 | };
268 |
269 | Network.prototype.initialize = function() {
270 |
271 | if (this.libURI === undefined)
272 | throw new NetException("Undefined or invalid lib URI. Necessary for avoiding Cross Origin problems. Use https://domain.com/.../neural-net.js notation", {libURI: this.libURI});
273 |
274 | if (this.lr === undefined || this.lr <= 0)
275 | throw new NetException("Undefined or invalid learning rate", {lr: this.lr});
276 |
277 | if (this.layers === undefined || this.layers.length <= 1)
278 | throw new NetException("Undefined or unsufficient layers. At least, you must have a input and a output layer.", {layers: this.layers});
279 |
280 | if (this.optimizer !== undefined && !_AVAILABLE_OPTIMIZERS.includes(this.optimizer))
281 | throw new NetException("Invalid optimizer. Available optimizers = ", { available: _AVAILABLE_OPTIMIZERS, optimizer: this.optimizer });
282 |
283 | if ((this.optimizer === "momentum" || this.optimizer === "nag") && (this.optimizerParams === undefined || this.optimizerParams.alpha === undefined || this.optimizerParams.alpha < 0 || this.optimizerParams.alpha > 1))
284 | throw new NetException("Undefined or invalid momentum rate (must be between 0 and 1 both included) ", {optimizer: this.optimizer, optimizerParams: this.optimizerParams});
285 |
286 | if (this.optimizer === "adam" && (this.optimizerParams === undefined || this.optimizerParams.beta1 === undefined || this.optimizerParams.beta2 === undefined || this.optimizerParams.beta1 < 0 || this.optimizerParams.beta1 > 1 || this.optimizerParams.beta2 < 0 || this.optimizerParams.beta2 > 1))
287 | throw new NetException("Undefined or invalid (beta1,beta2) for Adam optimizer", {optimizer: this.optimizer, optimizerParams: this.optimizerParams});
288 |
289 | var i, j, l, sum, mul, tmp;
290 | var curr_layer = 0;
291 |
292 | // Initialization
293 | this.iterations = 0;
294 | this.nbLayers = this.layers.length;
295 | this.layersSum = [];
296 | this.layersMul = [];
297 | this.neurons = [];
298 | this.weights = [];
299 | this.momentums = [];
300 | this.gradients = [];
301 |
302 | // Prepare layers relative computation
303 | for (i = 0, sum = 0, mul = 1; i < this.nbLayers; i++) {
304 | sum += this.layers[i];
305 | mul = (this.layers[i-1] || 0) * this.layers[i];
306 | this.layersSum.push(sum);
307 | this.layersMul.push(mul + (this.layersMul[i-1] || 0));
308 | // [0] will be 0, because layersMul is used to know how many weights there are before a layer, and there are none before layer 0
309 | }
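     | // Worked example, assuming layers = [4, 3, 2]:
     | //   layersSum = [4, 7, 9]   (cumulative neuron counts)
     | //   layersMul = [0, 12, 18] (cumulative weight counts: 4*3 = 12, then 12 + 3*2 = 18)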
310 |
311 | // Compute and put lengths in cache
312 | this.nbNeurons = sum;
313 | this.nbWeights = this.layersMul[this.layersMul.length-1];
314 | this.avgWeightsPerNeuron = this.nbWeights / this.nbNeurons;
315 |
316 | // Create weights, momentum and gradients
317 | for (i = 0; i < this.nbWeights; i++) {
318 | this.weights[i] = this.static_randomWeight();
319 | this.momentums.push(0);
320 | this.gradients.push(0);
321 | }
322 |
323 | // Create neurons
324 | var index, neuron, prev_neurons = [], next_neurons = [];
325 |
326 | for (curr_layer = 0, i = 0; i < this.nbNeurons; i++)
327 | {
328 | neuron = new Neuron(i, i >= this.layersSum[curr_layer] ? ++curr_layer : curr_layer, this.static_randomBiais());
329 | neuron.network = this;
330 | neuron.activation = this.static_linearActivation;
331 | neuron.derivative = this.static_linearDerivative;
332 | this.neurons.push(neuron);
333 | }
334 |
335 | // Set hidden layer activation functions
336 | // (separated from the loop above because we don't want input and output layers to have an activation function, by default)
337 | switch (this.activation) {
338 | case "tanh":
339 | this.setHiddenLayerToActivation(this.static_tanhActivation, this.static_tanhDerivative);
340 | break;
341 |
342 | case "sigmoid":
343 | this.setHiddenLayerToActivation(this.static_sigmoidActivation, this.static_sigmoidDerivative);
344 | break;
345 |
346 | case "relu":
347 | this.setHiddenLayerToActivation(this.static_reluActivation, this.static_reluDerivative);
348 | break;
349 |
350 | case "prelu":
351 | this.setHiddenLayerToActivation(this.static_preluActivation, this.static_preluDerivative);
352 | break;
353 |
354 | default:
355 | this.setHiddenLayerToActivation(this.static_linearActivation, this.static_linearDerivative);
356 | }
357 |
358 | // 1- Assign weights index into neuron's cache
359 | // 2- Improve the weight initialization by ensuring that the variance is equal to 1
360 | for (curr_layer = -1, i = 0; i < this.nbNeurons; i++)
361 | {
362 | neuron = this.neurons[i];
363 |
364 | if (neuron.layer !== curr_layer) {
365 | curr_layer++;
366 | prev_neurons = curr_layer > 0 ? this.getNeuronsInLayer(curr_layer-1) : [];
367 | next_neurons = curr_layer < this.nbLayers-1 ? this.getNeuronsInLayer(curr_layer+1) : [];
368 | }
369 |
370 | neuron.inputWeightsIndex = Array(prev_neurons.length);
371 | neuron.outputWeightsIndex = Array(next_neurons.length);
372 |
373 | // Input weights
374 | for (j = 0, l = prev_neurons.length; j < l; j++) {
375 | neuron.inputWeightsIndex[j] = this.getWeightIndex(prev_neurons[j], neuron);
376 | this.weights[neuron.inputWeightsIndex[j]] *= Math.sqrt(2 / l);
377 | }
378 |
379 | // Output weights
380 | for (j = 0, l = next_neurons.length; j < l; j++)
381 | neuron.outputWeightsIndex[j] = this.getWeightIndex(neuron, next_neurons[j]);
382 | }
383 |
384 | // Initialize brain.output to zeros, to avoid training problems
385 | this.output = Array(this.layers[this.nbLayers - 1]);
386 | this.output.fill(0);
387 |
388 | // Display the complexity of this new NN (weights + biais)
389 | var parameters = this.weights.length + this.nbNeurons;
390 | console.info("This neural network has %d parameters.", parameters);
391 | };
392 |
393 | Network.prototype.createVisualization = function() {
394 |
395 | var i, l, l2, n, index;
396 | var x1, y1, x2, y2, max_y1 = 0;
397 | var neuron1, neuron2, is_input;
398 | var DOM_tmp, DOM_weight;
399 |
400 | var _MARGIN_X = 150;
401 | var _MARGIN_Y = 75;
402 | var that = this;
403 |
404 | // Create DOM elements
405 | var container = document.createElement("div");
406 | this.DOM.svg = document.createElementNS("http://www.w3.org/2000/svg", "svg");
407 | this.DOM.tooltip = document.createElement("div");
408 | this.DOM.tooltip.setAttribute("id", "tooltip");
409 | this.DOM.tooltip.setAttribute("class", "off");
410 | container.appendChild(this.DOM.svg);
411 | container.appendChild(this.DOM.tooltip);
412 |
413 | this.DOM.neuronsCircles = [];
414 | this.DOM.weightTexts = [];
415 | this.DOM.inputTexts = [];
416 | this.DOM.outputTexts = [];
417 | this.DOM.weightCurves = [];
418 |
419 | // Computing functions & listeners callbacks
420 | function calcX(neuron) {
421 | return (neuron.layer + 1) * _MARGIN_X;
422 | }
423 |
424 | function calcY(neuron) {
425 | return (neuron.id - (that.layersSum[neuron.layer-1] || 0)) * _MARGIN_Y + _MARGIN_Y / 2;
426 | }
427 |
428 | function neuronTooltipOn(event) {
429 | Utils.static.tooltipOn( that.DOM.tooltip, event, that.neurons[event.target.getAttribute("data-object")] );
430 | }
431 |
432 | function neuronTooltipOff(event) {
433 | Utils.static.tooltipOff( that.DOM.tooltip );
434 | }
435 |
436 | // Fetching every neuron
437 | for (i = 0, l = this.neurons.length; i < l; i++)
438 | {
439 | neuron1 = this.neurons[i];
440 | x1 = calcX(neuron1);
441 | y1 = calcY(neuron1);
442 |
443 | // Fetching neurons from next layer for weights
444 | for (n = 0, l2 = (this.layers[neuron1.layer + 1] || 0); n < l2; n++)
445 | {
446 | neuron2 = this.neurons[this.layersSum[ neuron1.layer ] + n];
447 | index = this.getWeightIndex(neuron1, neuron2);
448 | x2 = calcX(neuron2);
449 | y2 = calcY(neuron2);
450 |
451 | // Creating SVG weights
452 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "path");
453 | DOM_tmp.setAttribute("class", "weight");
454 | DOM_tmp.setAttribute("data-object", index);
455 | DOM_tmp.setAttribute("d", "M" + x1 + "," + y1 +" C" + (x1 + _MARGIN_X/2) + "," + y1 + " " + (x1 + _MARGIN_X/2) + "," + y2 + " " + x2 + "," + y2);
456 | DOM_tmp.setAttribute("stroke-width", _SVG_STROKE_WIDTH);
457 |
458 | this.DOM.svg.appendChild(DOM_tmp);
459 | this.DOM.weightCurves.push(DOM_tmp);
460 |
461 | // Creating SVG weight Text
462 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "text");
463 | DOM_tmp.setAttribute("class", "weight-text");
464 | DOM_tmp.setAttribute("data-object", index);
465 | DOM_tmp.setAttribute("x", x1 + (x2 - x1) * 0.2);
466 | DOM_tmp.setAttribute("y", y1 + (y2 - y1) * 0.2);
467 |
468 | this.DOM.weightTexts.push(DOM_tmp);
469 | }
470 |
471 | // Creating SVG input/output lines and text
472 | if (neuron1.layer === 0 || neuron1.layer === this.nbLayers-1)
473 | {
474 | is_input = neuron1.layer === 0 ? 1 : -1;
475 |
476 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "path");
477 | DOM_tmp.setAttribute("class", "weight");
478 | DOM_tmp.setAttribute("d", "M" + x1 + "," + y1 +" L" + (x1 - _MARGIN_X / 4 * is_input) + "," + y1);
479 |
480 | this.DOM.svg.appendChild(DOM_tmp);
481 |
482 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "text");
483 | DOM_tmp.setAttribute("class", is_input === 1 ? "input-text" : "output-text");
484 | DOM_tmp.setAttribute("x", is_input === 1 ? x1 - _MARGIN_X / 1.8 : x1 + _MARGIN_X / 3);
485 | DOM_tmp.setAttribute("y", y1 + 5);
486 |
487 | if (is_input === 1)
488 | this.DOM.inputTexts.push(DOM_tmp);
489 | else
490 | this.DOM.outputTexts.push(DOM_tmp);
491 | }
492 |
493 | // Creating SVG neuron
494 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "circle");
495 | DOM_tmp.setAttribute("class", "neuron");
496 | DOM_tmp.setAttribute("data-object", neuron1.id);
497 | DOM_tmp.setAttribute("cx", x1);
498 | DOM_tmp.setAttribute("cy", y1);
499 | DOM_tmp.setAttribute("r", _SVG_CIRCLE_RADIUS);
500 | DOM_tmp.setAttribute("fill", _SVG_CIRCLE_COLOR_DEFAULT);
501 | DOM_tmp.addEventListener("mousemove", neuronTooltipOn);
502 | DOM_tmp.addEventListener("mouseout", neuronTooltipOff);
503 |
504 | this.DOM.svg.appendChild(DOM_tmp);
505 | this.DOM.neuronsCircles.push(DOM_tmp);
506 |
507 | max_y1 = max_y1 < y1 ? y1 : max_y1;
508 | }
509 |
510 | // We stretch our svg document (here x2 is supposed to be the maximum possible)
511 | var width = x2 + _MARGIN_X, height = max_y1 + _MARGIN_Y / 2, scale = 1.5;
512 | this.DOM.svg.setAttribute("width", width >= window.innerWidth ? width/scale : width);
513 | this.DOM.svg.setAttribute("height", width >= window.innerWidth ? height/scale : height);
514 | this.DOM.svg.setAttribute("viewBox", "0 0 " + width + " " + height);
515 |
516 | // Push text elements on top of everything
517 | var svg_texts = this.DOM.outputTexts.concat( this.DOM.inputTexts.concat( this.DOM.weightTexts ));
518 |
519 | for (i = 0, l = svg_texts.length; i < l; i++)
520 | this.DOM.svg.appendChild( svg_texts[i] );
521 |
522 | this.svgVisualization = true;
523 | console.info("SVG visualization ready");
524 |
525 | return container;
526 | };
527 |
528 | Network.prototype.visualize = function(inputs, precision) {
529 |
530 |
531 | if (!this.svgVisualization)
532 | throw new NetException("SVG Visualization is not available", {network: this});
533 |
534 | if (!inputs || inputs.length !== this.layers[0])
535 | throw new NetException("Incorrect inputs (undefined or incorrect length)", {inputs: inputs, layer: this.layers[0]});
536 |
537 | var i, l;
538 | var output_neurons = this.getNeuronsInLayer( this.nbLayers-1 );
539 | precision = precision || 1;
540 |
541 | // Update SVG text inputs
542 | for (i = 0, l = this.DOM.inputTexts.length; i < l; i++)
543 | this.DOM.inputTexts[i].innerHTML = inputs[i].toFixed(precision);
544 |
545 | // Update SVG text outputs
546 | for (i = 0, l = this.DOM.outputTexts.length; i < l; i++)
547 | this.DOM.outputTexts[i].innerHTML = output_neurons[i].output ? output_neurons[i].output.toFixed(precision) : output_neurons[i].output;
548 |
549 | // Update SVG weights
550 | for (i = 0, l = this.nbWeights; i < l; i++) {
551 | this.DOM.weightCurves[i].setAttribute("stroke-width", Math.abs(this.weights[i]) / this.maxWeight * _SVG_STROKE_WIDTH);
552 | if (this.avgWeightsPerNeuron < _SVG_MAX_WEIGHTS_DISPLAY_TEXT)
553 | this.DOM.weightTexts[i].innerHTML = this.weights[i].toFixed(4);
554 | }
555 |
556 | // Update tooltip
557 | if (this.DOM.tooltip.object !== undefined)
558 | Utils.static.tooltipUpdate(this.DOM.tooltip, this.DOM.tooltip.object);
559 | };
560 |
561 | Network.prototype.feed = function(inputs) {
562 |
563 | if (!inputs || inputs.length !== this.layers[0])
564 | throw new NetException("Incorrect inputs", {inputs: inputs, layer: this.layers[0]});
565 |
566 | var index, n, l, sum, neuron, prev_neurons; // neurons from previous layer
567 | var curr_layer = 0;
568 |
569 | // Input layer filling
570 | for (index = 0; index < this.layers[0]; index++)
571 | this.neurons[index].output = inputs[index];
572 |
573 | // Fetching neurons from second layer (even if curr_layer equals 0, it'll be changed directly)
574 | for (index = this.layers[0]; index < this.nbNeurons; index++)
575 | {
576 | neuron = this.neurons[index];
577 |
578 | if (neuron.dropped)
579 | continue;
580 |
581 | // Update if necessary all previous layer neurons
582 | if (prev_neurons === undefined || neuron.layer !== curr_layer)
583 | prev_neurons = this.getNeuronsInLayer(curr_layer++);
584 |
585 | // Computing w1*x1 + ... + wn*xn
586 | for (sum = 0, n = 0, l = prev_neurons.length; n < l; n++)
587 | if (!prev_neurons[n].dropped)
588 | sum += this.weights[neuron.inputWeightsIndex[n]] * prev_neurons[n].output;
589 |
590 | // Updating output
591 | neuron.agregation = sum + neuron.biais;
592 | neuron.output = neuron.activation(neuron.agregation);
593 |
594 | if (!isFinite(neuron.output))
595 | throw new NetException("Non finite or too high output. You may have a problem in your code", {neuron: neuron});
596 | }
597 |
598 | // Update network output
599 | var neurons = this.getNeuronsInLayer(this.nbLayers-1);
600 | for (n = 0, l = this.layers[this.nbLayers-1]; n < l; n++)
601 | this.output[n] = neurons[n].output;
602 |
603 | // Return output neurons
604 | return neurons;
605 | };
606 |
607 | Network.prototype.loss = function(targets) {
608 |
609 | var outputs_neurons = this.getNeuronsInLayer(this.nbLayers - 1);
610 |
611 | if (!targets || !outputs_neurons || targets.length !== outputs_neurons.length)
612 | throw new NetException("Incoherent targets for current outputs", { targets: targets, outputs_neurons: outputs_neurons });
613 |
614 | // Compute output error with our loss function
615 | // https://en.wikipedia.org/wiki/Backpropagation
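     | // Concretely, per output neuron: error = (target - output) * derivative(agregation),
     | // and outputError accumulates 1/2 * error^2 over the whole output layer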
616 |
617 | var n, l, neuron;
618 | this.outputError = 0;
619 |
620 | // Output layer filling: err = (expected-obtained)
621 | for (n = 0, l = outputs_neurons.length; n < l; n++) {
622 | neuron = outputs_neurons[n];
623 | neuron.error = (targets[n] - neuron.output) * neuron.derivative(neuron.agregation);
624 | this.outputError += 1 / 2 * neuron.error * neuron.error;
625 |
626 | if (!isFinite(neuron.error))
627 | throw new NetException("Non finite error on output neuron. You may have a problem in your code", { neuron: neuron });
628 | }
629 | };
630 |
631 | Network.prototype.backpropagate = function(targets) {
632 |
633 | // Compute current output error with our loss function
634 | this.loss(targets);
635 |
636 | var index, weight_index, n, l, sum, calc, grad, weight, max_weight = 0;
637 | var output_error = 0, curr_layer = this.nbLayers - 1;
638 | var neuron, next_neurons;
639 |
640 | this.iterations++; // needs to start at 1 for the Adam bias corrections
641 | this.globalError = 0;
642 |
643 | // Fetching neurons from last layer: backpropagate error & update weights
644 | for (index = this.layersSum[curr_layer-1] - 1; index >= 0; index--)
645 | {
646 | neuron = this.neurons[index];
647 |
648 | if (neuron.dropped)
649 | continue;
650 |
651 | // Update if necessary all next layer neurons
652 | if (next_neurons === undefined || neuron.layer !== curr_layer)
653 | next_neurons = this.getNeuronsInLayer(curr_layer--);
654 |
655 | // Computing w1*e1 + ... + wn*en
656 | for (sum = 0, n = 0, l = next_neurons.length; n < l; n++) {
657 | if (!next_neurons[n].dropped)
658 | sum += this.weights[neuron.outputWeightsIndex[n]] * next_neurons[n].error;
659 | }
660 |
661 | // Updating error
662 | neuron.error = sum * neuron.derivative(neuron.agregation);
663 | this.globalError += Math.abs(neuron.error);
664 |
665 | if (!isFinite(neuron.error)) {
666 | throw new NetException("Non finite error. You may have a problem in your code", {neuron: neuron});
667 | } else if (Math.abs(neuron.error) > _ERROR_VALUE_TOO_HIGH) {
668 | console.info("Scaling down error to a max", {neuron: neuron, error: neuron.error});
669 | neuron.error = neuron.error < 0 ? - _ERROR_VALUE_TOO_HIGH : _ERROR_VALUE_TOO_HIGH;
670 | throw new NetException("Computed error is too high. Try a smaller learning rate?", {neuron: neuron});
671 | }
672 |
673 | // Updating weights w = w + lr * en * output
674 | for (n = 0, l = next_neurons.length; n < l; n++)
675 | {
676 | if (next_neurons[n].dropped)
677 | continue;
678 |
679 | weight_index = neuron.outputWeightsIndex[n];
680 |
681 | // Compute new values w.r.t gradient optimizer
682 | grad = next_neurons[n].error * neuron.output;
683 | calc = this.optimizeGradient(this.weights[weight_index], grad, this.momentums[weight_index], this.gradients[weight_index]);
684 |
685 | // Updates values
686 | this.weights[weight_index] = weight = calc.value;
687 | this.momentums[weight_index] = calc.momentum;
688 | this.gradients[weight_index] = calc.gradients;
689 |
690 | // Update maxWeight (for visualisation)
691 | max_weight = max_weight < Math.abs(weight) ? Math.abs(weight) : max_weight;
692 |
693 | if (!isFinite(weight)) {
694 | throw new NetException("Non finite weight. You may have a problem in your code", {neuron: neuron, weight: weight});
695 | } else if (Math.abs(weight) > _WEIGHT_VALUE_TOO_HIGH) {
696 | console.info("Scaling down weight to a max.", {neuron: neuron, weight: weight});
697 | weight = weight < 0 ? - _WEIGHT_VALUE_TOO_HIGH : _WEIGHT_VALUE_TOO_HIGH;
698 | }
699 | }
700 |
701 | // Compute biais with gradient optimizer
702 | grad = neuron.error;
703 | calc = this.optimizeGradient(neuron.biais, grad, neuron.biaisMomentum, neuron.biaisGradient);
704 |
705 | // Updates values
706 | neuron.biais = calc.value;
707 | neuron.biaisMomentum = calc.momentum;
708 | neuron.biaisGradient = calc.gradients;
709 |
710 | if (!isFinite(neuron.biais))
711 | throw new NetException("Non finite biais. You may have a problem in your code", {neuron: neuron});
712 | }
713 |
714 | this.maxWeight = max_weight;
715 | };
716 |
717 | Network.prototype.optimizeGradient = function(value, grad, momentum, gradients) {
718 |
719 | var p = this.optimizerParams, prev_momentum = momentum;
720 |
721 | if (value === undefined || grad === undefined || momentum === undefined || gradients === undefined)
722 | throw new NetException("Invalid parameters for gradient optimization", { value: value, grad: grad, momentum: momentum, gradients: gradients });
723 |
724 | // Momentum helps to escape local minima,
725 | // Nesterov accelerated gradient is smarter than momentum because inertia is predicted
726 | // Adagrad aims to automatically decrease the learning rate
727 | // Adadelta corrects the overly aggressive learning rate reduction of Adagrad
728 |
729 | switch (this.optimizer)
730 | {
731 | case "momentum":
732 | momentum = (1 - p.alpha) * this.lr * grad + p.alpha * momentum;
733 | value += momentum;
734 | break;
735 |
736 | case "nag":
737 | momentum = p.alpha * momentum + (1 - p.alpha) * this.lr * grad;
738 | value += -p.alpha * prev_momentum + (1 + p.alpha) * momentum;
739 | break;
740 |
741 | case "adagrad":
742 | gradients += grad * grad; // this contains the sum of all past squared gradients
743 | value += this.lr * grad / (Math.sqrt(gradients) + _EPSILON);
744 | break;
745 |
746 | case "adadelta":
747 | gradients = p.alpha * gradients + (1 - p.alpha) * grad * grad; // this contains the decaying average of all past squared gradients
748 | value += this.lr * grad / (Math.sqrt(gradients) + _EPSILON);
749 | break;
750 |
751 | case "adam":
752 | momentum = p.beta1 * momentum + (1 - p.beta1) * grad;
753 | gradients = p.beta2 * gradients + (1 - p.beta2) * grad * grad;
754 |
755 | var mt = momentum / (1 - Math.pow(p.beta1, this.iterations)); // momentum biais correction
756 | var gt = gradients / (1 - Math.pow(p.beta2, this.iterations)); // gradients biais correction
757 |
758 | value += this.lr * mt / (Math.sqrt(gt) + _EPSILON);
759 | break;
760 |
761 | default: // good-old vanilla SGD
762 | value += this.lr * grad;
763 | }
764 |
765 | return { value: value, grad: grad, momentum: momentum, gradients: gradients };
766 | };
767 |
768 | Network.prototype.dropout = function(completely_random, drop_inputs) {
769 |
770 | // Dropping out random neurons allows pushing our network out of a bad solution
771 | // If completely_random === true, the same neuron can be dropped again.
772 | // We usually start from the first hidden layer, but it is possible to start from the input layer if drop_inputs === true
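     | // For instance: dropout(false) drops exactly one (newly chosen) neuron per hidden layer,
     | // while dropout() drops each hidden neuron independently with probability _DROPOUT_PROBABILITY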
773 |
774 | var i, l, n, neurons, shot;
775 | completely_random = typeof completely_random === "undefined" ? true : completely_random;
776 |
777 | for (i = drop_inputs === true ? 0 : 1; i < this.nbLayers-1; i++)
778 | {
779 | neurons = this.getNeuronsInLayer(i);
780 | shot = completely_random ? undefined : Math.round( Math.random() * (this.layers[i] - 1) );
781 |
782 | for (n = 0, l = neurons.length; n < l; n++)
783 | {
784 | if (shot === n || (shot === undefined && Math.random() >= _DROPOUT_PROBABILITY))
785 | {
786 | if (neurons[n].dropped === false && this.svgVisualization === true) // update visualization
787 | this.DOM.neuronsCircles[this.getNeuronIndex(i, n)].setAttribute("fill", _SVG_CIRCLE_COLOR_DROPPED);
788 | neurons[n].dropped = true;
789 | }
790 | else
791 | {
792 | if (neurons[n].dropped === true && this.svgVisualization === true) // update visualization
793 | this.DOM.neuronsCircles[this.getNeuronIndex(i, n)].setAttribute("fill", _SVG_CIRCLE_COLOR_DEFAULT);
794 | neurons[n].dropped = false;
795 | }
796 | }
797 | }
798 | };
799 |
800 | Network.prototype.validate = function (params) {
801 |
802 | if (!params)
803 | throw new NetException("Invalid parameters object for validation", { params: params });
804 |
805 | params.backpropagate = false;
806 | params.epochs = 1;
807 | params.dropout = false;
808 |
809 | return this.train(params);
810 | };
811 |
812 | Network.prototype.train = function(params) {
813 |
814 | if (!params)
815 | throw new NetException("Invalid parameters object for training", {params: params});
816 |
817 | var training_data = params.trainingSet || undefined;
818 | var validation_data = params.validationSet || [];
819 | var test_data = params.testSet || [];
820 |
821 | var epochs = params.epochs || undefined;
822 |
823 | if (!training_data || training_data.length <= 0)
824 | throw new NetException("Invalid raw training data (object)", {training_data: training_data});
825 |
826 | if (!epochs || isNaN(epochs))
827 | throw new NetException("Invalid epochs number for training", {epochs: epochs});
828 |
829 | if (typeof window.Worker === "undefined" || !window.Worker)
830 | throw new NetException("Web Worker is not supported by your client. Please upgrade in order to train as background operation");
831 |
832 | // Important to register these here (accessible in worker callback)
833 | var training_size = training_data.length;
834 | var validation_size = validation_data.length;
835 | var test_size = test_data.length;
836 | var gather_all = epochs * training_size <= _TRAINING_GATHER_ALL_THRESHOLD;
837 |
838 | console.info("Training: trying to handle %d extracted inputs/targets", training_size);
839 | console.info("Validation: trying to handle %d extracted inputs/targets", validation_size);
840 | console.info("Test: trying to handle %d extracted inputs/targets", test_size);
841 |
842 | // Create visualisation elements (these are also accessible beyond this scope, in the worker callback)
843 | var container, graph, graph_ctx, text_output;
844 | var graph_width = params.graphWidth ? params.graphWidth : _CANVAS_GRAPH_DEFAULT_WIDTH;
845 | var graph_height = params.graphHeight ? params.graphHeight : _CANVAS_GRAPH_DEFAULT_HEIGHT;
846 | var scaled_width;
847 |
848 | if (params.visualize === true)
849 | {
850 | // Create canvas
851 | container = document.createElement("div");
852 | container.setAttribute("style", "margin: 10px;");
853 |
854 | graph = document.createElement("canvas");
855 | graph.setAttribute("width", graph_width);
856 | graph.setAttribute("height", graph_height);
857 | container.appendChild( graph );
858 |
859 | // Create global error mean output
860 | text_output = document.createElement("samp");
861 | container.appendChild( text_output );
862 |
863 | // We don't want to display too much data needlessly
864 | if (gather_all)
865 | scaled_width = graph_width / (epochs * training_data.length);
866 | else
867 | scaled_width = graph_width / epochs;
868 |
869 | graph_ctx = graph.getContext("2d");
870 | graph_ctx.translate(0, graph_height);
871 | graph_ctx.scale(scaled_width, - graph_height);
872 | // graph_ctx.scale(1, - _CANVAS_GRAPH_HEIGHT);
873 | graph_ctx.globalAlpha = 0.8;
874 | graph_ctx.lineWidth = 0.03;
875 |
876 | // The following functions will be called in our requestAnimationFrame callback
877 | var display_curves = function (data, window_width, fill, stroke, fill_style, stroke_style)
878 | {
879 | if (!data || data.length === 0)
880 | return;
881 |
882 | var ratio = window_width / (data.length-1);
883 | var l = data.length;
884 |
885 | graph_ctx.fillStyle = fill_style;
886 | graph_ctx.strokeStyle = stroke_style;
887 | graph_ctx.beginPath();
888 | graph_ctx.moveTo(0, 0);
889 |
890 | for (var i = 0; i < l; i++)
891 | graph_ctx.lineTo(i * ratio, Math.sqrt(data[i] + _EPSILON) * _CANVAS_GRAPH_WINDOW_FACTOR);
892 |
893 | if (fill) {
894 | // graph_ctx.lineTo(i * ratio, Math.sqrt(data[i-1] + _EPSILON) * _CANVAS_GRAPH_WINDOW_FACTOR);
895 | graph_ctx.lineTo((i-1) * ratio, 0);
896 | graph_ctx.closePath();
897 | graph_ctx.fill();
898 | }
899 |
900 | if (stroke) {
901 | graph_ctx.stroke();
902 | graph_ctx.closePath();
903 | }
904 | };
905 |
906 | var Stats = function (losses, epoch_mean_loss, global_mean_loss) {
907 |
908 | this.size = losses.length;
909 | this.losses = losses;
910 | this.epoch_mean_loss = epoch_mean_loss;
911 | this.global_mean_loss = global_mean_loss;
912 | };
913 | }
914 |
915 | //////////////// Worker below ////////////////////////////
916 |
917 | var blob = new Blob(['(' + this.workerHandler.toString() + ')()' ], { type: "text/javascript" });
918 | var worker = new Worker(window.URL.createObjectURL(blob));
919 | var that = this;
920 |
921 | worker.addEventListener("message", function(e) {
922 |
923 | if (typeof e.data.type === "undefined")
924 | throw new NetException("Worker message needs to contain message type (WORKER_TRAINING_X)", {data: e.data});
925 |
926 | // Training is over for the current epoch: we display our losses
927 | if (e.data.type === _WORKER_TRAINING_PENDING)
928 | {
929 | if (params.visualize !== true)
930 | return;
931 |
932 | window.requestAnimationFrame(function() {
933 |
934 | var training = new Stats(e.data.training_stats.losses, e.data.training_stats.epoch_mean_loss, e.data.training_stats.global_mean_loss);
935 | var validation = new Stats(e.data.validation_stats.losses, e.data.validation_stats.epoch_mean_loss, e.data.validation_stats.global_mean_loss);
936 | var test = new Stats(e.data.test_stats.losses, e.data.test_stats.epoch_mean_loss, e.data.test_stats.global_mean_loss);
937 |
938 | var smooth_size = graph_width * _CANVAS_GRAPH_SMOOTH_FACTOR;
939 |
940 | ////////////////////////////
941 |
942 | graph_ctx.clearRect(0, 0, graph_width / scaled_width, 1);
943 |
944 | // Graphically separate epochs (only with a small amount of epochs)
945 | if (epochs <= _CANVAS_GRAPH_SEPARATE_EPOCHS_THRESHOLD) {
946 | graph_ctx.fillStyle = "#c7cbe0";
947 | for (var i = 1; i < epochs; i++)
948 | graph_ctx.fillRect(i * graph_width / scaled_width / epochs, 0, 1 / scaled_width, 1);
949 | }
950 |
951 | // Display the training set losses curve
952 | display_curves(training.losses.average(graph_width), training.size, true, false, _COLOR_ASPHALT, _COLOR_BLUE);
953 |
954 | // Display smoother mean if necessary
955 | if (gather_all)
956 | display_curves(training.losses.average(graph_width * _CANVAS_GRAPH_SMOOTH_FACTOR), training.size, false, true, _COLOR_ASPHALT, _COLOR_BLUE);
957 |
958 | // Display the validation set and test set smoothly
959 | display_curves(validation.losses.average(graph_width), training.size, false, true, "pink", _COLOR_PURPLE);
960 | display_curves(test.losses.average(graph_width), training.size, false, true, "pink", _COLOR_GREEN);
961 |
962 | // Update output text display
963 | text_output.innerHTML = "epoch " + (e.data.curr_epoch+1) + "/" + epochs + " | curr error mean: " + training.epoch_mean_loss.toFixed(5);
964 | });
965 | }
966 |
967 | // Training is over: we update our weights and biais
968 | else if (e.data.type === _WORKER_TRAINING_OVER)
969 | {
970 | that.importWeights( e.data.weights );
971 | that.importBiais( e.data.biais );
972 |
973 | // Feed and compute the loss in order to have updated values (such as the error) in the neurons
974 | that.feed( training_data[0].inputs );
975 | that.loss( training_data[0].targets );
976 |
977 | // Free space
978 | training_data = null;
979 | validation_data = null;
980 | test_data = null;
981 | worker.terminate();
982 | }
983 | });
984 |
985 | // Start web worker with training data through epochs
986 | worker.postMessage({
987 | lib: this.libURI,
988 | params: this.exportParams(),
989 | weights: this.exportWeights(),
990 | biais: this.exportBiais(),
991 |
992 | trainingData: training_data,
993 | validationData: validation_data,
994 | testData: test_data,
995 |
996 | epochs: epochs,
997 | options: {
998 | backpropagate: params.backpropagate !== undefined ? params.backpropagate : _DEFAULT_TRAINING_BACKPROPAGATE,
999 | dropout: params.dropout !== undefined ? params.dropout : _DEFAULT_TRAINING_DROPOUT,
1000 | shuffle: params.shuffle !== undefined ? params.shuffle : _DEFAULT_TRAINING_SHUFFLE
1001 | }
1002 | });
1003 |
1004 | // You can disable the worker (for example: to analyze performance with development tools)
1005 | // this.disabledWorkerHandler({
1006 | // ... same params ...
1007 | // });
1008 |
1009 | return container || null;
1010 | };
1011 |
1012 | Network.prototype.workerHandler = function() {
1013 |
1014 | // Inside onmessage is the core training, which will be executed by our web worker
1015 | onmessage = function(e) {
1016 |
1017 | if (typeof importScripts !== "undefined")
1018 | importScripts(e.data.lib);
1019 |
1020 | if (!e.data.lib || !e.data.params || !e.data.weights)
1021 | throw new NetException("Invalid lib_url, params or weights in order to build a Neural Network copy", {lib: e.data.lib, params: e.data.params, weights: e.data.weights});
1022 |
1023 | var epochs = e.data.epochs;
1024 | var training_data = e.data.trainingData;
1025 | var validation_data = e.data.validationData;
1026 | var test_data = e.data.testData;
1027 | var options = {
1028 | backpropagate: e.data.options.backpropagate,
1029 | dropout: e.data.options.dropout,
1030 | shuffle: e.data.options.shuffle
1031 | };
1032 |
1033 | console.info("Training imported data in processing... "+ epochs + "requested. Options: ", options);
1034 | console.info("Brain copy below:");
1035 |
1036 | // Create copy of our current Network
1037 | var brain = new Network(e.data.params);
1038 | brain.importWeights(e.data.weights);
1039 | brain.importBiais(e.data.biais);
1040 |
1041 | ///////////////////// Training - validation - test //////////////////////////////
1042 |
1043 | var datasetHandler = function(data) {
1044 |
1045 | this.data = data;
1046 | this.size = data.length;
1047 |
1048 | this.losses = [];
1049 | this.lossesMean = 0;
1050 | this.lossesSum = 0;
1051 | this.globalLossesSum = 0;
1052 |
1053 | this.epochMeanLoss = undefined;
1054 | this.globalMeanLoss = undefined;
1055 | };
1056 |
1057 | datasetHandler.prototype.fetch = function(options, backpropagate) {
1058 |
1059 | // Above a threshold, we only collect the mean of every epoch. It enhances display performance (on the canvas)
1060 | // and avoids passing oversized arrays back to the main thread
1061 | var gather_all = epochs * this.size <= _TRAINING_GATHER_ALL_THRESHOLD;
1062 |
1063 | // Shuffling data can improve learning
1064 | if (options.shuffle === true)
1065 | this.data = this.data.shuffle();
1066 |
1067 | // Feed the NN forward through the dataset
1068 | for (this.lossesSum = 0, i = 0; i < this.size; i++) {
1069 | try {
1070 | brain.feed(this.data[i].inputs);
1071 |
1072 | if (backpropagate === false)
1073 | brain.loss(this.data[i].targets);
1074 | else
1075 | brain.backpropagate(this.data[i].targets);
1076 | }
1077 |
1078 | catch (ex) {
1079 | console.error(ex);
1080 | return false;
1081 | }
1082 |
1083 | this.lossesSum += brain.outputError;
1084 |
1085 | // Display every loss of every epochs
1086 | if (gather_all)
1087 | this.losses.push(brain.outputError);
1088 | }
1089 |
1090 | this.globalLossesSum += this.lossesSum;
1091 | this.epochMeanLoss = this.lossesSum / this.size;
1092 | this.globalMeanLoss = this.globalLossesSum / ((curr_epoch + 1) * this.size);
1093 |
1094 | // Display the loss mean for every epoch
1095 | if (!gather_all)
1096 | this.losses.push(this.epochMeanLoss);
1097 |
1098 | return true;
1099 | };
1100 |
1101 | var i, n, curr_epoch;
1102 |
1103 | var training_handler = new datasetHandler(training_data);
1104 | var validation_handler = new datasetHandler(validation_data);
1105 | var test_handler = new datasetHandler(test_data);
1106 |
1107 | // Variables that store the means of the current training in order to fire a dropout if requested. See the dropout execution below
1108 | var last_means = [];
1109 | var last_means_sum = 0;
1110 |
1111 | // Repeat the feedforward & backpropagation process for 'epochs' epochs
1112 | for (curr_epoch = 0; curr_epoch < epochs; curr_epoch++)
1113 | {
1114 | // Train by using the training set
1115 | if (!training_handler.fetch(options, options.backpropagate))
1116 | return;
1117 |
1118 | // Feed the NN with the validation set
1119 | if (!validation_handler.fetch(options, false))
1120 | return;
1121 |
1122 | // Feed the NN with the test set
1123 | if (!test_handler.fetch(options, false))
1124 | return;
1125 |
1126 | options.dropout = options.dropout === true ? _TRAINING_DROPOUT_EPOCHS_THRESHOLD : options.dropout;
1127 |
1128 | // Introducing dynamic dropout every "options.dropout" epochs,
1129 | // if the mean difference is below _TRAINING_DROPOUT_MEAN_THRESHOLD
1130 | if (options.dropout !== false)
1131 | {
1132 | last_means_sum += training_handler.epochMeanLoss;
1133 | last_means.push( training_handler.epochMeanLoss );
1134 |
1135 | if (last_means.length >= options.dropout)
1136 | {
1137 | last_means_sum -= last_means.shift();
1138 | var local_mean = last_means_sum / options.dropout;
1139 |
1140 | if (local_mean - training_handler.epochMeanLoss <= _TRAINING_DROPOUT_MEAN_THRESHOLD) {
1141 | console.info("EVENT: Dropout at epoch #%d", curr_epoch);
1142 | brain.dropout(false);
1143 | last_means = [];
1144 | last_means_sum = 0;
1145 | }
1146 | }
1147 | }
1148 |
1149 | // Send updates back to the main thread
1150 | self.postMessage({
1151 | type: _WORKER_TRAINING_PENDING,
1152 | curr_epoch: curr_epoch,
1153 |
1154 | training_stats: {
1155 | losses: training_handler.losses,
1156 | epoch_mean_loss: training_handler.epochMeanLoss,
1157 | global_mean_loss: training_handler.globalMeanLoss,
1158 | },
1159 |
1160 | validation_stats: {
1161 | losses: validation_handler.losses,
1162 | epoch_mean_loss: validation_handler.epochMeanLoss,
1163 | global_mean_loss: validation_handler.globalMeanLoss,
1164 | },
1165 |
1166 | test_stats: {
1167 | losses: test_handler.losses,
1168 | epoch_mean_loss: test_handler.epochMeanLoss,
1169 | global_mean_loss: test_handler.globalMeanLoss,
1170 | }
1171 | });
1172 | }
1173 |
1174 | console.info("Training done. Gone through all epochs", {epochs: epochs, global_mean_loss: training_handler.globalMeanLoss});
1175 |
1176 | self.postMessage({
1177 | type: _WORKER_TRAINING_OVER,
1178 | weights: brain.exportWeights(),
1179 | biais: brain.exportBiais()
1180 | });
1181 |
1182 | self.close();
1183 | };
1184 |
1185 | return onmessage; // allows fallback for Network.disabledWorkerHandler
1186 | };
1187 |
1188 | Network.prototype.disabledWorkerHandler = function(data) {
1189 |
1190 | if (!data)
1191 | throw new NetException("Invalid data for disabledWorkerHandler", {data: data});
1192 |
1193 | // Override self.postMessage (it doesn't exist outside of a web worker; we create it here to avoid errors and to monitor what's happening)
1194 | self.postMessage = function(data) {
1195 | console.log(data);
1196 | };
1197 |
1198 | this.workerHandler()({data: data});
1199 | };
1200 |
1201 | Network.prototype.getNeuronIndex = function(layer, n) {
1202 |
1203 | if (layer === undefined || layer < 0 || layer >= this.nbLayers)
1204 | throw new NetException("Invalid layer access", {layer: layer, n: n});
1205 |
1206 | if (n === undefined || n >= this.layers[layer])
1207 | throw new NetException("Invalid neuron access", {layer: layer, n: n});
1208 |
1209 | return (this.layersSum[layer-1] || 0) + n;
1210 | };
1211 |
1212 | Network.prototype.getNeuron = function(layer, n) {
1213 |
1214 | return this.neurons[this.getNeuronIndex(layer, n)];
1215 | };
1216 |
1217 | Network.prototype.getNeuronsInLayer = function(layer) {
1218 |
1219 | if ((!layer && layer !== 0) || layer < 0 || layer >= this.nbLayers)
1220 | throw new NetException("Invalid layer access", {layer: layer});
1221 |
1222 | return this.neurons.slice( this.layersSum[layer] - this.layers[layer], this.layersSum[layer]);
1223 | };
1224 |
1225 | Network.prototype.getWeightIndex = function(from, to, debug) {
1226 |
1227 | if (!from || !to)
1228 | throw new NetException("Invalid weight access, wrong neurons", {from: from, to: to});
1229 |
1230 | if (to.layer - from.layer !== 1 || to.layer <= 0 || to.layer >= this.nbLayers)
1231 | throw new NetException("Invalid weight access, layers are not adjacent", {from: from, to: to});
1232 |
1233 | // How to explain this formula ? IT'S RIGHT FROM MY BRAIN
1234 | var part1 = this.layersMul[from.layer]; // How many weights there are before from.layer
1235 | var part2 = (from.id - (this.layersSum[from.layer-1] || 0)) * this.layers[to.layer]; // How many weights there are in from.layer, but before our neuron
1236 | var part3 = to.id - this.layersSum[from.layer]; // How many weights there are from our neuron which do not go to our target neuron
1237 | var index = part1 + part2 + part3;
1238 |
1239 | if (debug || isNaN(this.weights[index]) || part1 < 0 || part2 < 0 || part3 < 0 || index < from.id)
1240 | {
1241 | console.log(from, to);
1242 | console.log("index: ", index);
1243 | console.log("#1", part1);
1244 | console.log("#2", part2);
1245 | console.log("#3", part3);
1246 |
1247 | if (isNaN(this.weights[index]))
1248 | throw new NetException("NaN detected for computing weight index");
1249 | else if (part1 < 0 || part2 < 0 || part3 < 0)
1250 | throw new NetException("Parts computation is incorrect: negative values");
1251 | else if (index < from.id)
1252 | throw new NetException("Incoherent index inferior to from.id");
1253 | else
1254 | throw new NetException("Error: debug launched", {debug: debug});
1255 | }
1256 |
1257 | return index;
1258 | };
1259 |
1260 | Network.prototype.getWeight = function(from, to) {
1261 |
1262 | return this.weights[this.getWeightIndex(from, to)];
1263 | };
1264 |
1265 | Network.prototype.setWeight = function(from, to, value) {
1266 |
1267 | this.weights[this.getWeightIndex(from, to)] = value;
1268 | };
1269 |
1270 | Network.prototype.setHiddenLayerToActivation = function(activation, derivation) {
1271 |
1272 | if (!activation || !derivation)
1273 | throw new NetException("Invalid activation and/or derivation assignment", {activation: activation, derivation: derivation});
1274 |
1275 | for (var i = this.layers[0]; i < this.layersSum[this.nbLayers-2]; i++) {
1276 | this.neurons[i].activation = activation;
1277 | this.neurons[i].derivative = derivation;
1278 | }
1279 | };
1280 |
1281 |
1282 | /////////////////////////// Statics network methods & activation functions
1283 |
1284 | Network.prototype.static_randomBiais = function() {
1285 | return Math.uniform() * _BIAIS_RANDOM_COEFF;
1286 | };
1287 |
1288 | Network.prototype.static_randomWeight = function() {
1289 | return Math.uniform() * _WEIGHT_RANDOM_COEFF;
1290 | };
1291 |
1292 | Network.prototype.static_linearActivation = function(x) {
1293 | return x;
1294 | };
1295 |
1296 | Network.prototype.static_linearDerivative = function(x) {
1297 | return 1;
1298 | };
1299 |
1300 | Network.prototype.static_tanhActivation = function(x) {
1301 | return Math.tanh(x);
1302 | };
1303 |
1304 | Network.prototype.static_tanhDerivative = function(x) {
1305 | return 1 - (Math.tanh(x) * Math.tanh(x));
1306 | };
1307 |
1308 | Network.prototype.static_sigmoidActivation = function(x) {
1309 | return 1 / (1 + Math.exp(-x));
1310 | };
1311 |
1312 | Network.prototype.static_sigmoidDerivative = function(x) {
1313 | return this.network.static_sigmoidActivation(x) * (1 - this.network.static_sigmoidActivation(x));
1314 | };
1315 |
1316 | Network.prototype.static_reluActivation = function(x) {
1317 | return x < 0 ? 0 : x;
1318 | };
1319 |
1320 | Network.prototype.static_reluDerivative = function(x) {
1321 | return x < 0 ? 0 : 1;
1322 | };
1323 |
1324 | Network.prototype.static_preluActivation = function(x) {
1325 | return x < 0 ? this.network.activationParams.alpha * x : x;
1326 | };
1327 |
1328 | Network.prototype.static_preluDerivative = function(x) {
1329 | return x < 0 ? this.network.activationParams.alpha : 1;
1330 | };
1331 |
1332 | /////////////////////////// Network Exception
1333 |
1334 | function NetException(message, variables) {
1335 | console.error("ERROR: " + message, variables);
1336 | }
1337 |
1338 | Array.prototype.hash = function() {
1339 | return { hash: btoa(this.join()), size: this.length };
1340 | };
1341 |
1342 | Array.prototype.shuffle = function() {
1343 |
1344 | var j, x, i;
1345 |
1346 | for (i = this.length - 1; i > 0; i--) {
1347 | j = Math.floor(Math.random() * (i + 1));
1348 | x = this[i];
1349 | this[i] = this[j];
1350 | this[j] = x;
1351 | }
1352 |
1353 | return this;
1354 | };
1355 |
1356 | Array.prototype.average = function(size) {
1357 |
1358 | if (size >= this.length)
1359 | return this;
1360 |
1361 | var ratio = this.length / size;
1362 | var index, i, j, l = this.length, n = Math.ceil(ratio);
1363 | var sum, last_sum = 0, mean, avgs = [];
1364 |
1365 | for (i = 0; i < size; i++)
1366 | {
1367 | index = Math.floor(i * ratio);
1368 | sum = 0;
1369 |
1370 | for (j = 0; j < n && index+j < l; j++)
1371 | sum += this[index + j];
1372 |
1373 | avgs.push((sum + last_sum) / (n * 2));
1374 | last_sum = sum;
1375 | }
1376 |
1377 | return avgs;
1378 | };
1379 |
1380 | Math.uniform = function() {
1381 | return ((Math.random() + Math.random() + Math.random() + Math.random() + Math.random() + Math.random()) - 3) / 3;
1382 | };
--------------------------------------------------------------------------------
/neural-network.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | const _AVAILABLE_OPTIMIZERS = ["momentum", "nag", "adagrad", "adadelta", "adam"];
4 | const _WEIGHT_RANDOM_COEFF = 1; // must be 1 if we want to keep a normal distribution centered on 0
5 | const _BIAIS_RANDOM_COEFF = 0.0; // usually, can be 0 or 0.1. See http://cs231n.github.io/neural-networks-2/
6 | const _DROPOUT_PROBABILITY = 0.5; // usually a good value also
7 | const _EPSILON = 1e-8;
8 |
9 | const _TRAINING_GATHER_ALL_THRESHOLD = 100000;
10 | const _TRAINING_DROPOUT_EPOCHS_THRESHOLD = 200;
11 | const _TRAINING_DROPOUT_MEAN_THRESHOLD = 0.001;
12 |
13 | const _DEFAULT_TRAINING_BACKPROPAGATE = true;
14 | const _DEFAULT_TRAINING_DROPOUT = false;
15 | const _DEFAULT_TRAINING_SHUFFLE = true;
16 |
17 | const _WORKER_TRAINING_PENDING = 0;
18 | const _WORKER_TRAINING_OVER = 1;
19 |
20 | const _ERROR_VALUE_TOO_HIGH = 100000;
21 | const _WEIGHT_VALUE_TOO_HIGH = 10000;
22 |
23 | const _CANVAS_GRAPH_DEFAULT_WIDTH = 600;
24 | const _CANVAS_GRAPH_DEFAULT_HEIGHT = 100;
25 | const _CANVAS_GRAPH_WINDOW_FACTOR = 1 / 0.9;
26 | const _CANVAS_GRAPH_SMOOTH_FACTOR = 1 / 20;
27 | const _CANVAS_GRAPH_SEPARATE_EPOCHS_THRESHOLD = 20;
28 |
29 | const _SVG_STROKE_WIDTH = 4;
30 | const _SVG_CIRCLE_RADIUS = 15;
31 | const _SVG_CIRCLE_COLOR_DEFAULT = "#ffe5e5";
32 | const _SVG_CIRCLE_COLOR_DROPPED = "#c7c7c7";
33 | const _SVG_MAX_WEIGHTS_DISPLAY_TEXT = 4;
34 |
35 | const _COLOR_ASPHALT = "rgb(52, 73, 94)";
36 | const _COLOR_PURPLE = "rgb(142, 68, 173)";
37 | const _COLOR_BLUE = "rgb(52, 152, 219)";
38 | const _COLOR_GREEN = "rgb(26, 188, 156)";
39 |
40 | /////////////////////////////// Utils - various functions
41 |
42 | var Utils = {
43 | static: {}, // yes, it's just sugar so it looks good in the console
44 | trainingData: "",
45 | trainingSize: 0,
46 | trainingMaxSize: 10000
47 | };
48 |
49 | Utils.static.tooltipOn = function(tooltip, event, object) {
50 |
51 | tooltip.object = object;
52 | tooltip.setAttribute("class", "");
53 | tooltip.style.left = (event.pageX+10) + "px";
54 | tooltip.style.top = (event.pageY+5) + "px";
55 |
56 | Utils.static.tooltipUpdate(tooltip, object);
57 | };
58 |
59 | Utils.static.tooltipUpdate = function(tooltip, object) {
60 |
61 | if (typeof object !== "object") {
62 | tooltip.object = object;
63 | return;
64 | }
65 |
66 | var buffer = "";
67 |
68 | for (var key in object)
69 | if (object.hasOwnProperty(key) && key !== "object")
70 | buffer += key + ": " + object[key] + "<br />";
71 |
72 | tooltip.innerHTML = buffer;
73 | };
74 |
75 | Utils.static.tooltipOff = function(tooltip) {
76 |
77 | tooltip.object = undefined;
78 | tooltip.setAttribute("class", "off");
79 | };
80 |
81 | ////////////
82 |
83 | Utils.static.setTrainingSize = function(size) {
84 |
85 | Utils.trainingMaxSize = size;
86 | };
87 |
88 | Utils.static.addIntoTraining = function(inputs, targets) {
89 |
90 | // Build training data (as string) for future exportation
91 | if (Utils.trainingSize <= Utils.trainingMaxSize) {
92 | Utils.trainingData += inputs.join(" ") + " : " + targets.join(" ") + ";\\\n";
93 | Utils.trainingSize++;
94 | return true;
95 | }
96 |
97 | return false;
98 | };
99 |
100 | Utils.static.exportTrainingData = function() {
101 |
102 | console.info("Saving training data...", "Reading 'training_data'");
103 |
104 | var output = document.createElement("textarea");
105 | output.innerHTML = "var imported_training_set = \"" + Utils.trainingData + "\";";
106 | document.body.appendChild( output );
107 |
108 | return "Export completed for " + Utils.trainingSize + " entries.";
109 | };
110 |
111 | Utils.static.getTrainingData = function() {
112 |
113 | return Utils.trainingData;
114 | };
115 |
116 | Utils.static.clearTrainingData = function() {
117 |
118 | Utils.trainingData = "";
119 | };
120 |
121 | Utils.static.parseTrainingData = function(raw) {
122 |
123 | // Parse training data
124 | var i, l, entry, splitted = raw.split(";");
125 | var training_data = [], training_size;
126 |
127 | for (i = 0, l = splitted.length; i < l; i++)
128 | {
129 | entry = splitted[i].trim().split(":");
130 | if (entry.length !== 2)
131 | break;
132 |
133 | training_data.push({
134 | inputs: entry[0].trim().split(" ").map(parseFloat),
135 | targets: entry[1].trim().split(" ").map(parseFloat)
136 | });
137 | }
138 |
139 | return training_data;
140 | };
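// Usage sketch (illustrative, not part of the library): parseTrainingData expects
// the same format addIntoTraining produces, i.e. space-separated inputs, " : ",
// space-separated targets, with entries ending in ";":
//   var set = Utils.static.parseTrainingData("0 0 : 0; 0 1 : 1; 1 0 : 1; 1 1 : 0;");
//   // set[1] => { inputs: [0, 1], targets: [1] }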
141 |
142 | ////////////////////////////////// Neural Network core
143 |
144 | function Neuron(id, layer, biais) {
145 |
146 | this.id = id;
147 | this.layer = layer;
148 | this.biais = biais || 0;
149 | this.biaisMomentum = 0;
150 | this.biaisGradient = 0;
151 | this.dropped = false;
152 |
153 | this.agregation = undefined;
154 | this.output = undefined;
155 | this.error = undefined;
156 |
157 | this.network = undefined; // link to its network, indispensable for special activation & derivation
158 | this.activation = undefined;
159 | this.derivative = undefined;
160 |
161 | // Input/output weights as cache (because Network.getWeight method is repeated a lot in feed and backprop, it takes time)
162 | this.inputWeightsIndex = undefined;
163 | this.outputWeightsIndex = undefined;
164 | }
165 |
166 | function Network(params) {
167 |
168 | // Required variables: lr, layers
169 | this.params = params;
170 |
171 | this.lr = undefined; // Learning rate
172 | this.layers = undefined;
173 | this.optimizer = undefined; // must be included in _AVAILABLE_OPTIMIZERS
174 | this.optimizerParams = undefined; // example: momentum rate will be {alpha: X}
175 | this.activation = undefined; // activation function for hidden layer
176 | this.activationParams = undefined;
177 |
178 | this.neurons = undefined;
179 | this.weights = undefined;
180 | this.momentums = undefined; // momentum coefficients at t-1
181 | this.gradients = undefined; // gradients squared for Adagrad
182 | this.output = undefined; // current output array
183 |
184 | // Caching variables:
185 | this.layersSum = undefined;
186 | this.layersMul = undefined;
187 | this.nbLayers = undefined;
188 | this.nbNeurons = undefined;
189 | this.nbWeights = undefined;
190 |
191 | // Stats-purpose:
192 | this.iterations = 0;
193 | this.maxWeight = 0;
194 | this.outputError = 0;
195 | this.globalError = 0;
196 | this.avgWeightsPerNeuron = 0;
197 |
198 | // Visualization:
199 | this.svgVisualization = false;
200 | this.DOM = {
201 | svg: undefined,
202 | tooltip: undefined,
203 |
204 | neuronsCircles: undefined,
205 | weightTexts: undefined,
206 | inputTexts: undefined,
207 | outputTexts: undefined,
208 | weightCurves: undefined
209 | };
210 |
211 | // Necessary for avoiding problems with Cross Origin (Web Worker)
212 | this.libURI = undefined;
213 |
214 | this.loadParams(params);
215 | this.initialize();
216 | }
217 |
218 | Network.prototype.loadParams = function(params) {
219 |
220 | for (var key in params)
221 | if (this.hasOwnProperty(key) && this[key] === undefined)
222 | this[key] = params[key];
223 |
224 | console.log("Loaded params", this);
225 | };
226 |
227 | Network.prototype.exportParams = function() {
228 |
229 | // Ensure to update params if they were modified on live
230 | for (var key in this.params)
231 | if (this.hasOwnProperty(key) && this[key] !== undefined)
232 | this.params[key] = this[key];
233 |
234 | return this.params;
235 | };
236 |
237 | Network.prototype.exportWeights = function() {
238 | return this.weights;
239 | };
240 |
241 | Network.prototype.importWeights = function(values) {
242 |
243 | this.weights = values;
244 | this.momentums.fill(0);
245 | this.gradients.fill(0);
246 | this.iterations = 0;
247 | };
248 |
249 | Network.prototype.exportBiais = function() {
250 |
251 | // We make sure to return a copy, not a reference
252 | var values = Array(this.nbNeurons);
253 |
254 | for (var i = 0; i < this.nbNeurons; i++)
255 | values[i] = this.neurons[i].biais;
256 |
257 | return values;
258 | };
259 |
260 | Network.prototype.importBiais = function(values) {
261 |
262 | for (var i = 0; i < this.nbNeurons; i++) {
263 | this.neurons[i].biais = values[i];
264 | this.neurons[i].biaisMomentum = 0;
265 | this.neurons[i].biaisGradient = 0;
266 | }
267 | };
268 |
269 | Network.prototype.initialize = function() {
270 |
271 | if (this.libURI === undefined)
272 | throw new NetException("Undefined or invalid lib URI. Necessary for avoiding Cross Origin problems. Use https://domain.com/.../neural-net.js notation", {libURI: this.libURI});
273 |
274 | if (this.lr === undefined || this.lr <= 0)
275 | throw new NetException("Undefined or invalid learning rate", {lr: this.lr});
276 |
277 | if (this.layers === undefined || this.layers.length <= 1)
278 | throw new NetException("Undefined or insufficient layers. You must have at least an input and an output layer.", {layers: this.layers});
279 |
280 | if (this.optimizer !== undefined && !_AVAILABLE_OPTIMIZERS.includes(this.optimizer))
281 | throw new NetException("Invalid optimizer. Available optimizers = ", { available: _AVAILABLE_OPTIMIZERS, optimizer: this.optimizer });
282 |
283 | if ((this.optimizer === "momentum" || this.optimizer === "nag") && (this.optimizerParams === undefined || this.optimizerParams.alpha === undefined || this.optimizerParams.alpha < 0 || this.optimizerParams.alpha > 1))
284 | throw new NetException("Undefined or invalid momentum rate (must be between 0 and 1 both included) ", {optimizer: this.optimizer, optimizerParams: this.optimizerParams});
285 |
286 | if (this.optimizer === "adam" && (this.optimizerParams === undefined || this.optimizerParams.beta1 === undefined || this.optimizerParams.beta2 === undefined || this.optimizerParams.beta1 < 0 || this.optimizerParams.beta1 > 1 || this.optimizerParams.beta2 < 0 || this.optimizerParams.beta2 > 1))
287 | throw new NetException("Undefined or invalid (beta1,beta2) for Adam optimizer", {optimizer: this.optimizer, optimizerParams: this.optimizerParams});
288 |
289 | var i, j, l, sum, mul, tmp;
290 | var curr_layer = 0;
291 |
292 | // Initialization
293 | this.iterations = 0;
294 | this.nbLayers = this.layers.length;
295 | this.layersSum = [];
296 | this.layersMul = [];
297 | this.neurons = [];
298 | this.weights = [];
299 | this.momentums = [];
300 | this.gradients = [];
301 |
302 | // Prepare layers relative computation
303 | for (i = 0, sum = 0, mul = 1; i < this.nbLayers; i++) {
304 | sum += this.layers[i];
305 | mul = (this.layers[i-1] || 0) * this.layers[i];
306 | this.layersSum.push(sum);
307 | this.layersMul.push(mul + (this.layersMul[i-1] || 0));
308 | // [0] will be 0, because layersMul is used to know how many weights there are before a layer, and there are none before layer 0
309 | }
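// Worked example (illustrative): with layers = [2, 3, 1], layersSum = [2, 5, 6]
// (cumulative neuron counts) and layersMul = [0, 6, 9] (cumulative counts of the
// weights feeding each layer: 2*3 = 6 into layer 1, plus 3*1 = 3 into layer 2)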
310 |
311 | // Compute and put lengths in cache
312 | this.nbNeurons = sum;
313 | this.nbWeights = this.layersMul[this.layersMul.length-1];
314 | this.avgWeightsPerNeuron = this.nbWeights / this.nbNeurons;
315 |
316 | // Create weights, momentum and gradients
317 | for (i = 0; i < this.nbWeights; i++) {
318 | this.weights[i] = this.static_randomWeight();
319 | this.momentums.push(0);
320 | this.gradients.push(0);
321 | }
322 |
323 | // Create neurons
324 | var index, neuron, prev_neurons = [], next_neurons = [];
325 |
326 | for (curr_layer = 0, i = 0; i < this.nbNeurons; i++)
327 | {
328 | neuron = new Neuron(i, i >= this.layersSum[curr_layer] ? ++curr_layer : curr_layer, this.static_randomBiais());
329 | neuron.network = this;
330 | neuron.activation = this.static_linearActivation;
331 | neuron.derivative = this.static_linearDerivative;
332 | this.neurons.push(neuron);
333 | }
334 |
335 | // Set hidden layer activation functions
336 | // (separated from loop above because we don't want input and output layers to have an activation function -by default)
337 | switch (this.activation) {
338 | case "tanh":
339 | this.setHiddenLayerToActivation(this.static_tanhActivation, this.static_tanhDerivative);
340 | break;
341 |
342 | case "sigmoid":
343 | this.setHiddenLayerToActivation(this.static_sigmoidActivation, this.static_sigmoidDerivative);
344 | break;
345 |
346 | case "relu":
347 | this.setHiddenLayerToActivation(this.static_reluActivation, this.static_reluDerivative);
348 | break;
349 |
350 | case "prelu":
351 | this.setHiddenLayerToActivation(this.static_preluActivation, this.static_preluDerivative);
352 | break;
353 |
354 | default:
355 | this.setHiddenLayerToActivation(this.static_linearActivation, this.static_linearDerivative);
356 | }
357 |
358 | // 1- Assign weight indexes into each neuron's cache
359 | // 2- Improve the weight initialization by ensuring that the variance is equal to 1
360 | for (curr_layer = -1, i = 0; i < this.nbNeurons; i++)
361 | {
362 | neuron = this.neurons[i];
363 |
364 | if (neuron.layer !== curr_layer) {
365 | curr_layer++;
366 | prev_neurons = curr_layer > 0 ? this.getNeuronsInLayer(curr_layer-1) : [];
367 | next_neurons = curr_layer < this.nbLayers-1 ? this.getNeuronsInLayer(curr_layer+1) : [];
368 | }
369 |
370 | neuron.inputWeightsIndex = Array(prev_neurons.length);
371 | neuron.outputWeightsIndex = Array(next_neurons.length);
372 |
373 | // Input weights
374 | for (j = 0, l = prev_neurons.length; j < l; j++) {
375 | neuron.inputWeightsIndex[j] = this.getWeightIndex(prev_neurons[j], neuron);
376 | this.weights[neuron.inputWeightsIndex[j]] *= Math.sqrt(2 / l);
377 | }
378 |
379 | // Output weights
380 | for (j = 0, l = next_neurons.length; j < l; j++)
381 | neuron.outputWeightsIndex[j] = this.getWeightIndex(neuron, next_neurons[j]);
382 | }
383 |
384 | // Initialize brain.output to zeros, to avoid training problems
385 | this.output = Array(this.layers[this.nbLayers - 1]);
386 | this.output.fill(0);
387 |
388 | // Display the complexity of this new NN (weights + biais)
389 | var parameters = this.weights.length + this.nbNeurons;
390 | console.info("This neural network has %d parameters.", parameters);
391 | };
392 |
393 | Network.prototype.createVisualization = function() {
394 |
395 | var i, l, l2, n, index;
396 | var x1, y1, x2, y2, max_y1 = 0;
397 | var neuron1, neuron2, is_input;
398 | var DOM_tmp, DOM_weight;
399 |
400 | var _MARGIN_X = 150;
401 | var _MARGIN_Y = 75;
402 | var that = this;
403 |
404 | // Create DOM elements
405 | var container = document.createElement("div");
406 | this.DOM.svg = document.createElementNS("http://www.w3.org/2000/svg", "svg");
407 | this.DOM.tooltip = document.createElement("div");
408 | this.DOM.tooltip.setAttribute("id", "tooltip");
409 | this.DOM.tooltip.setAttribute("class", "off");
410 | container.appendChild(this.DOM.svg);
411 | container.appendChild(this.DOM.tooltip);
412 |
413 | this.DOM.neuronsCircles = [];
414 | this.DOM.weightTexts = [];
415 | this.DOM.inputTexts = [];
416 | this.DOM.outputTexts = [];
417 | this.DOM.weightCurves = [];
418 |
419 | // Computing functions & listeners callbacks
420 | function calcX(neuron) {
421 | return (neuron.layer + 1) * _MARGIN_X;
422 | }
423 |
424 | function calcY(neuron) {
425 | return (neuron.id - (that.layersSum[neuron.layer-1] || 0)) * _MARGIN_Y + _MARGIN_Y / 2;
426 | }
427 |
428 | function neuronTooltipOn(event) {
429 | Utils.static.tooltipOn( that.DOM.tooltip, event, that.neurons[event.target.getAttribute("data-object")] );
430 | }
431 |
432 | function neuronTooltipOff(event) {
433 | Utils.static.tooltipOff( that.DOM.tooltip );
434 | }
435 |
436 | // Fetching every neuron
437 | for (i = 0, l = this.neurons.length; i < l; i++)
438 | {
439 | neuron1 = this.neurons[i];
440 | x1 = calcX(neuron1);
441 | y1 = calcY(neuron1);
442 |
443 | // Fetching neurons from next layer for weights
444 | for (n = 0, l2 = (this.layers[neuron1.layer + 1] || 0); n < l2; n++)
445 | {
446 | neuron2 = this.neurons[this.layersSum[ neuron1.layer ] + n];
447 | index = this.getWeightIndex(neuron1, neuron2);
448 | x2 = calcX(neuron2);
449 | y2 = calcY(neuron2);
450 |
451 | // Creating SVG weights
452 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "path");
453 | DOM_tmp.setAttribute("class", "weight");
454 | DOM_tmp.setAttribute("data-object", index);
455 | DOM_tmp.setAttribute("d", "M" + x1 + "," + y1 +" C" + (x1 + _MARGIN_X/2) + "," + y1 + " " + (x1 + _MARGIN_X/2) + "," + y2 + " " + x2 + "," + y2);
456 | DOM_tmp.setAttribute("stroke-width", _SVG_STROKE_WIDTH);
457 |
458 | this.DOM.svg.appendChild(DOM_tmp);
459 | this.DOM.weightCurves.push(DOM_tmp);
460 |
461 | // Creating SVG weight Text
462 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "text");
463 | DOM_tmp.setAttribute("class", "weight-text");
464 | DOM_tmp.setAttribute("data-object", index);
465 | DOM_tmp.setAttribute("x", x1 + (x2 - x1) * 0.2);
466 | DOM_tmp.setAttribute("y", y1 + (y2 - y1) * 0.2);
467 |
468 | this.DOM.weightTexts.push(DOM_tmp);
469 | }
470 |
471 | // Creating SVG input/output lines and text
472 | if (neuron1.layer === 0 || neuron1.layer === this.nbLayers-1)
473 | {
474 | is_input = neuron1.layer === 0 ? 1 : -1;
475 |
476 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "path");
477 | DOM_tmp.setAttribute("class", "weight");
478 | DOM_tmp.setAttribute("d", "M" + x1 + "," + y1 +" L" + (x1 - _MARGIN_X / 4 * is_input) + "," + y1);
479 |
480 | this.DOM.svg.appendChild(DOM_tmp);
481 |
482 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "text");
483 | DOM_tmp.setAttribute("class", is_input === 1 ? "input-text" : "output-text");
484 | DOM_tmp.setAttribute("x", is_input === 1 ? x1 - _MARGIN_X / 1.8 : x1 + _MARGIN_X / 3);
485 | DOM_tmp.setAttribute("y", y1 + 5);
486 |
487 | if (is_input === 1)
488 | this.DOM.inputTexts.push(DOM_tmp);
489 | else
490 | this.DOM.outputTexts.push(DOM_tmp);
491 | }
492 |
493 | // Creating SVG neuron
494 | DOM_tmp = document.createElementNS("http://www.w3.org/2000/svg", "circle");
495 | DOM_tmp.setAttribute("class", "neuron");
496 | DOM_tmp.setAttribute("data-object", neuron1.id);
497 | DOM_tmp.setAttribute("cx", x1);
498 | DOM_tmp.setAttribute("cy", y1);
499 | DOM_tmp.setAttribute("r", _SVG_CIRCLE_RADIUS);
500 | DOM_tmp.setAttribute("fill", _SVG_CIRCLE_COLOR_DEFAULT);
501 | DOM_tmp.addEventListener("mousemove", neuronTooltipOn);
502 | DOM_tmp.addEventListener("mouseout", neuronTooltipOff);
503 |
504 | this.DOM.svg.appendChild(DOM_tmp);
505 | this.DOM.neuronsCircles.push(DOM_tmp);
506 |
507 | max_y1 = max_y1 < y1 ? y1 : max_y1;
508 | }
509 |
510 | // We stretch our svg document (here x2 is supposed to be the maximum possible)
511 | var width = x2 + _MARGIN_X, height = max_y1 + _MARGIN_Y / 2, scale = 1.5;
512 | this.DOM.svg.setAttribute("width", width >= window.innerWidth ? width/scale : width);
513 | this.DOM.svg.setAttribute("height", width >= window.innerWidth ? height/scale : height);
514 | this.DOM.svg.setAttribute("viewBox", "0 0 " + width + " " + height);
515 |
516 | // Push text elements on top of everything
517 | var svg_texts = this.DOM.outputTexts.concat( this.DOM.inputTexts.concat( this.DOM.weightTexts ));
518 |
519 | for (i = 0, l = svg_texts.length; i < l; i++)
520 | this.DOM.svg.appendChild( svg_texts[i] );
521 |
522 | this.svgVisualization = true;
523 | console.info("SVG visualization ready");
524 |
525 | return container;
526 | };
527 |
528 | Network.prototype.visualize = function(inputs, precision) {
529 |
530 |
531 | if (!this.svgVisualization)
532 | throw new NetException("SVG Visualization is not available", {network: this});
533 |
534 | if (!inputs || inputs.length !== this.layers[0])
535 | throw new NetException("Incorrect inputs (undefined or incorrect length)", {inputs: inputs, layer: this.layers[0]});
536 |
537 | var i, l;
538 | var output_neurons = this.getNeuronsInLayer( this.nbLayers-1 );
539 | precision = precision || 1;
540 |
541 | // Update SVG text inputs
542 | for (i = 0, l = this.DOM.inputTexts.length; i < l; i++)
543 | this.DOM.inputTexts[i].innerHTML = inputs[i].toFixed(precision);
544 |
545 | // Update SVG text outputs
546 | for (i = 0, l = this.DOM.outputTexts.length; i < l; i++)
547 | this.DOM.outputTexts[i].innerHTML = output_neurons[i].output ? output_neurons[i].output.toFixed(precision) : output_neurons[i].output;
548 |
549 | // Update SVG weights
550 | for (i = 0, l = this.nbWeights; i < l; i++) {
551 | this.DOM.weightCurves[i].setAttribute("stroke-width", Math.abs(this.weights[i]) / this.maxWeight * _SVG_STROKE_WIDTH);
552 | if (this.avgWeightsPerNeuron < _SVG_MAX_WEIGHTS_DISPLAY_TEXT)
553 | this.DOM.weightTexts[i].innerHTML = this.weights[i].toFixed(4);
554 | }
555 |
556 | // Update tooltip
557 | if (this.DOM.tooltip.object !== undefined)
558 | Utils.static.tooltipUpdate(this.DOM.tooltip, this.DOM.tooltip.object);
559 | };
560 |
561 | Network.prototype.feed = function(inputs) {
562 |
563 | if (!inputs || inputs.length !== this.layers[0])
564 | throw new NetException("Incorrect inputs", {inputs: inputs, layer: this.layers[0]});
565 |
566 | var index, n, l, sum, neuron, prev_neurons; // neurons from previous layer
567 | var curr_layer = 0;
568 |
569 | // Input layer filling
570 | for (index = 0; index < this.layers[0]; index++)
571 | this.neurons[index].output = inputs[index];
572 |
573 | // Fetching neurons from the second layer onwards (curr_layer starts at 0 but is incremented immediately)
574 | for (index = this.layers[0]; index < this.nbNeurons; index++)
575 | {
576 | neuron = this.neurons[index];
577 |
578 | if (neuron.dropped)
579 | continue;
580 |
581 | // Update if necessary all previous layer neurons
582 | if (prev_neurons === undefined || neuron.layer !== curr_layer)
583 | prev_neurons = this.getNeuronsInLayer(curr_layer++);
584 |
585 | // Computing w1*x1 + ... + wn*xn
586 | for (sum = 0, n = 0, l = prev_neurons.length; n < l; n++)
587 | if (!prev_neurons[n].dropped)
588 | sum += this.weights[neuron.inputWeightsIndex[n]] * prev_neurons[n].output;
589 |
590 | // Updating output
591 | neuron.agregation = sum + neuron.biais;
592 | neuron.output = neuron.activation(neuron.agregation);
593 |
594 | if (!isFinite(neuron.output))
595 | throw new NetException("Non finite or too high output. You may have a problem in your code", {neuron: neuron});
596 | }
597 |
598 | // Update network output
599 | var neurons = this.getNeuronsInLayer(this.nbLayers-1);
600 | for (n = 0, l = this.layers[this.nbLayers-1]; n < l; n++)
601 | this.output[n] = neurons[n].output;
602 |
603 | // Return output neurons
604 | return neurons;
605 | };
606 |
607 | Network.prototype.loss = function(targets) {
608 |
609 | var outputs_neurons = this.getNeuronsInLayer(this.nbLayers - 1);
610 |
611 | if (!targets || !outputs_neurons || targets.length !== outputs_neurons.length)
612 | throw new NetException("Incoherent targets for current outputs", { targets: targets, outputs_neurons: outputs_neurons });
613 |
614 | // Compute output error with our loss function
615 | // https://en.wikipedia.org/wiki/Backpropagation
616 |
617 | var n, l, neuron;
618 | this.outputError = 0;
619 |
620 | // Output layer filling: err = (expected-obtained)
621 | for (n = 0, l = outputs_neurons.length; n < l; n++) {
622 | neuron = outputs_neurons[n];
623 | neuron.error = (targets[n] - neuron.output) * neuron.derivative(neuron.agregation);
624 | this.outputError += 1 / 2 * neuron.error * neuron.error;
625 |
626 | if (!isFinite(neuron.error))
627 | throw new NetException("Non finite error on output neuron. You may have a problem in your code", { neuron: neuron });
628 | }
629 | };
630 |
631 | Network.prototype.backpropagate = function(targets) {
632 |
633 | // Compute current output error with our loss function
634 | this.loss(targets);
635 |
636 | var index, weight_index, n, l, sum, calc, grad, weight, max_weight = 0;
637 | var output_error = 0, curr_layer = this.nbLayers - 1;
638 | var neuron, next_neurons;
639 |
640 | this.iterations++; // need to be 1 in first for Adam computing
641 | this.globalError = 0;
642 |
643 | // Fetching neurons from last layer: backpropagate error & update weights
644 | for (index = this.layersSum[curr_layer-1] - 1; index >= 0; index--)
645 | {
646 | neuron = this.neurons[index];
647 |
648 | if (neuron.dropped)
649 | continue;
650 |
651 | // Update if necessary all next layer neurons
652 | if (next_neurons === undefined || neuron.layer !== curr_layer)
653 | next_neurons = this.getNeuronsInLayer(curr_layer--);
654 |
655 | // Computing w1*e1 + ... + wn*en
656 | for (sum = 0, n = 0, l = next_neurons.length; n < l; n++) {
657 | if (!next_neurons[n].dropped)
658 | sum += this.weights[neuron.outputWeightsIndex[n]] * next_neurons[n].error;
659 | }
660 |
661 | // Updating error
662 | neuron.error = sum * neuron.derivative(neuron.agregation);
663 | this.globalError += Math.abs(neuron.error);
664 |
665 | if (!isFinite(neuron.error)) {
666 | throw new NetException("Non finite error. You may have a problem in your code", {neuron: neuron});
667 | } else if (Math.abs(neuron.error) > _ERROR_VALUE_TOO_HIGH) {
668 | console.info("Scaling down error to a max", {neuron: neuron, error: neuron.error});
669 | neuron.error = neuron.error < 0 ? - _ERROR_VALUE_TOO_HIGH : _ERROR_VALUE_TOO_HIGH;
670 | throw new NetException("Computed error is too high. Try a smaller learning rate?", {neuron: neuron});
671 | }
672 |
673 | // Updating weights w = w + lr * en * output
674 | for (n = 0, l = next_neurons.length; n < l; n++)
675 | {
676 | if (next_neurons[n].dropped)
677 | continue;
678 |
679 | weight_index = neuron.outputWeightsIndex[n];
680 |
681 | // Compute new values w.r.t gradient optimizer
682 | grad = next_neurons[n].error * neuron.output;
683 | calc = this.optimizeGradient(this.weights[weight_index], grad, this.momentums[weight_index], this.gradients[weight_index]);
684 |
685 | // Updates values
686 | this.weights[weight_index] = weight = calc.value;
687 | this.momentums[weight_index] = calc.momentum;
688 | this.gradients[weight_index] = calc.gradients;
689 |
690 | // Update maxWeight (for visualisation)
691 | max_weight = max_weight < Math.abs(weight) ? Math.abs(weight) : max_weight;
692 |
693 | if (!isFinite(weight)) {
694 | throw new NetException("Non finite weight. You may have a problem in your code", {neuron: neuron, weight: weight});
695 | } else if (Math.abs(weight) > _WEIGHT_VALUE_TOO_HIGH) {
696 | console.info("Scaling down weight to a max.", {neuron: neuron, weight: weight});
697 | weight = weight < 0 ? - _WEIGHT_VALUE_TOO_HIGH : _WEIGHT_VALUE_TOO_HIGH;
698 | }
699 | }
700 |
701 | // Compute biais with gradient optimizer
702 | grad = neuron.error;
703 | calc = this.optimizeGradient(neuron.biais, grad, neuron.biaisMomentum, neuron.biaisGradient);
704 |
705 | // Updates values
706 | neuron.biais = calc.value;
707 | neuron.biaisMomentum = calc.momentum;
708 | neuron.biaisGradient = calc.gradients;
709 |
710 | if (!isFinite(neuron.biais))
711 | throw new NetException("Non finite biais. You may have a problem in your code", {neuron: neuron});
712 | }
713 |
714 | this.maxWeight = max_weight;
715 | };
716 |
717 | Network.prototype.optimizeGradient = function(value, grad, momentum, gradients) {
718 |
719 | var p = this.optimizerParams, prev_momentum = momentum;
720 |
721 | if (value === undefined || grad === undefined || momentum === undefined || gradients === undefined)
722 | throw new NetException("Invalid parameters for gradient optimization", { value: value, grad: grad, momentum: momentum, gradients: gradients });
723 |
724 | // Momentum helps to escape local minima,
725 | // Nesterov accelerated gradient is smarter than momentum because inertia is predicted
726 | // Adagrad aims to automatically decrease the learning rate
727 | // Adadelta corrects the overly aggressive learning rate reduction of Adagrad
728 |
729 | switch (this.optimizer)
730 | {
731 | case "momentum":
732 | momentum = (1 - p.alpha) * this.lr * grad + p.alpha * momentum;
733 | value += momentum;
734 | break;
735 |
736 | case "nag":
737 | momentum = p.alpha * momentum + (1 - p.alpha) * this.lr * grad;
738 | value += -p.alpha * prev_momentum + (1 + p.alpha) * momentum;
739 | break;
740 |
741 | case "adagrad":
742 | gradients += grad * grad; // this contains the sum of all past squared gradients
743 | value += this.lr * grad / (Math.sqrt(gradients) + _EPSILON);
744 | break;
745 |
746 | case "adadelta":
747 | gradients = p.alpha * gradients + (1 - p.alpha) * grad * grad; // this contains the decaying average of all past squared gradients
748 | value += this.lr * grad / (Math.sqrt(gradients) + _EPSILON);
749 | break;
750 |
751 | case "adam":
752 | momentum = p.beta1 * momentum + (1 - p.beta1) * grad;
753 | gradients = p.beta2 * gradients + (1 - p.beta2) * grad * grad;
754 |
755 | var mt = momentum / (1 - Math.pow(p.beta1, this.iterations)); // momentum biais correction
756 | var gt = gradients / (1 - Math.pow(p.beta2, this.iterations)); // gradients biais correction
757 |
758 | value += this.lr * mt / (Math.sqrt(gt) + _EPSILON);
759 | break;
760 |
761 | default: // good-old vanilla SGD
762 | value += this.lr * grad;
763 | }
764 |
765 | return { value: value, grad: grad, momentum: momentum, gradients: gradients };
766 | };
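// Configuration sketch (illustrative values): the switch above reads its
// coefficients from optimizerParams, e.g. {alpha: ...} for "momentum", "nag" and
// "adadelta", or {beta1: ..., beta2: ...} for "adam":
//   var net = new Network({
//       libURI: "https://yourdomain.com/neural-network.js",
//       lr: 0.01, layers: [2, 4, 1],
//       optimizer: "adam", optimizerParams: {beta1: 0.9, beta2: 0.999}
//   });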
767 |
768 | Network.prototype.dropout = function(completely_random, drop_inputs) {
769 |
770 | // Dropping out random neurons helps push our network out of a bad solution
771 | // If completely_random === true, the same neuron can be dropped again.
772 | // We usually start from the first hidden layer, but it is possible to start from the input layer if drop_inputs === true
773 |
774 | var i, l, n, neurons, shot;
775 | completely_random = typeof completely_random === "undefined" ? true : completely_random;
776 |
777 | for (i = drop_inputs === true ? 0 : 1; i < this.nbLayers-1; i++)
778 | {
779 | neurons = this.getNeuronsInLayer(i);
780 | shot = completely_random ? undefined : Math.round( Math.random() * (this.layers[i] - 1) );
781 |
782 | for (n = 0, l = neurons.length; n < l; n++)
783 | {
784 | if (shot === n || (shot === undefined && Math.random() >= _DROPOUT_PROBABILITY))
785 | {
786 | if (neurons[n].dropped === false && this.svgVisualization === true) // update visualisation
787 | this.DOM.neuronsCircles[this.getNeuronIndex(i, n)].setAttribute("fill", _SVG_CIRCLE_COLOR_DROPPED);
788 | neurons[n].dropped = true;
789 | }
790 | else
791 | {
792 | if (neurons[n].dropped === true && this.svgVisualization === true) // update visualisation
793 | this.DOM.neuronsCircles[this.getNeuronIndex(i, n)].setAttribute("fill", _SVG_CIRCLE_COLOR_DEFAULT);
794 | neurons[n].dropped = false;
795 | }
796 | }
797 | }
798 | };
799 |
800 | Network.prototype.validate = function (params) {
801 |
802 | if (!params)
803 | throw new NetException("Invalid parameters object for validation", { params: params });
804 |
805 | params.backpropagate = false;
806 | params.epochs = 1;
807 | params.dropout = false;
808 |
809 | return this.train(params);
810 | };
811 |
812 | Network.prototype.train = function(params) {
813 |
814 | if (!params)
815 | throw new NetException("Invalid parameters object for training", {params: params});
816 |
817 | var training_data = params.trainingSet || undefined;
818 | var validation_data = params.validationSet || [];
819 | var test_data = params.testSet || [];
820 |
821 | var epochs = params.epochs || undefined;
822 |
823 | if (!training_data || training_data.length <= 0)
824 | throw new NetException("Invalid raw training data (object) passed in parameters", {trainingSet: training_data});
825 |
826 | if (!epochs || isNaN(epochs))
827 | throw new NetException("Invalid epochs number for training", {epochs: epochs});
828 |
829 | if (typeof window.Worker === "undefined" || !window.Worker)
830 | throw new NetException("Web Worker is not supported by your client. Please upgrade in order to train as background operation");
831 |
832 | // Important to register these here (accessible in worker callback)
833 | var training_size = training_data.length;
834 | var validation_size = validation_data.length;
835 | var test_size = test_data.length;
836 | var gather_all = epochs * training_size <= _TRAINING_GATHER_ALL_THRESHOLD;
837 |
838 | console.info("Training: trying to handle %d extracted inputs/targets", training_size);
839 | console.info("Validation: trying to handle %d extracted inputs/targets", validation_size);
840 | console.info("Test: trying to handle %d extracted inputs/targets", test_size);
841 |
842 | // Create visualisation elements (these are also accessible beyond this scope, in the worker callback)
843 | var container, graph, graph_ctx, text_output;
844 | var graph_width = params.graphWidth ? params.graphWidth : _CANVAS_GRAPH_DEFAULT_WIDTH;
845 | var graph_height = params.graphHeight ? params.graphHeight : _CANVAS_GRAPH_DEFAULT_HEIGHT;
846 | var scaled_width;
847 |
848 | if (params.visualize === true)
849 | {
850 | // Create canvas
851 | container = document.createElement("div");
852 | container.setAttribute("style", "margin: 10px;");
853 |
854 | graph = document.createElement("canvas");
855 | graph.setAttribute("width", graph_width);
856 | graph.setAttribute("height", graph_height);
857 | container.appendChild( graph );
858 |
859 | // Create global error mean output
860 | text_output = document.createElement("samp");
861 | container.appendChild( text_output );
862 |
863 | // We don't want to display more data points than useful
864 | if (gather_all)
865 | scaled_width = graph_width / (epochs * training_data.length);
866 | else
867 | scaled_width = graph_width / epochs;
868 |
869 | graph_ctx = graph.getContext("2d");
870 | graph_ctx.translate(0, graph_height);
871 | graph_ctx.scale(scaled_width, - graph_height);
872 | // graph_ctx.scale(1, - _CANVAS_GRAPH_HEIGHT);
873 | graph_ctx.globalAlpha = 0.8;
874 | graph_ctx.lineWidth = 0.03;
875 |
876 | // Following functions will be called in our requestAnimFrame
877 | var display_curves = function (data, window_width, fill, stroke, fill_style, stroke_style)
878 | {
879 | if (!data || data.length === 0)
880 | return;
881 |
882 | var ratio = window_width / (data.length-1);
883 | var l = data.length;
884 |
885 | graph_ctx.fillStyle = fill_style;
886 | graph_ctx.strokeStyle = stroke_style;
887 | graph_ctx.beginPath();
888 | graph_ctx.moveTo(0, 0);
889 |
890 | for (var i = 0; i < l; i++)
891 | graph_ctx.lineTo(i * ratio, Math.sqrt(data[i] + _EPSILON) * _CANVAS_GRAPH_WINDOW_FACTOR);
892 |
893 | if (fill) {
894 | // graph_ctx.lineTo(i * ratio, Math.sqrt(data[i-1] + _EPSILON) * _CANVAS_GRAPH_WINDOW_FACTOR);
895 | graph_ctx.lineTo((i-1) * ratio, 0);
896 | graph_ctx.closePath();
897 | graph_ctx.fill();
898 | }
899 |
900 | if (stroke) {
901 | graph_ctx.stroke();
902 | graph_ctx.closePath();
903 | }
904 | };
905 |
906 | var Stats = function (losses, epoch_mean_loss, global_mean_loss) {
907 |
908 | this.size = losses.length;
909 | this.losses = losses;
910 | this.epoch_mean_loss = epoch_mean_loss;
911 | this.global_mean_loss = global_mean_loss;
912 | };
913 | }
914 |
915 | //////////////// Worker below ////////////////////////////
916 |
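// The worker script is built by stringifying workerHandler and invoking it inside
// a Blob URL; the worker then reloads the whole library on its side via
// importScripts(e.data.lib), which is why libURI is required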
917 | var blob = new Blob(['(' + this.workerHandler.toString() + ')()' ], { type: "text/javascript" });
918 | var worker = new Worker(window.URL.createObjectURL(blob));
919 | var that = this;
920 |
921 | worker.addEventListener("message", function(e) {
922 |
923 | if (typeof e.data.type === "undefined")
924 | throw new NetException("Worker message needs to contain message type (WORKER_TRAINING_X)", {data: e.data});
925 |
926 | // Training is over for the current epoch: we display our losses
927 | if (e.data.type === _WORKER_TRAINING_PENDING)
928 | {
929 | if (params.visualize !== true)
930 | return;
931 |
932 | window.requestAnimationFrame(function() {
933 |
934 | var training = new Stats(e.data.training_stats.losses, e.data.training_stats.epoch_mean_loss, e.data.training_stats.global_mean_loss);
935 | var validation = new Stats(e.data.validation_stats.losses, e.data.validation_stats.epoch_mean_loss, e.data.validation_stats.global_mean_loss);
936 | var test = new Stats(e.data.test_stats.losses, e.data.test_stats.epoch_mean_loss, e.data.test_stats.global_mean_loss);
937 |
938 | var smooth_size = graph_width * _CANVAS_GRAPH_SMOOTH_FACTOR;
939 |
940 | ////////////////////////////
941 |
942 | graph_ctx.clearRect(0, 0, graph_width / scaled_width, 1);
943 |
944 | // Graphically separate epochs (only with a small amount of epochs)
945 | if (epochs <= _CANVAS_GRAPH_SEPARATE_EPOCHS_THRESHOLD) {
946 | graph_ctx.fillStyle = "#c7cbe0";
947 | for (var i = 1; i < epochs; i++)
948 | graph_ctx.fillRect(i * graph_width / scaled_width / epochs, 0, 1 / scaled_width, 1);
949 | }
950 |
951 | // Display the training set losses curve
952 | display_curves(training.losses.average(graph_width), training.size, true, false, _COLOR_ASPHALT, _COLOR_BLUE);
953 |
954 | // Display smoother mean if necessary
955 | if (gather_all)
956 | display_curves(training.losses.average(graph_width * _CANVAS_GRAPH_SMOOTH_FACTOR), training.size, false, true, _COLOR_ASPHALT, _COLOR_BLUE);
957 |
958 | // Display the validation set and test set smoothly
959 | display_curves(validation.losses.average(graph_width), training.size, false, true, "pink", _COLOR_PURPLE);
960 | display_curves(test.losses.average(graph_width), training.size, false, true, "pink", _COLOR_GREEN);
961 |
962 | // Update output text display
963 | text_output.innerHTML = "epoch " + (e.data.curr_epoch+1) + "/" + epochs + " | curr error mean: " + training.epoch_mean_loss.toFixed(5);
964 | });
965 | }
966 |
967 | // Training is over: we update our weights and biais
968 | else if (e.data.type === _WORKER_TRAINING_OVER)
969 | {
970 | that.importWeights( e.data.weights );
971 | that.importBiais( e.data.biais );
972 |
973 | // Feed and compute the loss once, so that neurons hold updated values (such as error)
974 | that.feed( training_data[0].inputs );
975 | that.loss( training_data[0].targets );
976 |
977 | // Free space
978 | training_data = null;
979 | validation_data = null;
980 | test_data = null;
981 | worker.terminate();
982 | }
983 | });
984 |
985 | // Start web worker with training data through epochs
986 | worker.postMessage({
987 | lib: this.libURI,
988 | params: this.exportParams(),
989 | weights: this.exportWeights(),
990 | biais: this.exportBiais(),
991 |
992 | trainingData: training_data,
993 | validationData: validation_data,
994 | testData: test_data,
995 |
996 | epochs: epochs,
997 | options: {
998 | backpropagate: params.backpropagate !== undefined ? params.backpropagate : _DEFAULT_TRAINING_BACKPROPAGATE,
999 | dropout: params.dropout !== undefined ? params.dropout : _DEFAULT_TRAINING_DROPOUT,
1000 | shuffle: params.shuffle !== undefined ? params.shuffle : _DEFAULT_TRAINING_SHUFFLE
1001 | }
1002 | });
1003 |
1004 | // You can disable the worker (for example: to analyze performance with development tools)
1005 | // this.disabledWorkerHandler({
1006 | // ... same params ...
1007 | // });
1008 |
1009 | return container || null;
1010 | };
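// Usage sketch (illustrative): train() reads trainingSet/validationSet/testSet,
// epochs, and the optional visualize/backpropagate/dropout/shuffle flags, and
// returns the canvas container when visualize === true:
//   var container = net.train({trainingSet: set, epochs: 100, visualize: true});
//   if (container) document.body.appendChild(container);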
1011 |
1012 | Network.prototype.workerHandler = function() {
1013 |
1014 | // onmessage below holds the core training loop, which will be executed by our web worker
1015 | onmessage = function(e) {
1016 |
1017 | if (typeof importScripts !== "undefined")
1018 | importScripts(e.data.lib);
1019 |
1020 | if (!e.data.lib || !e.data.params || !e.data.weights)
1021 | throw new NetException("Invalid lib_url, params or weights in order to build a Neural Network copy", {lib: e.data.lib, params: e.data.params, weights: e.data.weights});
1022 |
1023 | var epochs = e.data.epochs;
1024 | var training_data = e.data.trainingData;
1025 | var validation_data = e.data.validationData;
1026 | var test_data = e.data.testData;
1027 | var options = {
1028 | backpropagate: e.data.options.backpropagate,
1029 | dropout: e.data.options.dropout,
1030 | shuffle: e.data.options.shuffle
1031 | };
1032 |
1033 | console.info("Training imported data in progress... " + epochs + " epochs requested. Options: ", options);
1034 | console.info("Brain copy below:");
1035 |
1036 | // Create copy of our current Network
1037 | var brain = new Network(e.data.params);
1038 | brain.importWeights(e.data.weights);
1039 | brain.importBiais(e.data.biais);
1040 |
1041 | ///////////////////// Training - validation - test //////////////////////////////
1042 |
1043 | var datasetHandler = function(data) {
1044 |
1045 | this.data = data;
1046 | this.size = data.length;
1047 |
1048 | this.losses = [];
1049 | this.lossesMean = 0;
1050 | this.lossesSum = 0;
1051 | this.globalLossesSum = 0;
1052 |
1053 | this.epochMeanLoss = undefined;
1054 | this.globalMeanLoss = undefined;
1055 | };
1056 |
1057 | datasetHandler.prototype.fetch = function(options, backpropagate) {
1058 |
1059 | // Above a threshold, we only collect the mean loss of each epoch. This improves display performance (on the canvas)
1060 | // and avoids passing oversized arrays back to the main thread
1061 | var gather_all = epochs * this.size <= _TRAINING_GATHER_ALL_THRESHOLD;
1062 |
1063 | // Shuffling data can improve learning
1064 | if (options.shuffle === true)
1065 | this.data = this.data.shuffle();
1066 |
1067 | // Feedforward the NN through the dataset
1068 | for (this.lossesSum = 0, i = 0; i < this.size; i++) {
1069 | try {
1070 | brain.feed(this.data[i].inputs);
1071 |
1072 | if (backpropagate === false)
1073 | brain.loss(this.data[i].targets);
1074 | else
1075 | brain.backpropagate(this.data[i].targets);
1076 | }
1077 |
1078 | catch (ex) {
1079 | console.error(ex);
1080 | return false;
1081 | }
1082 |
1083 | this.lossesSum += brain.outputError;
1084 |
1085 | // Keep every single loss of every epoch for display
1086 | if (gather_all)
1087 | this.losses.push(brain.outputError);
1088 | }
1089 |
1090 | this.globalLossesSum += this.lossesSum;
1091 | this.epochMeanLoss = this.lossesSum / this.size;
1092 | this.globalMeanLoss = this.globalLossesSum / ((curr_epoch + 1) * this.size);
1093 |
1094 | // Display the loss mean for every epoch
1095 | if (!gather_all)
1096 | this.losses.push(this.epochMeanLoss);
1097 |
1098 | return true;
1099 | };
1100 |
1101 | var i, n, curr_epoch;
1102 |
1103 | var training_handler = new datasetHandler(training_data);
1104 | var validation_handler = new datasetHandler(validation_data);
1105 | var test_handler = new datasetHandler(test_data);
1106 |
1107 | // Variables that will store means of the current training in order to fire a dropout if requested. See below the dropout execution
1108 | var last_means = [];
1109 | var last_means_sum = 0;
1110 |
1111 | // Repeat the feedforward & backpropagation process for 'epochs' epochs
1112 | for (curr_epoch = 0; curr_epoch < epochs; curr_epoch++)
1113 | {
1114 | // Train by using the training set
1115 | if (!training_handler.fetch(options, options.backpropagate))
1116 | return;
1117 |
1118 | // Feed the NN with the validation set
1119 | if (!validation_handler.fetch(options, false))
1120 | return;
1121 |
1122 | // Feed the NN with the test set
1123 | if (!test_handler.fetch(options, false))
1124 | return;
1125 |
1126 | options.dropout = options.dropout === true ? _TRAINING_DROPOUT_EPOCHS_THRESHOLD : options.dropout;
1127 |
1128 | // Introducing dynamic dropout every "options.dropout" epochs,
1129 | // if the mean difference is below _TRAINING_DROPOUT_MEAN_THRESHOLD
1130 | if (options.dropout !== false)
1131 | {
1132 | last_means_sum += training_handler.epochMeanLoss;
1133 | last_means.push( training_handler.epochMeanLoss );
1134 |
1135 | if (last_means.length >= options.dropout)
1136 | {
1137 | last_means_sum -= last_means.shift();
1138 | var local_mean = last_means_sum / options.dropout;
1139 |
1140 | if (local_mean - training_handler.epochMeanLoss <= _TRAINING_DROPOUT_MEAN_THRESHOLD) {
1141 | console.info("EVENT: Dropout at epoch #%d", curr_epoch);
1142 | brain.dropout(false);
1143 | last_means = [];
1144 | last_means_sum = 0;
1145 | }
1146 | }
1147 | }
1148 |
1149 | // Send updates back to the main thread
1150 | self.postMessage({
1151 | type: _WORKER_TRAINING_PENDING,
1152 | curr_epoch: curr_epoch,
1153 |
1154 | training_stats: {
1155 | losses: training_handler.losses,
1156 | epoch_mean_loss: training_handler.epochMeanLoss,
1157 | global_mean_loss: training_handler.globalMeanLoss,
1158 | },
1159 |
1160 | validation_stats: {
1161 | losses: validation_handler.losses,
1162 | epoch_mean_loss: validation_handler.epochMeanLoss,
1163 | global_mean_loss: validation_handler.globalMeanLoss,
1164 | },
1165 |
1166 | test_stats: {
1167 | losses: test_handler.losses,
1168 | epoch_mean_loss: test_handler.epochMeanLoss,
1169 | global_mean_loss: test_handler.globalMeanLoss,
1170 | }
1171 | });
1172 | }
1173 |
1174 | console.info("Training done. Gone through all epochs", {epochs: epochs, global_mean_loss: training_handler.globalMeanLoss});
1175 |
1176 | self.postMessage({
1177 | type: _WORKER_TRAINING_OVER,
1178 | weights: brain.exportWeights(),
1179 | biais: brain.exportBiais()
1180 | });
1181 |
1182 | self.close();
1183 | };
1184 |
1185 | return onmessage; // allows fallback for Network.disabledWorkerHandler
1186 | };
1187 |
1188 | Network.prototype.disabledWorkerHandler = function(data) {
1189 |
1190 | if (!data)
1191 | throw new NetException("Invalid data for disabledWorkerHandler", {data: data});
1192 |
1193 | // Override self.postMessage (it doesn't exist outside of a web worker; we create it here to avoid errors and to monitor what's happening)
1194 | self.postMessage = function(data) {
1195 | console.log(data);
1196 | };
1197 |
1198 | this.workerHandler()({data: data});
1199 | };
1200 |
1201 | Network.prototype.getNeuronIndex = function(layer, n) {
1202 |
1203 | if (layer === undefined || layer < 0 || layer >= this.nbLayers)
1204 | throw new NetException("Invalid layer access", {layer: layer, n: n});
1205 |
1206 | if (n === undefined || n >= this.layers[layer])
1207 | throw new NetException("Invalid neuron access", {layer: layer, n: n});
1208 |
1209 | return (this.layersSum[layer-1] || 0) + n;
1210 | };
1211 |
1212 | Network.prototype.getNeuron = function(layer, n) {
1213 |
1214 | return this.neurons[this.getNeuronIndex(layer, n)];
1215 | };
1216 |
1217 | Network.prototype.getNeuronsInLayer = function(layer) {
1218 |
1219 | if ((!layer && layer !== 0) || layer < 0 || layer >= this.nbLayers)
1220 | throw new NetException("Invalid layer access", {layer: layer});
1221 |
1222 | return this.neurons.slice( this.layersSum[layer] - this.layers[layer], this.layersSum[layer]);
1223 | };
1224 |
1225 | Network.prototype.getWeightIndex = function(from, to, debug) {
1226 |
1227 | if (!from || !to)
1228 | throw new NetException("Invalid weight access, wrong neurons", {from: from, to: to});
1229 |
1230 | if (to.layer - from.layer !== 1 || to.layer <= 0 || to.layer >= this.nbLayers)
1231 | throw new NetException("Invalid weight access, layers are not adjacent", {from: from, to: to});
1232 |
1233 | // How to explain this formula ? IT'S RIGHT FROM MY BRAIN
1234 | var part1 = this.layersMul[from.layer]; // How many weights there are before from.layer
1235 | var part2 = (from.id - (this.layersSum[from.layer-1] || 0)) * this.layers[to.layer]; // How many weights there are in from.layer, but before our neuron
1236 | var part3 = to.id - this.layersSum[from.layer]; // How many weights there are from our neuron which do not go to our target neuron
1237 | var index = part1 + part2 + part3;
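// Worked example (illustrative, layers = [2, 3, 1]): for from = neuron 0 (layer 0)
// and to = neuron 3 (second neuron of layer 1):
// part1 = layersMul[0] = 0, part2 = (0 - 0) * 3 = 0, part3 = 3 - 2 = 1, so index = 1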
1238 |
1239 | if (debug || isNaN(this.weights[index]) || part1 < 0 || part2 < 0 || part3 < 0 || index < from.id)
1240 | {
1241 | console.log(from, to);
1242 | console.log("index: ", index);
1243 | console.log("#1", part1);
1244 | console.log("#2", part2);
1245 | console.log("#3", part3);
1246 |
1247 | if (isNaN(this.weights[index]))
1248 | throw new NetException("NaN detected for computing weight index");
1249 | else if (part1 < 0 || part2 < 0 || part3 < 0)
1250 | throw new NetException("Parts computation is incorrect: negative values");
1251 | else if (index < from.id)
1252 | throw new NetException("Incoherent index inferior to from.id");
1253 | else
1254 | throw new NetException("Error: debug launched", {debug: debug});
1255 | }
1256 |
1257 | return index;
1258 | };
1259 |
1260 | Network.prototype.getWeight = function(from, to) {
1261 |
1262 | return this.weights[this.getWeightIndex(from, to)];
1263 | };
1264 |
1265 | Network.prototype.setWeight = function(from, to, value) {
1266 |
1267 | this.weights[this.getWeightIndex(from, to)] = value;
1268 | };
1269 |
1270 | Network.prototype.setHiddenLayerToActivation = function(activation, derivation) {
1271 |
1272 | if (!activation || !derivation)
1273 | throw new NetException("Invalid activation and/or derivation assignment", {activation: activation, derivation: derivation});
1274 |
1275 | for (var i = this.layers[0]; i < this.layersSum[this.nbLayers-2]; i++) {
1276 | this.neurons[i].activation = activation;
1277 | this.neurons[i].derivative = derivation;
1278 | }
1279 | };
1280 |
1281 |
1282 | /////////////////////////// Statics network methods & activation functions
1283 |
1284 | Network.prototype.static_randomBiais = function() {
1285 | return Math.uniform() * _BIAIS_RANDOM_COEFF;
1286 | };
1287 |
1288 | Network.prototype.static_randomWeight = function() {
1289 | return Math.uniform() * _WEIGHT_RANDOM_COEFF;
1290 | };
1291 |
1292 | Network.prototype.static_linearActivation = function(x) {
1293 | return x;
1294 | };
1295 |
1296 | Network.prototype.static_linearDerivative = function(x) {
1297 | return 1;
1298 | };
1299 |
1300 | Network.prototype.static_tanhActivation = function(x) {
1301 | return Math.tanh(x);
1302 | };
1303 |
1304 | Network.prototype.static_tanhDerivative = function(x) {
1305 | return 1 - (Math.tanh(x) * Math.tanh(x));
1306 | };
1307 |
1308 | Network.prototype.static_sigmoidActivation = function(x) {
1309 | return 1 / (1 + Math.exp(-x));
1310 | };
1311 |
1312 | Network.prototype.static_sigmoidDerivative = function(x) {
1313 |     var s = this.network.static_sigmoidActivation(x); return s * (1 - s); // compute the sigmoid once and reuse it
1314 | };
1315 |
1316 | Network.prototype.static_reluActivation = function(x) {
1317 | return x < 0 ? 0 : x;
1318 | };
1319 |
1320 | Network.prototype.static_reluDerivative = function(x) {
1321 | return x < 0 ? 0 : 1;
1322 | };
1323 |
1324 | Network.prototype.static_preluActivation = function(x) {
1325 | return x < 0 ? this.network.activationParams.alpha * x : x;
1326 | };
1327 |
1328 | Network.prototype.static_preluDerivative = function(x) {
1329 | return x < 0 ? this.network.activationParams.alpha : 1;
1330 | };
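
// Sanity-check sketch (illustrative, not part of the library): each
// static_*Derivative above should match the true derivative of its
// static_*Activation; a finite-difference check for tanh, for instance:
//   var h = 1e-5, x = 0.3;
//   var numeric = (Math.tanh(x + h) - Math.tanh(x - h)) / (2 * h);
//   // numeric ~= 1 - Math.tanh(x) * Math.tanh(x)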
1331 |
1332 | /////////////////////////// Network Exception
1333 |
1334 | function NetException(message, variables) {
1335 |     this.message = message; this.variables = variables; console.error("ERROR: " + message, variables); // keep the details on the thrown object too
1336 | }
1337 |
1338 | Array.prototype.hash = function() {
1339 | return { hash: btoa(this.join()), size: this.length };
1340 | };
1341 |
1342 | Array.prototype.shuffle = function() { // in-place Fisher-Yates shuffle
1343 |
1344 | var j, x, i;
1345 |
1346 | for (i = this.length - 1; i > 0; i--) {
1347 | j = Math.floor(Math.random() * (i + 1));
1348 | x = this[i];
1349 | this[i] = this[j];
1350 | this[j] = x;
1351 | }
1352 |
1353 | return this;
1354 | };
1355 |
1356 | Array.prototype.average = function(size) {
1357 |
1358 | if (size >= this.length)
1359 | return this;
1360 |
1361 | var ratio = this.length / size;
1362 | var index, i, j, l = this.length, n = Math.ceil(ratio);
1363 | var sum, last_sum = 0, mean, avgs = [];
1364 |
1365 | for (i = 0; i < size; i++)
1366 | {
1367 |         index = Math.floor(i * ratio);
1368 | sum = 0;
1369 |
1370 | for (j = 0; j < n && index+j < l; j++)
1371 | sum += this[index + j];
1372 |
1373 | avgs.push((sum + last_sum) / (n * 2));
1374 | last_sum = sum;
1375 | }
1376 |
1377 | return avgs;
1378 | };
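
// Worked sketch (illustrative): downsampling six points to three. With
// ratio = 2 and n = 2, the window sums are 1+2=3, 3+4=7, 5+6=11, and each
// output blends the current and previous window over n * 2 samples:
//   [1, 2, 3, 4, 5, 6].average(3)  // [(3+0)/4, (7+3)/4, (11+7)/4] = [0.75, 2.5, 4.5]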
1379 |
1380 | Math.uniform = function() { // despite its name, this is bell-shaped: the sum of six uniform draws, rescaled to be centered on 0 within [-1, 1]
1381 | return ((Math.random() + Math.random() + Math.random() + Math.random() + Math.random() + Math.random()) - 3) / 3;
1382 | };
--------------------------------------------------------------------------------
/screenshots/automatic_dropping_doesn't do so much.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Spriteware/neural-network/18eba0d35a8533b1e2d42039f672d754a103b784/screenshots/automatic_dropping_doesn't do so much.PNG
--------------------------------------------------------------------------------
/screenshots/screenshot1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Spriteware/neural-network/18eba0d35a8533b1e2d42039f672d754a103b784/screenshots/screenshot1.PNG
--------------------------------------------------------------------------------
/screenshots/screenshot2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Spriteware/neural-network/18eba0d35a8533b1e2d42039f672d754a103b784/screenshots/screenshot2.PNG
--------------------------------------------------------------------------------