├── test
├── index.html
└── test.js
├── LICENSE
├── README.md
└── lib
├── matrix.js
└── mini-ann.js
/test/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | mini-ANN-test
5 |
6 |
7 |
8 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/test/test.js:
--------------------------------------------------------------------------------
// Smoke test: trains a 2-input / 1-output network to model an AND gate and
// logs the network's outputs before and after training.
function AND_gate_test() {
    console.log("AND GATE TEST : ");

    const net = new NeuralNetwork([2,1]);

    // [inputs, expected output] for every AND truth-table row.
    const samples = [
        [[1,1], [1]],
        [[1,0], [0]],
        [[0,1], [0]],
        [[0,0], [0]],
    ];

    // Logs the four truth-table predictions under the given heading.
    const report = (heading) => {
        console.log(heading);
        console.log('0 AND 0', net.feedforward([0,0]));
        console.log('0 AND 1', net.feedforward([0,1]));
        console.log('1 AND 0', net.feedforward([1,0]));
        console.log('1 AND 1', net.feedforward([1,1]));
    };

    report("Before training...");

    // several training epochs — one randomly-picked sample per step
    for (let step = 0; step < 10000; step++) {
        const [inputs, targets] = samples[Math.floor(Math.random() * samples.length)];
        net.train(inputs, targets);
    }

    report("After training...");
}

AND_gate_test();
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Siddharth Maurya
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | ## A lightweight Neural Network library in Javascript
4 |
5 | This library is inspired by [Toy-Neural-Network](https://github.com/CodingTrain/Toy-Neural-Network-JS), which works for one hidden layer. *mini-ANN-js* provides basic ANN functionality, including the ability to create a multilayer architecture, feed forward, train through backpropagation, and use helper functions for genetic algorithms.
6 |
7 | ### Documentation
8 |
9 | * **Initialize** Neural Network
10 |
11 | ```javascript
12 | // ANN with 4 inputs, 3 neurons in hidden layer and 2 outputs
13 | const my_ann = new NeuralNetwork([4, 3, 2]);
14 | // initializes ANN with random weights and biases
15 | ```
16 |
17 | * Changing **Activation function**
18 |
19 | ```javascript
20 | // set ReLU function as activation function
21 | my_ann.setActivation(NeuralNetwork.ReLU);
22 |
23 | // set Sigmoid function as activation function
24 | my_ann.setActivation(NeuralNetwork.SIGMOID);
25 |
26 | // By default Sigmoid is the activation function
27 | ```
28 |
29 | * Performing **feed forward**
30 |
31 | ```javascript
32 | // passing 4 inputs as follows...
33 | const output = my_ann.feedforward([0, 2, 1, 2]);
34 | // returns an array with outputs of ANN
35 |
36 | // pass 2nd arg for feedforward as true to get all layers instead of just output
37 | const all_layers = my_ann.feedforward([0, 2, 1, 2], true);
38 |
39 | ```
40 |
41 | * **Training** ANN
42 |
43 | ```javascript
44 | let input = [0, 2, 1, 2];
45 | let expected_output = [1,0];
46 | my_ann.train(input, expected_output);
47 | ```
48 |
49 | * Functions for **Genetic algorithms**
50 |
51 | ```javascript
52 | // mutate weights and biases of ANN
53 | my_ann.mutate(0.2); //mutation rate = 0.2 (min-0 & max-1)
54 |
55 | // creates a copy of ann
56 | const new_ann = my_ann.copy();
57 |
58 | // crossover
59 | const offspring = my_ann.crossover(other_ann);
60 | ```
61 |
62 | * **Visualization** function
63 |
64 | ```javascript
65 | const canvas = document.getElementById("canvas")
66 | const ctx = canvas.getContext("2d")
67 | const nn = new NeuralNetwork([3,5,3,2])
68 | nn.draw(
69 | ctx, // canvas context
70 | 100, // top left x coordinate to start drawing from
71 | 100, // top left y coordinate to start drawing from
72 | [-0.5, 0.75, 0.2], // OPTIONAL network inputs. When given also visualizes activation states
73 | 20 // OPTIONAL neuron radius. Default = 20
74 | )
75 | ```
76 |
77 | Example output:
78 |
79 | 
--------------------------------------------------------------------------------
/lib/matrix.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Matrix library based on - https://github.com/CodingTrain/Toy-Neural-Network-JS/blob/master/lib/matrix.js
3 | */
4 | class Matrix {
5 | /**
6 | * Values are initialized to 0
7 | * @param {number} Rows
8 | * @param {number} Columns
9 | */
10 | constructor(rows,cols) {
11 | this.rows = rows;
12 | this.cols = cols;
13 | this.data = [];
14 |
15 | for(let i = 0 ; i < this.rows ; i++) {
16 | this.data[i] = [];
17 | for (let j=0; j < this.cols ; j++) {
18 | this.data[i][j] = 0;
19 | }
20 | }
21 | }
22 |
23 | multiply(n) {
24 | if(n instanceof Matrix) { //hadamard multiplication (corresponding element multiplies to this matrix)
25 | if(n.rows == this.rows && n.cols == this.cols) {
26 | for(let i=0; i < this.rows ; i++) {
27 | for(let j=0; j < this.cols ; j++) {
28 | this.data[i][j] *= n.data[i][j];
29 | }
30 | }
31 | }
32 | else {
33 | console.error("element wise multiplication failed because of size mismatch!");
34 | return -1; //failed bcoz of size mismatch
35 | }
36 | }
37 | else { //scalar multiply
38 | for (let i = 0; i < this.rows; i++) {
39 | for(let j=0; j m.toArray())
52 | const NR = neuron_radius
53 | const PADX = 6 * NR // Padding between neurons in X direction
54 | const PADY = 3 * NR // Padding between neurons in Y direction
55 | const MAXLS = Math.max(...this.layer_nodes_counts) // height of largest layer
56 | const YOFFSETS = this.layer_nodes_counts.map(c=>(MAXLS-c) * 0.5 * (NR+PADY)) // offsets for layers to keep symmetry
57 | const color = function(v, {invert=false, alpha=1}={}){
58 | let h = v < 0 ? 200 : 0 // hue value: blue'ish for negative, red'ish for positive
59 | if(invert) h = (h + 180) % 360
60 | return `hsl(${h},100%,${Math.round(Math.abs(v)*100)}%, ${alpha})`
61 | }
62 | const linePointOnCircle = function(cx,cy,r, lx1,ly1,lx2,ly2){
63 | const a = Math.atan2(ly2 - ly1, lx2 - lx1)
64 | return [cx + (r * Math.cos(a)), cy + (r * Math.sin(a))]
65 | }
66 | ctx.save();
67 | for(let layerIdx = 0; layerIdx < this.layer_nodes_counts.length; layerIdx++){
68 | for(let neuronIdx = 0; neuronIdx < this.layer_nodes_counts[layerIdx]; neuronIdx++){
69 | const cx = x + layerIdx * (NR + PADX) + NR
70 | const cy = YOFFSETS[layerIdx] + y + neuronIdx * (NR + PADY) + NR
71 | if(layerIdx < this.layer_nodes_counts.length - 1){
72 | for(let nextNeuronIdx = 0; nextNeuronIdx < this.layer_nodes_counts[layerIdx+1]; nextNeuronIdx++){
73 | const ncx = x + (layerIdx + 1) * (NR + PADX) + NR
74 | const ncy = YOFFSETS[layerIdx + 1] + y + nextNeuronIdx * (NR + PADY) + NR
75 | const start = linePointOnCircle(cx, cy, NR, cx, cy, ncx, ncy)
76 | const end = linePointOnCircle(ncx, ncy, NR, ncx, ncy, cx, cy)
77 | ctx.beginPath();
78 | ctx.moveTo(...start)
79 | ctx.lineTo(...end)
80 | ctx.lineWidth = 2
81 | ctx.strokeStyle = color(this.weights[layerIdx].data[nextNeuronIdx][neuronIdx], {alpha: 0.75}) // TODO swap?
82 | ctx.stroke();
83 | }
84 | }
85 | ctx.beginPath();
86 | ctx.arc(cx, cy, NR, 0, 2 * Math.PI, false);
87 | ctx.fillStyle = neuronStates ? color(neuronStates[layerIdx][neuronIdx], {alpha: 0.5}) : '#33333377'
88 | ctx.strokeStyle = neuronStates ? color(neuronStates[layerIdx][neuronIdx], {alpha: 0.75}) : '#333333C0'
89 | ctx.stroke()
90 | ctx.fill()
91 | ctx.font = `${NR*0.5}px Monospace`
92 | if(neuronStates){
93 | const ns = neuronStates[layerIdx][neuronIdx]
94 | const nsText = Math.round(ns * 1000) / 1000
95 | ctx.fillStyle = color(ns, {invert: true})
96 | ctx.fillText(nsText, cx - ctx.measureText(nsText).width / 2, cy + NR * 0.25)
97 | }
98 | if(layerIdx > 0){
99 | const bias = this.biases[layerIdx-1].data[neuronIdx]
100 | const biasText = 'B ' + Math.round(bias * 1000) / 1000
101 | ctx.fillStyle = color(bias)
102 | ctx.fillText(biasText, cx - ctx.measureText(biasText).width / 2, cy + NR + PADY * 0.25)
103 | }
104 | }
105 | }
106 | ctx.restore()
107 | }
108 |
109 | /**
110 | *
111 | * @param {Array} input_array - Array of input values
112 | * @param {Boolean} GET_ALL_LAYERS - if we need all layers after feed forward instead of just output layer
113 | */
114 | feedforward(input_array, GET_ALL_LAYERS) {
115 | const {layers_count} = this;
116 |
117 | if(!this._feedforward_args_validator(input_array)) {
118 | return -1;
119 | }
120 |
121 | let layers = []; //This will be array of layer arrays
122 |
123 | //input layer
124 | layers[0] = Matrix.fromArray(input_array);
125 |
126 | for(let i = 1 ; i < layers_count ; i++) {
127 | layers[i] = Matrix.multiply(this.weights[i-1],layers[i-1]);
128 | layers[i].add(this.biases[i-1]);
129 | layers[i].map(this.activation); //activation
130 | }
131 | if (GET_ALL_LAYERS == true) {
132 | return layers; //all layers (array of layer matrices)
133 | } else {
134 | return layers[layers.length-1].toArray(); //output layer array
135 | }
136 | }
137 |
138 | // Mutates weights and biases of ANN based on rate given
139 | mutate(rate) { //rate 0 to 1
140 | function mutator(val) {
141 | if(Math.random() < rate) {
142 | return val + Math.random() * 2 - 1;
143 | }
144 | else {
145 | return val;
146 | }
147 | }
148 |
149 | for(let i=0 ; i < this.weights.length ; i++) {
150 | this.weights[i].map(mutator);
151 | this.biases[i].map(mutator);
152 | }
153 | }
154 |
155 | // Returns a copy of ANN (instead of reference to original one)
156 | copy() {
157 | let new_ann = new NeuralNetwork(this.layer_nodes_counts);
158 | for(let i=0 ; i< new_ann.weights.length ; i++) {
159 | new_ann.weights[i] = this.weights[i].copy();
160 | }
161 | for(let i=0 ; i< new_ann.biases.length ; i++) {
162 | new_ann.biases[i] = this.biases[i].copy();
163 | }
164 | return new_ann;
165 | }
166 |
// Trains with backpropogation
// Performs a single online (per-sample) gradient step: feeds input_array
// forward, then walks the layers from the output back to the first hidden
// layer, updating this.weights and this.biases in place.
train(input_array, target_array) {
    // Bail out when input/target sizes don't match the architecture.
    if(!this._train_args_validator(input_array, target_array)) {
        return -1;
    }

    let layers = this.feedforward(input_array, true); //layer matrices

    let target_matrix = Matrix.fromArray(target_array);

    // Error of the previously-processed (i.e. next-to-the-right) layer;
    // seeded by the output-layer error on the first iteration.
    let prev_error;

    for(let layer_index = layers.length-1; layer_index >= 1; layer_index--) {
        /* right and left are in respect to the current layer */
        let layer_matrix = layers[layer_index];

        let layer_error;
        //Error calculation
        if(layer_index == layers.length-1) { // Output layer
            // error = target - output  (computed as target + (-1 * output))
            layer_error = Matrix.add(target_matrix, Matrix.multiply(layer_matrix, -1));
        } else { //Hidden layer
            // error = W^T (weights to the right) * error of the layer to the right
            const right_weights = this.weights[layer_index];
            const right_weigths_t = Matrix.transpose(right_weights);
            layer_error = Matrix.multiply(right_weigths_t, prev_error);
        }
        prev_error = layer_error.copy(); //will be used for error calculation in hidden layers

        //Calculating layer gradient
        // NOTE(review): activation_derivative is applied to the *activated*
        // outputs (e.g. sigmoid_derivative(y) = y*(1-y)), which matches
        // layer_matrix holding post-activation values.
        const layer_gradient = Matrix.map(layer_matrix, this.activation_derivative);
        layer_gradient.multiply(layer_error);       // hadamard product with the error
        layer_gradient.multiply(this.learningRate); // scale in place; learningRate assumed set in constructor — not visible here

        //Calculating delta weights
        const left_layer_t = Matrix.transpose(layers[layer_index-1]);
        const left_weights_delta = Matrix.multiply(layer_gradient, left_layer_t);

        //Updating weights and biases
        this.weights[layer_index-1].add(left_weights_delta);
        this.biases[layer_index-1].add(layer_gradient);
    }
}
207 |
208 | activation(x) {
209 | return this.activation(x);
210 | }
211 |
212 | setActivation(TYPE) {
213 | switch (TYPE) {
214 | case NeuralNetwork.SIGMOID:
215 | this.activation = NeuralNetwork.sigmoid;
216 | this.activation_derivative = NeuralNetwork.sigmoid_derivative;
217 | break;
218 | case NeuralNetwork.ReLU:
219 | this.activation = NeuralNetwork.relu;
220 | this.activation_derivative = NeuralNetwork.relu_derivative;
221 | break;
222 | default:
223 | console.error('Activation type invalid, setting sigmoid by default');
224 | this.activation = NeuralNetwork.sigmoid;
225 | this.activation_derivative = NeuralNetwork.sigmoid_derivative;
226 | }
227 | }
228 |
229 | /**
230 | * @param {NeuralNetwork} ann - crossover partner
231 | */
232 | crossover(ann) {
233 | if(!this._crossover_validator(ann)) {
234 | return -1;
235 | }
236 | const offspring = new NeuralNetwork(this.layer_nodes_counts);
237 | for(let i = 0; i < this.weights.length; i++) {
238 | if(Math.random() < 0.5) {
239 | offspring.weights[i] = this.weights[i];
240 | } else {
241 | offspring.weights[i] = ann.weights[i];
242 | }
243 |
244 | if(Math.random() < 0.5) {
245 | offspring.biases[i] = this.biases[i];
246 | } else {
247 | offspring.biases[i] = ann.biases[i];
248 | }
249 | }
250 | return offspring;
251 | }
252 |
253 | // Activation functions
254 | static sigmoid(x) {
255 | return 1 / ( 1 + Math.exp(-1 * x));
256 | }
257 |
258 | static sigmoid_derivative(y) {
259 | return y*(1-y);
260 | }
261 |
262 | static relu(x) {
263 | if(x >= 0) {
264 | return x;
265 | } else {
266 | return 0;
267 | }
268 | }
269 |
270 | static relu_derivative(y) {
271 | if(y > 0) {
272 | return 1;
273 | } else {
274 | return 0;
275 | }
276 | }
277 |
278 | // Argument validator functions
279 | _feedforward_args_validator(input_array) {
280 | let invalid = false;
281 | if(input_array.length != this.layer_nodes_counts[0]) {
282 | invalid = true;
283 | console.error("Feedforward failed : Input array and input layer size doesn't match.");
284 | }
285 | return invalid ? false : true;
286 | }
287 |
288 | _train_args_validator(input_array, target_array) {
289 | let invalid = false;
290 | if(input_array.length != this.layer_nodes_counts[0]) {
291 | console.error("Training failed : Input array and input layer size doesn't match.");
292 | invalid=true;
293 | }
294 | if(target_array.length != this.layer_nodes_counts[this.layers_count - 1]) {
295 | invalid=true;
296 | console.error("Training failed : Target array and output layer size doesn't match.");
297 | }
298 | return invalid ? false : true;
299 | }
300 |
301 | _crossover_validator(ann) {
302 | let invalid = false;
303 | if(ann instanceof NeuralNetwork) {
304 | if(this.layers_count == ann.layers_count) {
305 | for(let i = 0; i < this.layers_count; i++) {
306 | if(this.layer_nodes_counts[i] != ann.layer_nodes_counts[i]) {
307 | console.error("Crossover failed : Architecture mismatch (Different number of neurons in one or more layers).");
308 | invalid = true;
309 | break;
310 | }
311 | }
312 | } else {
313 | invalid=true;
314 | console.error("Crossover failed : Architecture mismatch (Different number of layers).");
315 | }
316 | } else {
317 | invalid=true;
318 | console.error("Crossover failed : NeuralNetwork object expected.");
319 | }
320 | return invalid ? false : true;
321 | }
322 | }
323 |
--------------------------------------------------------------------------------