├── .editorconfig
├── .gitignore
├── LICENSE
├── Migration.md
├── README.md
├── dist
│   └── browser-bundle.min.js
├── examples
│   └── node.js
├── package.json
└── src
    ├── Connection.js
    ├── FeedforwardNeuralNetwork.js
    ├── Layer.js
    ├── NeuralNetwork.js
    ├── Neuron.js
    ├── OutputLayer.js
    ├── OutputNeuron.js
    ├── activation
    │   ├── ActivationFunction.js
    │   ├── ArcTangent.js
    │   ├── BinaryStep.js
    │   ├── GaussianFunction.js
    │   ├── HyperbolicTangent.js
    │   ├── Identity.js
    │   ├── LogisticFunction.js
    │   ├── RectifiedLinearUnit.js
    │   └── SinusoidFunction.js
    ├── header.js
    └── main.js
/.editorconfig:
--------------------------------------------------------------------------------
1 | # editorconfig.org
2 | root = true
3 |
4 | [*]
5 | charset = utf-8
6 | indent_style = tab
7 | trim_trailing_whitespace = true
8 | end_of_line = lf
9 | insert_final_newline = true
10 |
11 | [*.md]
12 | indent_style = space
13 | indent_size = 4
14 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | node_modules/
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) delight.im (https://www.delight.im/)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Migration.md:
--------------------------------------------------------------------------------
1 | # Migration
2 |
3 | ## From `v1.x.x` to `v2.x.x`
4 |
5 | * The license has been changed from the [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0) to the [MIT License](https://opensource.org/licenses/MIT).
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # JS-NeuralNetwork
2 |
3 | Neural networks in JavaScript. Well-documented and object-oriented.
4 |
5 | ## Installation
6 |
7 | * In the browser
8 |
9 | ```html
10 | <script type="text/javascript" src="dist/browser-bundle.min.js"></script>
11 | ```
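
Loading the bundle through a `script` tag registers a global `NeuralNetwork` object (the bundle is wrapped in a UMD header). A minimal sketch of using that global:

```javascript
// `window.NeuralNetwork` is available once the bundle has loaded
var network = new NeuralNetwork.Type.Feedforward(3, [], 1);
```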
12 |
13 | * In Node.js
14 |
15 | ```
16 | $ npm install @delight-im/neural-network
17 | ```
18 |
19 | and
20 |
21 | ```javascript
22 | var NeuralNetwork = require("@delight-im/neural-network");
23 | ```
24 |
25 | ## Usage
26 |
27 | * Creating a new instance
28 |
29 | * Neural network with 3 input neurons and 1 output neuron
30 |
31 | ```javascript
32 | var network = new NeuralNetwork.Type.Feedforward(3, [], 1);
33 | ```
34 |
35 | * Neural network with 4 input neurons, 3 hidden neurons and 2 output neurons
36 |
37 | ```javascript
38 | var network = new NeuralNetwork.Type.Feedforward(4, [ 3 ], 2);
39 | ```
40 |
41 | * Neural network with 6 input neurons, two hidden layers with 4 and 2 neurons, and 3 output neurons
42 |
43 | ```javascript
44 | var network = new NeuralNetwork.Type.Feedforward(6, [ 4, 2 ], 3);
45 | ```
46 |
47 | * Passing any number of additional options to the network
48 |
49 | ```javascript
50 | // pass an object containing the desired options as the fourth parameter
51 | var network = new NeuralNetwork.Type.Feedforward(3, [ 4 ], 1, {
52 | seed: 501935,
53 | learningRate: 0.3,
54 | hiddenLayerActivationFunction: new NeuralNetwork.Activation.HyperbolicTangent(),
55 | outputLayerActivationFunction: new NeuralNetwork.Activation.BinaryStep()
56 | });
57 | ```
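
If the options object is omitted, the defaults from `src/NeuralNetwork.js` apply: a learning rate of `0.3`, `HyperbolicTangent` activation for both the hidden and the output layers, and no fixed seed. A sketch relying on those defaults:

```javascript
// same as passing { learningRate: 0.3 } with `HyperbolicTangent`
// activation for both the hidden and the output layers
var network = new NeuralNetwork.Type.Feedforward(3, [ 4 ], 1);
```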
58 |
59 | * Available activation functions
60 |
61 | ```javascript
62 | new NeuralNetwork.Activation.ArcTangent();
63 | new NeuralNetwork.Activation.BinaryStep();
64 | new NeuralNetwork.Activation.GaussianFunction();
65 | new NeuralNetwork.Activation.HyperbolicTangent();
66 | new NeuralNetwork.Activation.Identity();
67 | new NeuralNetwork.Activation.LogisticFunction();
68 | new NeuralNetwork.Activation.RectifiedLinearUnit();
69 | new NeuralNetwork.Activation.RectifiedLinearUnit(0.01);
70 | new NeuralNetwork.Activation.SinusoidFunction();
71 | ```
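
The optional parameter accepted by `RectifiedLinearUnit` is the slope used for negative inputs, which turns it into a "leaky" rectifier. The activation functions can also be evaluated directly, as in this small sketch:

```javascript
var leakyRelu = new NeuralNetwork.Activation.RectifiedLinearUnit(0.01);
leakyRelu.evaluate(-2);           // -0.02 (returns 0.01 * x for x < 0)
leakyRelu.evaluate(3);            // 3 (identity for x >= 0)
leakyRelu.evaluateDerivative(-2); // 0.01
```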
72 |
73 | * Training the network using supervised batch ("all-at-once") learning
74 |
75 | ```javascript
76 | // the first parameter is the array of inputs and the second parameter is the array of desired outputs
77 | // the third parameter is the optional number of iterations and the fourth parameter is the optional error threshold
78 | var error = network.trainBatch(
79 | [
80 | [0, 0, 1],
81 | [0, 1, 1],
82 | [1, 0, 1],
83 | [1, 1, 1]
84 | ],
85 | [
86 | [ 0 ],
87 | [ 1 ],
88 | [ 1 ],
89 | [ 0 ]
90 | ],
91 | 60000,
92 | 0.005
93 | );
94 | ```
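
The four training examples above encode the XOR of the first two inputs, with the constant third input acting like a bias. XOR cannot be represented without a hidden layer, so this assumes the network was created with at least one. A hedged sketch of what to expect after training:

```javascript
// the returned `error` is the mean squared error after the final iteration;
// training stops early once it drops below the supplied error threshold
network.predict([ 0, 1, 1 ]); // should be close to [ 1 ] after successful training
network.predict([ 1, 1, 1 ]); // should be close to [ 0 ] after successful training
```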
95 |
96 | * Training the network using supervised online ("single-pattern") learning
97 |
98 | ```javascript
99 | // the first parameter is the input and the second parameter is the desired output
100 | var error = network.train([0, 0, 1], [ 0 ]);
101 | ```
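
Since online learning updates the weights immediately after each example, individual examples can simply be supplied as they arrive. A minimal sketch, assuming a hypothetical `trainingExamples` array of `{ input, output }` pairs:

```javascript
// `trainingExamples` is a hypothetical array,
// e.g. [ { input: [ 0, 0, 1 ], output: [ 0 ] }, ... ]
trainingExamples.forEach(function (example) {
    network.train(example.input, example.output);
});
```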
102 |
103 | * Asking the network to predict some output from a supplied input pattern
104 |
105 | ```javascript
106 | // the single parameter is the input to process
107 | network.predict([ 0, 0, 1 ])
108 | ```
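
The return value is an array with one entry per output neuron, e.g. `[ 0.0042 ]` for a single output neuron (an illustrative value, as the exact numbers depend on the training run):

```javascript
var output = network.predict([ 0, 0, 1 ]);
console.log(output[0]); // the activation of the single output neuron
```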
109 |
110 | * Saving the network with all its properties to a JSON string
111 |
112 | ```javascript
113 | var jsonStr = JSON.stringify(network);
114 | ```
115 |
116 | * Restoring the network with all its properties from a JSON string
117 |
118 | ```javascript
119 | var network = NeuralNetwork.Type.Feedforward.fromJson(jsonStr);
120 | ```
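
Both steps combined, as a round-trip sketch in which the restored instance should behave like the original:

```javascript
var saved = JSON.stringify(network);
var restored = NeuralNetwork.Type.Feedforward.fromJson(saved);

// both instances should now produce the same predictions
console.log(network.predict([ 0, 0, 1 ]));
console.log(restored.predict([ 0, 0, 1 ]));
```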
121 |
122 | ## Development
123 |
124 | * Prerequisites
125 |
126 | ```
127 | $ npm install -g uglify-js
128 | $ npm install -g browserify
129 | ```
130 |
131 | * Building the browser bundle
132 |
133 | ```
134 | $ browserify src/main.js --standalone NeuralNetwork > dist/browser-bundle.js
135 | $ uglifyjs dist/browser-bundle.js --compress --preamble "$(< src/header.js)" > dist/browser-bundle.min.js
136 | $ rm dist/browser-bundle.js
137 | ```
138 |
139 | * Running the Node.js examples
140 |
141 | ```
142 | $ node examples/node.js
143 | ```
144 |
145 | ## Contributing
146 |
147 | All contributions are welcome! If you wish to contribute, please create an issue first so that your feature, problem or question can be discussed.
148 |
149 | ## License
150 |
151 | This project is licensed under the terms of the [MIT License](https://opensource.org/licenses/MIT).
152 |
--------------------------------------------------------------------------------
/dist/browser-bundle.min.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 | /* [minified UMD bundle generated from src/main.js via browserify and uglify-js (see "Development" in README.md); the minified payload was garbled during text extraction and is not reproduced here] */
7 |
--------------------------------------------------------------------------------
/examples/node.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | var NeuralNetwork = require("../src/main.js");
4 |
5 | var network = new NeuralNetwork.Type.Feedforward(2, [ 4 ], 1, {
6 | seed: 501935,
7 | learningRate: 0.3,
8 | hiddenLayerActivationFunction: new NeuralNetwork.Activation.Identity(),
9 | outputLayerActivationFunction: new NeuralNetwork.Activation.Identity()
10 | });
11 |
12 | console.log("Predictions *before* training");
13 | console.log(network.predict([ 0, 0 ]));
14 | console.log(network.predict([ 0, 1 ]));
15 | console.log(network.predict([ 1, 0 ]));
16 | console.log(network.predict([ 1, 1 ]));
17 |
18 | // training in batch mode
19 | var error = network.trainBatch(
20 | [
21 | [ 0, 1 ],
22 | [ 1, 1 ]
23 | ],
24 | [
25 | [ 0 ],
26 | [ 1 ]
27 | ],
28 | 5000,
29 | 0.0001
30 | );
31 |
32 | console.log("----------");
33 | console.log("error after training = "+error);
34 | console.log("----------");
35 |
36 | console.log("Predictions *after* training");
37 | console.log(network.predict([ 0, 0 ]));
38 | console.log(network.predict([ 0, 1 ]));
39 | console.log(network.predict([ 1, 0 ]));
40 | console.log(network.predict([ 1, 1 ]));
41 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@delight-im/neural-network",
3 | "version": "1.0.1",
4 | "description": "Neural networks in JavaScript. Well-documented and object-oriented.",
5 | "main": "src/main.js",
6 | "license": "MIT",
7 | "author": "delight.im",
8 | "repository": {
9 | "type": "git",
10 | "url": "https://github.com/delight-im/JS-NeuralNetwork.git"
11 | },
12 | "keywords": [
13 | "machine-learning",
14 | "machine learning",
15 | "neural-network",
16 | "neural network",
17 | "classifier",
18 | "learning",
19 | "neural",
20 | "network"
21 | ],
22 | "bugs": {
23 | "url": "https://github.com/delight-im/JS-NeuralNetwork/issues"
24 | },
25 | "homepage": "https://github.com/delight-im/JS-NeuralNetwork#readme",
26 | "dependencies": {
27 | "@delight-im/prng": "^1.0.0"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/src/Connection.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | /**
10 | * A connection to another neuron in an artificial neural network
11 | *
12 | * @constructor
13 | * @param {Neuron} targetNeuron - the neuron that is the target of this connection
14 | * @param {number} initialWeight - the initial weight of this connection
15 | */
16 | function Connection(targetNeuron, initialWeight) {
17 |
18 | /**
19 | * The neuron that is the target of this connection
20 | *
21 | * @type {Neuron}
22 | * @private
23 | */
24 | this._targetNeuron = targetNeuron;
25 |
26 | /**
27 | * The current weight of this connection
28 | *
29 | * @type {number}
30 | * @private
31 | */
32 | this._weight = initialWeight;
33 |
34 | /**
35 | * Accumulates all weight updates that are deferred until later
36 | *
37 | * @type {number}
38 | * @private
39 | */
40 | this._weightUpdatePending = 0;
41 |
42 | }
43 |
44 | /**
45 | * Returns the neuron that is the target of this connection
46 | *
47 | * @return {Neuron} the target neuron
48 | */
49 | Connection.prototype.getTargetNeuron = function () {
50 | return this._targetNeuron;
51 | };
52 |
53 | /**
54 | * Returns the current weight of this connection
55 | *
56 | * @return {number} the current weight
57 | */
58 | Connection.prototype.getWeight = function () {
59 | return this._weight;
60 | };
61 |
62 | /**
63 | * Increases the current weight of this connection by the specified value
64 | *
65 | * @param {number} addend - the value to increase this weight by
66 | * @param {boolean} immediate - whether to update the weights immediately or defer the update until later
67 | */
68 | Connection.prototype.updateWeight = function (addend, immediate) {
69 | if (immediate) {
70 | this._weight += addend;
71 | }
72 | else {
73 | this._weightUpdatePending += addend;
74 | }
75 | };
76 |
77 | /** Releases all deferred weight updates */
78 | Connection.prototype.releaseWeightUpdates = function () {
79 | // update the weights with the deferred changes
80 | this._weight += this._weightUpdatePending;
81 | // reset the accumulated changes
82 | this._weightUpdatePending = 0;
83 | };
84 |
85 | /**
86 | * Returns the delta of this connection's target neuron scaled by this connection's weight
87 | *
88 | * @return {number} the weighted delta
89 | */
90 | Connection.prototype.getWeightedDelta = function () {
91 | return this._targetNeuron.getDelta() * this._weight;
92 | };
93 |
94 | Connection.prototype.toJSON = function () {
95 | return {
96 | "weight": this._weight
97 | };
98 | };
99 |
100 | module.exports = Connection;
101 |
--------------------------------------------------------------------------------
/src/FeedforwardNeuralNetwork.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var NeuralNetwork = require("./NeuralNetwork.js");
10 | var Neuron = require("./Neuron.js");
11 |
12 | var ArcTangent = require("./activation/ArcTangent.js");
13 | var BinaryStep = require("./activation/BinaryStep.js");
14 | var GaussianFunction = require("./activation/GaussianFunction.js");
15 | var HyperbolicTangent = require("./activation/HyperbolicTangent.js");
16 | var Identity = require("./activation/Identity.js");
17 | var LogisticFunction = require("./activation/LogisticFunction.js");
18 | var RectifiedLinearUnit = require("./activation/RectifiedLinearUnit.js");
19 | var SinusoidFunction = require("./activation/SinusoidFunction.js");
20 |
21 | /**
22 |  * Artificial feedforward neural network whose underlying graph is a directed acyclic graph
23 | *
24 | * All information travels only forward, i.e. from the input nodes, through the optional hidden nodes, to the output nodes
25 | *
26 | * @param {number} inputNeurons - the number of neurons to use in the input layer
27 | * @param {number[]} hiddenNeurons - the number of neurons to use per hidden layer
28 | * @param {number} outputNeurons - the number of neurons to use in the output layer
29 | * @param {Object} [options]
30 | * @param {number} [options.seed] - the seed to use for deterministic results
31 | * @param {number} [options.learningRate] - the learning rate to use
32 | * @param {ActivationFunction} [options.hiddenLayerActivationFunction] - the activation function for the hidden layer
33 | * @param {ActivationFunction} [options.outputLayerActivationFunction] - the activation function for the output layer
34 | * @constructor
35 | * @extends NeuralNetwork
36 | */
37 | function FeedforwardNeuralNetwork(inputNeurons, hiddenNeurons, outputNeurons, options) {
38 |
39 | // call the super class's constructor
40 | NeuralNetwork.call(this, inputNeurons, hiddenNeurons, outputNeurons, options);
41 |
42 | this._createConnections();
43 |
44 | }
45 |
46 | // create a prototype that inherits from the super class's prototype
47 | FeedforwardNeuralNetwork.prototype = Object.create(NeuralNetwork.prototype);
48 | // fix the constructor pointer so that it doesn't point to the super class
49 | FeedforwardNeuralNetwork.prototype.constructor = FeedforwardNeuralNetwork;
50 |
51 | /**
52 | * Feeds the specified input into the network
53 | *
54 | * @param {number[]} input - the input to process
55 | * @private
56 | */
57 | FeedforwardNeuralNetwork.prototype._feed = function (input) {
58 | // prepare the network for the new input
59 | this.reset();
60 |
61 | // get the input layer
62 | var inputLayer = this.getInputLayer();
63 |
64 | var inputLayerSize = inputLayer.getSize();
65 |
66 | // validate the size of the supplied input
67 | if (input.length !== inputLayerSize) {
68 | throw "Size of input layer (`"+inputLayerSize+"`) and supplied input (`"+input.length+"`) must match";
69 | }
70 |
71 | var neuron;
72 |
73 | // for every neuron in the input layer
74 | for (var i = 0; i < inputLayerSize; i++) {
75 | neuron = inputLayer.getNeuron(i);
76 |
77 | // feed the input into the neuron
78 | neuron.feed(input[i]);
79 | }
80 |
81 | var numLayers = this.getNumberOfLayers();
82 |
83 | // for every layer
84 | for (var k = 0; k < numLayers; k++) {
85 | // propagate the activation
86 | this.getLayer(k).propagateAllNeurons();
87 | }
88 | };
89 |
90 | /**
91 | * Creates the connections between the layers of this network so that they represent a fully-connected network
92 | *
93 | * @private
94 | */
95 | FeedforwardNeuralNetwork.prototype._createConnections = function () {
96 | var numLayers = this.getNumberOfLayers();
97 | var previousLayer;
98 | var previousLayerSize;
99 | var currentLayer;
100 | var currentLayerSize;
101 |
102 | // for every layer except for the input layer
103 | for (var i = 1; i < numLayers; i++) {
104 | previousLayer = this.getLayer(i - 1);
105 | previousLayerSize = previousLayer.getSize();
106 | currentLayer = this.getLayer(i);
107 | currentLayerSize = currentLayer.getSize();
108 |
109 | // for every neuron in the previous layer
110 | for (var k = 0; k < previousLayerSize; k++) {
111 | // for every neuron in the current layer
112 | for (var m = 0; m < currentLayerSize; m++) {
113 | // connect the previous layer's neuron to the current layer's neuron
114 | previousLayer.getNeuron(k).connectTo(currentLayer.getNeuron(m));
115 | }
116 | }
117 | }
118 | };
119 |
120 | /**
121 | * Restores a neural network instance from the supplied JSON string
122 | *
123 | * @param {string} jsonString - the JSON string to restore from
124 | * @return {FeedforwardNeuralNetwork} the restored network instance
125 | */
126 | FeedforwardNeuralNetwork.fromJson = function (jsonString) {
127 | function flattenArray(arr) {
128 | return arr.reduce(function(a, b) {
129 | return a.concat(b);
130 | }, []);
131 | }
132 |
133 | function createWeightsListFromLayer(layerObj) {
134 | return flattenArray(layerObj.neurons.map(function (each) {
135 | return each.connections.map(function (each) {
136 | return each.weight;
137 | });
138 | }));
139 | }
140 |
141 | function createActivationFunctionFromName(name) {
142 | switch (name) {
143 | case "ArcTangent":
144 | return new ArcTangent();
145 | case "BinaryStep":
146 | return new BinaryStep();
147 | case "GaussianFunction":
148 | return new GaussianFunction();
149 | case "HyperbolicTangent":
150 | return new HyperbolicTangent();
151 | case "Identity":
152 | return new Identity();
153 | case "LogisticFunction":
154 | return new LogisticFunction();
155 | case "RectifiedLinearUnit":
156 | return new RectifiedLinearUnit();
157 | case "SinusoidFunction":
158 | return new SinusoidFunction();
159 | default:
160 | throw "Undefined activation function `"+name+"`";
161 | }
162 | }
163 |
164 | var data = JSON.parse(jsonString);
165 |
166 | var inputLayer = data.layers.shift();
167 | var outputLayer = data.layers.pop();
168 | var hiddenLayers = data.layers;
169 |
170 | var inputNeurons = inputLayer.neurons.length;
171 | var hiddenNeurons = hiddenLayers.map(function (each) {
172 | return each.neurons.length;
173 | });
174 | var outputNeurons = outputLayer.neurons.length;
175 |
176 | var inputWeights = createWeightsListFromLayer(inputLayer);
177 | var outputWeights = createWeightsListFromLayer(outputLayer);
178 |
179 | var hiddenWeights = [];
180 | for (var i = 0; i < hiddenLayers.length; i++) {
181 | hiddenWeights.push(createWeightsListFromLayer(hiddenLayers[i]));
182 | }
183 | hiddenWeights = flattenArray(hiddenWeights);
184 |
185 | var allWeights = inputWeights.concat(hiddenWeights, outputWeights);
186 |
187 | var hiddenLayerActivationFunction = (hiddenLayers.length > 0) ? hiddenLayers[0].activationFunction : "Identity";
188 |
189 | Neuron.preDefinedWeights = allWeights;
190 |
191 | return new FeedforwardNeuralNetwork(inputNeurons, hiddenNeurons, outputNeurons, {
192 | seed: data.seed,
193 | learningRate: data.learningRate,
194 | hiddenLayerActivationFunction: createActivationFunctionFromName(hiddenLayerActivationFunction),
195 | outputLayerActivationFunction: createActivationFunctionFromName(outputLayer.activationFunction)
196 | });
197 | };
198 |
199 | module.exports = FeedforwardNeuralNetwork;
200 |
--------------------------------------------------------------------------------
/src/Layer.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var Neuron = require("./Neuron.js");
10 |
11 | /**
12 | * A layer that may be part of an artificial neural network
13 | *
14 | * @param {number} size - the number of neurons to use in this layer
15 | * @param {ActivationFunction} activationFunction - the activation function that this layer should use
16 | * @param {Prng} prng - the PRNG that this layer should use
17 | * @constructor
18 | */
19 | function Layer(size, activationFunction, prng) {
20 |
21 | /**
22 | * The list of neurons in this layer
23 | *
24 | * @type {Neuron[]}
25 | * @private
26 | */
27 | this._neurons = [];
28 |
29 | /**
30 | * The activation function that this layer uses
31 | *
32 | * @type {ActivationFunction}
33 | * @private
34 | */
35 | this._activationFunction = activationFunction;
36 |
37 | /**
38 | * The PRNG that this layer uses
39 | *
40 | * @type {Prng}
41 | * @private
42 | */
43 | this._prng = prng;
44 |
45 | // create the neurons for this layer
46 | for (var i = 0; i < size; i++) {
47 | this._neurons.push(this._createNeuron());
48 | }
49 |
50 | }
51 |
52 | /**
53 | * Creates a new neuron for this layer
54 | *
55 | * @return {Neuron} the new neuron
56 | * @private
57 | */
58 | Layer.prototype._createNeuron = function () {
59 | return new Neuron(this);
60 | };
61 |
62 | /** Propagates the output of all neurons in this layer */
63 | Layer.prototype.propagateAllNeurons = function () {
64 | // for every neuron
65 | for (var i = 0; i < this._neurons.length; i++) {
66 | // propagate the neuron's activation
67 | this._neurons[i].propagate();
68 | }
69 | };
70 |
71 | /** Resets the neurons in this layer */
72 | Layer.prototype.reset = function () {
73 | // for every neuron
74 | for (var i = 0; i < this._neurons.length; i++) {
75 | // reset the neuron
76 | this._neurons[i].reset();
77 | }
78 | };
79 |
80 | /**
81 | * Returns the number of neurons in this layer
82 | *
83 | * @return {number} the number of neurons
84 | */
85 | Layer.prototype.getSize = function () {
86 | return this._neurons.length;
87 | };
88 |
89 | /**
90 | * Returns the neuron at the specified index
91 | *
92 | * @param {number} index - the neuron to return
93 | * @return {Neuron|OutputNeuron} the neuron
94 | */
95 | Layer.prototype.getNeuron = function (index) {
96 | return this._neurons[index];
97 | };
98 |
99 | /** Updates the deltas in this layer */
100 | Layer.prototype.updateDeltas = function () {
101 | // for every neuron
102 | for (var i = 0; i < this._neurons.length; i++) {
103 | // update the delta
104 | this._neurons[i].updateDelta();
105 | }
106 | };
107 |
108 | /**
109 | * Updates the weights for all neurons in this layer
110 | *
111 | * @param {number} learningRate - the learning rate to use
112 | * @param {boolean} immediate - whether to update the weights immediately or defer the update until later
113 | */
114 | Layer.prototype.updateWeightsInLayer = function (learningRate, immediate) {
115 | // for every neuron
116 | for (var i = 0; i < this._neurons.length; i++) {
117 | // update the weights
118 | this._neurons[i].updateWeightsAtConnections(learningRate, immediate);
119 | }
120 | };
121 |
122 | /** Releases all deferred weight updates */
123 | Layer.prototype.releaseWeightUpdatesInLayer = function () {
124 | // for every neuron
125 | for (var i = 0; i < this._neurons.length; i++) {
126 | // release all pending weight updates
127 | this._neurons[i].releaseWeightUpdatesAtConnections();
128 | }
129 | };
130 |
131 | /**
132 | * Returns the activation function for this layer
133 | *
134 | * @return {ActivationFunction} the activation function used by this layer
135 | */
136 | Layer.prototype.getActivationFunction = function () {
137 | return this._activationFunction;
138 | };
139 |
140 | /**
141 | * Returns the PRNG for this layer
142 | *
143 | * @return {Prng} the PRNG used by this layer
144 | */
145 | Layer.prototype.getPrng = function () {
146 | return this._prng;
147 | };
148 |
149 | Layer.prototype.toJSON = function () {
150 | var neurons = [];
151 | for (var i = 0; i < this._neurons.length; i++) {
152 | neurons.push(this._neurons[i].toJSON());
153 | }
154 |
155 | return {
156 | "neurons": neurons,
157 | "activationFunction": this._activationFunction.toJSON()
158 | 	};
159 | };
160 |
161 | module.exports = Layer;
162 |
--------------------------------------------------------------------------------
/src/NeuralNetwork.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var Prng = require("@delight-im/prng");
10 | var Layer = require("./Layer.js");
11 | var Identity = require("./activation/Identity.js");
12 | var OutputLayer = require("./OutputLayer.js");
13 | var HyperbolicTangent = require("./activation/HyperbolicTangent.js");
14 |
15 | /**
16 | * Artificial neural network
17 | *
18 | * Neural networks take some input and try to predict the desired output
19 | *
20 | * A network can be applied to function approximation, regression analysis, classification, data processing, etc.
21 | *
22 | * When there is no hidden layer, this is called a "single-layer network", since the input layer is usually not counted
23 | *
24 | * More hidden layers allow for solutions to more complex problems, but such networks may be harder to manage and train
25 | *
26 | * If you don't know where to start, try a 2-layer or 3-layer neural network first (i.e. 1 or 2 hidden layers)
27 | *
28 | * When there are lots of hidden layers, e.g. 10 to 20, it's called "deep learning", but that may not be what you need
29 | *
30 | * The "knowledge" (or learned behavior) of a network is stored in its weights
31 | *
32 |  * The response of a network is generally hard to analyze, so a lot of trial and error may be required
33 |  *
34 |  * Parameters to experiment with include the number and size of the hidden layers and the choice of activation function
35 |  *
36 |  * While neural networks are universal approximators in theory, there is no guarantee of convergence in practice
37 | *
38 | * @param {number} inputNeurons - the number of neurons to use in the input layer
39 | * @param {number[]} hiddenNeurons - the number of neurons to use per hidden layer
40 | * @param {number} outputNeurons - the number of neurons to use in the output layer
41 | * @param {Object} [options]
42 | * @param {number} [options.seed] - the seed to use for deterministic results
43 | * @param {number} [options.learningRate] - the learning rate to use
44 | * @param {ActivationFunction} [options.hiddenLayerActivationFunction] - the activation function for the hidden layer
45 | * @param {ActivationFunction} [options.outputLayerActivationFunction] - the activation function for the output layer
46 | * @constructor
47 | */
48 | function NeuralNetwork(inputNeurons, hiddenNeurons, outputNeurons, options) {
49 |
50 | options = options || {};
51 |
52 | /**
53 | * The layers that this network consists of
54 | *
55 | * @type {Layer[]|OutputLayer[]}
56 | * @private
57 | */
58 | this._layers = [];
59 |
60 | /**
61 | * The seed of this network (if any)
62 | *
63 | * @type {number|undefined}
64 | * @private
65 | */
66 | this._seed = options.seed || undefined;
67 |
68 | var prng = new Prng(this._seed);
69 |
70 | options.hiddenLayerActivationFunction = options.hiddenLayerActivationFunction || new HyperbolicTangent();
71 | options.outputLayerActivationFunction = options.outputLayerActivationFunction || new HyperbolicTangent();
72 |
73 | // add the input layer
74 | this._layers.push(new Layer(inputNeurons, new Identity(), prng));
75 |
76 | // add the hidden layers
77 | for (var i = 0; i < hiddenNeurons.length; i++) {
78 | this._layers.push(new Layer(hiddenNeurons[i], options.hiddenLayerActivationFunction, prng));
79 | }
80 |
81 | // add the output layer
82 | this._layers.push(new OutputLayer(outputNeurons, options.outputLayerActivationFunction, prng));
83 |
84 | /**
85 | * The current learning rate of this network
86 | *
87 | * @type {number}
88 | * @private
89 | */
90 | this._learningRate = options.learningRate || 0.3;
91 |
92 | }
93 |
94 | /**
95 | * Returns the network's output for the previously supplied input
96 | *
97 | * @return {number[]} the network's output
98 | * @private
99 | */
100 | NeuralNetwork.prototype._getOutput = function () {
101 | var outputLayer = this.getOutputLayer();
102 | var outputLayerSize = outputLayer.getSize();
103 |
104 | var output = [];
105 |
106 | for (var i = 0; i < outputLayerSize; i++) {
107 | output.push(outputLayer.getNeuron(i).getActivation());
108 | }
109 |
110 | return output;
111 | };
112 |
113 | /**
114 | * Feeds the specified input into the network
115 | *
116 | * @param {number[]} input - the input to process
117 | * @private
118 | * @abstract
119 | */
120 | NeuralNetwork.prototype._feed = function (input) {
121 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
122 | };
123 |
124 | /**
125 | * Tries to predict the output from the specified input
126 | *
127 | * @param {number[]} input - the input to process
128 | * @return {number[]} the output predicted by this network
129 | */
130 | NeuralNetwork.prototype.predict = function (input) {
131 | this._feed(input);
132 |
133 | return this._getOutput();
134 | };
135 |
136 | /**
137 | * Uses backpropagation to update deltas in all layers starting with the output layer
138 | *
139 | * @param {number[]} desiredOutput - the expected output
140 | * @private
141 | */
142 | NeuralNetwork.prototype._backpropagate = function (desiredOutput) {
143 | // update the deltas in the output layer
144 | this.getOutputLayer().updateDeltas(desiredOutput);
145 |
146 | // for all hidden layers (in reverse order)
147 | for (var layerIndex = this._layers.length - 2; layerIndex > 0; layerIndex--) {
148 | // update the deltas
149 | this.getLayer(layerIndex).updateDeltas();
150 | }
151 | };
152 |
153 | /**
154 | * Updates the weights for all layers in this network
155 | *
156 | * @param {boolean} immediate - whether to update the weights immediately or defer the update until later
157 | * @private
158 | */
159 | NeuralNetwork.prototype._updateWeightsInNetwork = function (immediate) {
160 | // for the input layer and all hidden layers
161 | for (var i = 0; i < this._layers.length - 1; i++) {
162 | // update the weights
163 | this._layers[i].updateWeightsInLayer(this._learningRate, immediate);
164 | }
165 | };
166 |
167 | /**
168 | * Releases all deferred weight updates
169 | *
170 | * @private
171 | */
172 | NeuralNetwork.prototype._releaseWeightUpdatesInNetwork = function () {
173 | // for the input layer and all hidden layers
174 | for (var i = 0; i < this._layers.length - 1; i++) {
175 | // release all pending weight updates
176 | this._layers[i].releaseWeightUpdatesInLayer();
177 | }
178 | };
179 |
180 | /**
181 | * Trains the network using supervised online ("single-pattern") learning (as opposed to batch learning)
182 | *
183 | * Use online learning to supply individual training examples at a time
184 | *
185 |  * All subsequent training examples will thus be run on a network that has already been updated by changing its weights
186 | *
187 | * This is useful when data becomes available in a sequential order and is not available all at once
188 | *
189 |  * The technique may also be used when training on the entire dataset is computationally too expensive
190 | *
191 | * @param {number[]} input - the input for an individual training example
192 | * @param {number[]} desiredOutput - the expected output for an individual training example
193 | * @return {number} the mean squared error
194 | */
195 | NeuralNetwork.prototype.train = function (input, desiredOutput) {
196 | var outputLayer = this.getOutputLayer();
197 |
198 | // feed the training input into the network
199 | this._feed(input);
200 | // calculate and propagate back the errors from the output layer
201 | this._backpropagate(desiredOutput);
202 | // calculate the sum-squared error
203 | var sumSquaredError = outputLayer.calculateSumSquaredError(desiredOutput);
204 | // update the weights in this network
205 | this._updateWeightsInNetwork(true);
206 |
207 | // return the mean squared error
208 | return sumSquaredError / outputLayer.getSize();
209 | };
210 |
211 | /**
212 | * Trains the network using supervised batch ("all-at-once") learning (as opposed to online learning)
213 | *
214 | * Use batch learning to supply multiple training examples (i.e. the entire dataset) at once
215 | *
216 | * All training examples will be run on a network with the same weights per iteration
217 | *
218 |  * This is the recommended technique if all training data is available a priori and training on it is computationally feasible
219 | *
220 | * @param {number[][]} inputs - the inputs per training example
221 | * @param {number[][]} desiredOutputs - the expected outputs per training example
222 | * @param {number} [iterations] - the maximum number of iterations to train
223 | * @param {number} [errorThreshold] - the desired error threshold that will cause training to be finished when reached
224 | * @return {number} the mean squared error
225 | */
226 | NeuralNetwork.prototype.trainBatch = function (inputs, desiredOutputs, iterations, errorThreshold) {
227 | if (inputs.length !== desiredOutputs.length) {
228 | throw "Number of input patterns (`"+inputs.length+"`) and output patterns (`"+desiredOutputs.length+"`) must match";
229 | }
230 |
231 | iterations = iterations || 1;
232 | errorThreshold = errorThreshold || 0.005;
233 |
234 | var outputLayer = this.getOutputLayer();
235 | var outputLayerSize = outputLayer.getSize();
236 | var error = Number.POSITIVE_INFINITY;
237 |
238 | // until the maximum number of iterations or the desired error threshold has been reached (whichever comes first)
239 | for (var i = 0; i < iterations && error > errorThreshold; i++) {
240 | // reset the accumulated error
241 | error = 0;
242 |
243 | // for every training pattern
244 | for (var k = 0; k < inputs.length; k++) {
245 | // feed the training input into the network
246 | this._feed(inputs[k]);
247 | // calculate and propagate back the errors from the output layer
248 | this._backpropagate(desiredOutputs[k]);
249 | // update the weights in this network
250 | this._updateWeightsInNetwork(false);
251 | // accumulate the error
252 | error += outputLayer.calculateSumSquaredError(desiredOutputs[k]);
253 | }
254 |
255 | // turn the total sum-squared error into the mean squared error
256 | error /= inputs.length * outputLayerSize;
257 |
258 | this._releaseWeightUpdatesInNetwork();
259 | }
260 |
261 | // return the final mean squared error
262 | return error;
263 | };
264 |
265 | /** Resets the layers in this network */
266 | NeuralNetwork.prototype.reset = function () {
267 | // for every layer
268 | for (var i = 0; i < this._layers.length; i++) {
269 | // reset the layer
270 | this._layers[i].reset();
271 | }
272 | };
273 |
274 | /**
275 | * Returns this network's learning rate
276 | *
277 | * @return {number} the current learning rate
278 | */
279 | NeuralNetwork.prototype.getLearningRate = function () {
280 | return this._learningRate;
281 | };
282 |
283 | /**
284 | * Returns the number of layers in this network
285 | *
286 |  * @return {number} the number of layers
287 | */
288 | NeuralNetwork.prototype.getNumberOfLayers = function () {
289 | return this._layers.length;
290 | };
291 |
292 | /**
293 | * Returns the layer at the specified index
294 | *
295 | * @param {number} index - the layer to return
296 | * @return {Layer|OutputLayer}
297 | */
298 | NeuralNetwork.prototype.getLayer = function (index) {
299 | return this._layers[index];
300 | };
301 |
302 | /**
303 | * Returns the input layer for this network
304 | *
305 | * @return {Layer} the input layer
306 | */
307 | NeuralNetwork.prototype.getInputLayer = function () {
308 | return this.getLayer(0);
309 | };
310 |
311 | /**
312 | * Returns the output layer for this network
313 | *
314 | * @return {OutputLayer} the output layer
315 | */
316 | NeuralNetwork.prototype.getOutputLayer = function () {
317 | return this.getLayer(this._layers.length - 1);
318 | };
319 |
320 | NeuralNetwork.prototype.toJSON = function () {
321 | var layers = [];
322 | for (var i = 0; i < this._layers.length; i++) {
323 | layers.push(this._layers[i].toJSON());
324 | }
325 |
326 | return {
327 | "layers": layers,
328 | "learningRate": this._learningRate,
329 | "seed": this._seed
330 | 	};
331 | };
332 |
333 | module.exports = NeuralNetwork;
334 |
--------------------------------------------------------------------------------
/src/Neuron.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var Connection = require("./Connection.js");
10 |
11 | /**
12 | * An artificial neuron that may be part of a layer in an artificial neural network
13 | *
14 | * @param {Layer} layer - the layer that this neuron belongs to
15 | * @constructor
16 | */
17 | function Neuron(layer) {
18 |
19 | /**
20 | * The current input of this neuron
21 | *
22 | * @type {number}
23 | * @private
24 | */
25 | this._input = 0;
26 |
27 | /**
28 | * The current activation of this neuron
29 | *
30 | * @type {number}
31 | * @private
32 | */
33 | this._activation = 0;
34 |
35 | /**
36 | 	 * The current delta of this neuron, i.e. its error scaled by the derivative of the activation function at this neuron's input
37 | *
38 | * @type {number}
39 | * @private
40 | */
41 | this._delta = 0;
42 |
43 | /**
44 | * The layer that this neuron belongs to
45 | *
46 | * @type {Layer}
47 | * @private
48 | */
49 | this._layer = layer;
50 |
51 | /**
52 | * The list of outgoing connections from this neuron
53 | *
54 | * @type {Connection[]}
55 | * @private
56 | */
57 | this._connections = [];
58 |
59 | }
60 |
61 | /**
62 | * Returns the current activation of this neuron
63 | *
64 | * @return {number} the activation
65 | */
66 | Neuron.prototype.getActivation = function () {
67 | return this._activation;
68 | };
69 |
70 | /**
71 | * Returns the current delta of this neuron
72 | *
73 | * @return {number}
74 | */
75 | Neuron.prototype.getDelta = function () {
76 | return this._delta;
77 | };
78 |
79 | /**
80 | * Returns the current input of this neuron
81 | *
82 | * @return {number} the input
83 | */
84 | Neuron.prototype.getInput = function () {
85 | return this._input;
86 | };
87 |
88 | /**
89 | * Returns the outgoing connection at the specified index
90 | *
91 | * @param {number} index - the connection to return
92 | * @return {Connection} the connection
93 | */
94 | Neuron.prototype.getConnection = function (index) {
95 | return this._connections[index];
96 | };
97 |
98 | /** Resets this neuron */
99 | Neuron.prototype.reset = function () {
100 | this._input = 0;
101 | this._activation = 0;
102 | };
103 |
104 | /** Propagates the activation from this neuron to the connected neurons */
105 | Neuron.prototype.propagate = function () {
106 | // determine this neuron's activation
107 | this._activation = this._layer.getActivationFunction().evaluate(this._input);
108 |
109 | var connection;
110 |
111 | // for every connection from this neuron
112 | for (var i = 0; i < this._connections.length; i++) {
113 | connection = this._connections[i];
114 |
115 | connection.getTargetNeuron().feed(this._activation * connection.getWeight());
116 | }
117 | };
118 |
119 | /**
120 | * Feeds the specified value into this neuron
121 | *
122 | * @param {number} value - the value to add to this neuron's input
123 | */
124 | Neuron.prototype.feed = function (value) {
125 | this._input += value;
126 | };
127 |
128 | /**
129 | * Adds a new connection to the other neuron that is specified
130 | *
131 | * @param {Neuron} targetNeuron - the other neuron to connect to
132 | */
133 | Neuron.prototype.connectTo = function (targetNeuron) {
134 | var initialWeight;
135 | if (Neuron.preDefinedWeights.length) {
136 | 		// consume the next pre-defined weight in order (`shift` instead of `pop`, so that weights restored from JSON keep their original order)
137 | 		initialWeight = Neuron.preDefinedWeights.shift();
138 | }
139 | else {
140 | // initialize the weight randomly with a mean of zero
141 | initialWeight = this._layer.getPrng().getRandomFloat(0, 0.3) - 0.15;
142 | }
143 |
144 | this._connections.push(new Connection(targetNeuron, initialWeight));
145 | };
146 |
147 | /**
148 | * Returns the layer that this neuron belongs to
149 | *
150 | * @return {Layer} the layer
151 | */
152 | Neuron.prototype.getLayer = function () {
153 | return this._layer;
154 | };
155 |
156 | /**
157 | * Calculates the error of this neuron
158 | *
159 | * @return {number} the error of this neuron
160 | */
161 | Neuron.prototype.calculateError = function () {
162 | // prepare a variable for the error to be accumulated
163 | var error = 0;
164 |
165 | // for every connection
166 | for (var i = 0; i < this._connections.length; i++) {
167 | // accumulate the error by adding the weighted delta from the connection
168 | error += this._connections[i].getWeightedDelta();
169 | }
170 |
171 | // return the accumulated error
172 | return error;
173 | };
174 |
175 | /** Updates the delta of this neuron, i.e. the derivative of the activation function at this neuron's input multiplied by this neuron's accumulated error */
176 | Neuron.prototype.updateDelta = function () {
177 | this._delta = this._layer.getActivationFunction().evaluateDerivative(this._input) * this.calculateError();
178 | };
179 |
180 | /**
181 | * Updates all weights for this neuron
182 | *
183 | * @param {number} learningRate - the learning rate to use
184 | * @param {boolean} immediate - whether to update the weights immediately or defer the update until later
185 | */
186 | Neuron.prototype.updateWeightsAtConnections = function (learningRate, immediate) {
187 | var update;
188 |
189 | // for every connection
190 | for (var i = 0; i < this._connections.length; i++) {
191 | // calculate the product of the learning rate and the negative gradient
192 | update = learningRate * this._connections[i].getTargetNeuron().getDelta() * this.getActivation();
193 | // update the weight to move in the direction of a minimum of the error function
194 | this._connections[i].updateWeight(update, immediate);
195 | }
196 | };
197 |
198 | /** Releases all deferred weight updates */
199 | Neuron.prototype.releaseWeightUpdatesAtConnections = function () {
200 | // for every connection
201 | for (var i = 0; i < this._connections.length; i++) {
202 | // release all pending weight updates
203 | this._connections[i].releaseWeightUpdates();
204 | }
205 | };
206 |
207 | Neuron.prototype.toJSON = function () {
208 | var connections = [];
209 | for (var i = 0; i < this._connections.length; i++) {
210 | connections.push(this._connections[i].toJSON());
211 | }
212 |
213 | return {
214 | "connections": connections
215 |     };
216 | };
217 |
218 | Neuron.preDefinedWeights = [];
219 |
220 | module.exports = Neuron;
221 |
--------------------------------------------------------------------------------
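
Since `Neuron.preDefinedWeights` is consumed via `pop()`, pre-defined weights are taken from the end of the list; only once the list is exhausted does `connectTo` fall back to random initialization. A minimal standalone sketch of that fallback logic, with `Math.random` standing in for the layer's PRNG:

```javascript
// standalone sketch of the weight initialization in `Neuron.prototype.connectTo`
var preDefinedWeights = [ 0.2, -0.1, 0.05 ];

// stand-in for the PRNG that the layer would normally provide
function getRandomFloat(min, max) {
	// a uniform value in `[min, max)`
	return min + Math.random() * (max - min);
}

function nextInitialWeight() {
	if (preDefinedWeights.length) {
		// take the next pre-defined weight from the end of the list
		return preDefinedWeights.pop();
	}
	else {
		// draw a random weight in `[-0.15, 0.15)` with a mean of zero
		return getRandomFloat(0, 0.3) - 0.15;
	}
}

console.log(nextInitialWeight()); // 0.05 (pre-defined)
console.log(nextInitialWeight()); // -0.1 (pre-defined)
```
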
/src/OutputLayer.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var Layer = require("./Layer.js");
10 | var OutputNeuron = require("./OutputNeuron.js");
11 |
12 | /**
13 | * An output layer that may be part of an artificial neural network
14 | *
15 | * @param {number} size - the number of neurons to use in this layer
16 | * @param {ActivationFunction} activationFunction - the activation function that this layer should use
17 | * @param {Prng} prng - the PRNG that this layer should use
18 | * @constructor
19 | * @extends Layer
20 | */
21 | function OutputLayer(size, activationFunction, prng) {
22 |
23 | // call the super class's constructor
24 | Layer.call(this, size, activationFunction, prng);
25 |
26 | }
27 |
28 | // create a prototype that inherits from the super class's prototype
29 | OutputLayer.prototype = Object.create(Layer.prototype);
30 | // fix the constructor pointer so that it doesn't point to the super class
31 | OutputLayer.prototype.constructor = OutputLayer;
32 |
33 | /**
34 | * Creates a new neuron for this layer
35 | *
36 | * @return {OutputNeuron} the new neuron
37 | * @private
38 | */
39 | OutputLayer.prototype._createNeuron = function () {
40 | return new OutputNeuron(this);
41 | };
42 |
43 | /**
44 | * Updates the deltas in this layer
45 | *
46 | * @param {number[]} desiredOutput - the desired output of this layer
47 | */
48 | OutputLayer.prototype.updateDeltas = function (desiredOutput) {
49 | var numNeurons = this.getSize();
50 |
51 | if (desiredOutput.length !== numNeurons) {
52 | throw "Size of desired output (`"+desiredOutput.length+"`) and number of output neurons (`"+numNeurons+"`) must match";
53 | }
54 |
55 | // for every neuron
56 | for (var i = 0; i < numNeurons; i++) {
57 | // update the delta
58 | this.getNeuron(i).updateDelta(desiredOutput[i]);
59 | }
60 | };
61 |
62 | /**
63 | * Calculates the sum of the squares of all errors in this layer
64 | *
65 | * @param {number[]} desiredOutput - the desired output of this layer
66 | * @return {number} the sum-squared error of this layer
67 | */
68 | OutputLayer.prototype.calculateSumSquaredError = function (desiredOutput) {
69 | // get the number of output neurons
70 | var numOutputs = this.getSize();
71 |
72 | // prepare a variable to accumulate the squared errors
73 | var squaredErrors = 0;
74 |
75 | var error;
76 |
77 | // for each output neuron
78 | for (var i = 0; i < numOutputs; i++) {
79 | // calculate the error of the individual neuron
80 | error = this.getNeuron(i).calculateError(desiredOutput[i]);
81 | // add the square of the individual error to the sum
82 | squaredErrors += Math.pow(error, 2);
83 | }
84 |
85 | // return the sum-squared error
86 | return squaredErrors;
87 | };
88 |
89 | module.exports = OutputLayer;
90 |
--------------------------------------------------------------------------------
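
The sum-squared error computed above is `E = (d_1 - a_1)^2 + ... + (d_n - a_n)^2` over all output neurons. A standalone sketch with plain arrays standing in for the neurons and their activations:

```javascript
// standalone sketch of `OutputLayer.prototype.calculateSumSquaredError`
function calculateSumSquaredError(actualOutput, desiredOutput) {
	if (desiredOutput.length !== actualOutput.length) {
		throw "Size of desired output (`"+desiredOutput.length+"`) and number of output neurons (`"+actualOutput.length+"`) must match";
	}

	var squaredErrors = 0;
	for (var i = 0; i < actualOutput.length; i++) {
		// the error of an individual neuron is `desired - actual`
		squaredErrors += Math.pow(desiredOutput[i] - actualOutput[i], 2);
	}

	return squaredErrors;
}

console.log(calculateSumSquaredError([ 0.8, 0.1 ], [ 1, 0 ])); // ~0.05
```
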
/src/OutputNeuron.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var Neuron = require("./Neuron.js");
10 |
11 | /**
12 | * An artificial output neuron that may be part of a layer in an artificial neural network
13 | *
14 | * @param {Layer} layer - the layer that this neuron belongs to
15 | * @constructor
16 | * @extends Neuron
17 | */
18 | function OutputNeuron(layer) {
19 |
20 | // call the super class's constructor
21 | Neuron.call(this, layer);
22 |
23 | }
24 |
25 | // create a prototype that inherits from the super class's prototype
26 | OutputNeuron.prototype = Object.create(Neuron.prototype);
27 | // fix the constructor pointer so that it doesn't point to the super class
28 | OutputNeuron.prototype.constructor = OutputNeuron;
29 |
30 | /**
31 |  * Calculates the error of this neuron from the specified desired output
32 | *
33 | * @param {number} desiredOutput - the desired output for this neuron
34 | * @return {number} the error of this neuron
35 | */
36 | OutputNeuron.prototype.calculateError = function (desiredOutput) {
37 | // return the difference between the desired output and the actual output
38 | return desiredOutput - this.getActivation();
39 | };
40 |
41 | /**
42 | * Updates the delta of this neuron using the expected output of this neuron
43 | *
44 | * @param {number} desiredOutput - the expected output of this neuron
45 | */
46 | OutputNeuron.prototype.updateDelta = function (desiredOutput) {
47 | this._delta = this.getLayer().getActivationFunction().evaluateDerivative(this._input) * this.calculateError(desiredOutput);
48 | };
49 |
50 | module.exports = OutputNeuron;
51 |
--------------------------------------------------------------------------------
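
The delta rule implemented above combines the derivative of the activation function with the output error, i.e. `delta = f'(input) * (desiredOutput - actualOutput)`. A standalone sketch using the logistic function as the example activation:

```javascript
// standalone sketch of `OutputNeuron.prototype.updateDelta`
function logistic(x) {
	return 1 / (1 + Math.exp(-x));
}

function logisticDerivative(x) {
	return logistic(x) * (1 - logistic(x));
}

var input = 0.5;
var actualOutput = logistic(input); // ~0.622
var desiredOutput = 1;

// delta = f'(input) * (desiredOutput - actualOutput)
var delta = logisticDerivative(input) * (desiredOutput - actualOutput);

console.log(delta); // ~0.089
```
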
/src/activation/ActivationFunction.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | /**
10 | * Activation function for an artificial neural network
11 | *
12 | * An activation function is applied on a neuron and determines its activation based on its weighted inputs
13 | *
14 | * The main factor in deciding which activation function to use is usually the range of the function
15 | *
16 |  * The hyperbolic tangent and the Rectified Linear Unit (ReLU) are among the most commonly used activation functions
17 | *
18 | * @constructor
19 | */
20 | function ActivationFunction() {
21 |
22 | /**
23 | * Evaluates the function at the specified point
24 | *
25 | * @abstract
26 | * @param {number} x - the point to evaluate the function at
27 | * @return {number} the function value
28 | */
29 | this.evaluate = function (x) {
30 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
31 | };
32 |
33 | /**
34 | * Evaluates the derivative of the function at the specified point
35 | *
36 | * @abstract
37 | * @param {number} x - the point to evaluate the derivative at
38 | * @return {number} the derivative's function value
39 | */
40 | this.evaluateDerivative = function (x) {
41 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
42 | };
43 |
44 | /**
45 | * Returns the least possible value of this function
46 | *
47 | * @abstract
48 | * @return {number} the lower bound
49 | */
50 | this.getLowerBound = function () {
51 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
52 | };
53 |
54 | /**
55 | * Returns the greatest possible value of this function
56 | *
57 | * @abstract
58 | * @return {number} the upper bound
59 | */
60 | this.getUpperBound = function () {
61 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
62 | };
63 |
64 | /**
65 | * Returns whether this function is monotonic
66 | *
67 | * @abstract
68 | * @return {boolean} whether the function is monotonic
69 | */
70 | this.isMonotonic = function () {
71 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
72 | };
73 |
74 | /**
75 | * Returns whether this function's derivative is monotonic
76 | *
77 | * @abstract
78 | * @return {boolean} whether the function's derivative is monotonic
79 | */
80 | this.isDerivativeMonotonic = function () {
81 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
82 | };
83 |
84 | /**
85 |  * Returns whether this function is zero-centered, i.e. whether its graph passes through or very close to the origin
86 | *
87 | * @abstract
88 | * @return {boolean} whether the graph is centered around `(0,0)`
89 | */
90 | this.isCenteredAroundZero = function () {
91 | throw "Method not implemented in subclass `"+this.constructor.name+"`";
92 | };
93 |
94 | }
95 |
96 | ActivationFunction.prototype.toJSON = function () {
97 | return this.constructor.name;
98 | };
99 |
100 | module.exports = ActivationFunction;
101 |
--------------------------------------------------------------------------------
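
A custom activation function is added by subclassing `ActivationFunction` and overriding every abstract method, exactly as the bundled functions below do. As an illustration of the pattern, a hypothetical Softsign (`x / (1 + |x|)`) that is not part of this library, assuming it were saved under `src/activation/`:

```javascript
"use strict";

var ActivationFunction = require("./ActivationFunction.js");

// hypothetical Softsign activation, `x / (1 + |x|)`, for illustration only
function Softsign() {

	// call the super class's constructor
	ActivationFunction.call(this);

	this.evaluate = function (x) {
		return x / (1 + Math.abs(x));
	};

	this.evaluateDerivative = function (x) {
		return 1 / Math.pow(1 + Math.abs(x), 2);
	};

	this.getLowerBound = function () {
		return -1;
	};

	this.getUpperBound = function () {
		return 1;
	};

	this.isMonotonic = function () {
		return true;
	};

	this.isDerivativeMonotonic = function () {
		return false;
	};

	this.isCenteredAroundZero = function () {
		return true;
	};

}

// create a prototype that inherits from the super class's prototype
Softsign.prototype = Object.create(ActivationFunction.prototype);
// fix the constructor pointer so that it doesn't point to the super class
Softsign.prototype.constructor = Softsign;

module.exports = Softsign;
```
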
/src/activation/ArcTangent.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 |  * ArcTangent (also `arctan`, `atan` or `tan^(-1)`), a sigmoid function that can be used as an activation function
13 | *
14 | * @constructor
15 | * @extends ActivationFunction
16 | */
17 | function ArcTangent() {
18 |
19 | // call the super class's constructor
20 | ActivationFunction.call(this);
21 |
22 | this.evaluate = function (x) {
23 | return Math.atan(x);
24 | };
25 |
26 | this.evaluateDerivative = function (x) {
27 | return 1 / (Math.pow(x, 2) + 1);
28 | };
29 |
30 | this.getLowerBound = function () {
31 |         return -Math.PI / 2;
32 | };
33 |
34 | this.getUpperBound = function () {
35 | return Math.PI / 2;
36 | };
37 |
38 | this.isMonotonic = function () {
39 | return true;
40 | };
41 |
42 | this.isDerivativeMonotonic = function () {
43 | return false;
44 | };
45 |
46 | this.isCenteredAroundZero = function () {
47 | return true;
48 | };
49 |
50 | }
51 |
52 | // create a prototype that inherits from the super class's prototype
53 | ArcTangent.prototype = Object.create(ActivationFunction.prototype);
54 | // fix the constructor pointer so that it doesn't point to the super class
55 | ArcTangent.prototype.constructor = ArcTangent;
56 |
57 | module.exports = ArcTangent;
58 |
--------------------------------------------------------------------------------
/src/activation/BinaryStep.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 | * Binary step that can be used as an activation function
13 | *
14 | * The function is often used for binary classifiers
15 | *
16 | * @constructor
17 | * @extends ActivationFunction
18 | */
19 | function BinaryStep() {
20 |
21 | // call the super class's constructor
22 | ActivationFunction.call(this);
23 |
24 | this.evaluate = function (x) {
25 | if (x < 0) {
26 | return 0;
27 | }
28 | else {
29 | return 1;
30 | }
31 | };
32 |
33 | this.evaluateDerivative = function (x) {
34 | if (x !== 0) {
35 | return 0;
36 | }
37 | else {
38 | return Number.NaN;
39 | }
40 | };
41 |
42 | this.getLowerBound = function () {
43 | return 0;
44 | };
45 |
46 | this.getUpperBound = function () {
47 | return 1;
48 | };
49 |
50 | this.isMonotonic = function () {
51 | return true;
52 | };
53 |
54 | this.isDerivativeMonotonic = function () {
55 | return false;
56 | };
57 |
58 | this.isCenteredAroundZero = function () {
59 | return false;
60 | };
61 |
62 | }
63 |
64 | // create a prototype that inherits from the super class's prototype
65 | BinaryStep.prototype = Object.create(ActivationFunction.prototype);
66 | // fix the constructor pointer so that it doesn't point to the super class
67 | BinaryStep.prototype.constructor = BinaryStep;
68 |
69 | module.exports = BinaryStep;
70 |
--------------------------------------------------------------------------------
/src/activation/GaussianFunction.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 | * Gaussian function that can be used as an activation function
13 | *
14 |  * The function is often used to model probability distributions, e.g. in radial basis function networks
15 | *
16 | * @constructor
17 | * @extends ActivationFunction
18 | */
19 | function GaussianFunction() {
20 |
21 | // call the super class's constructor
22 | ActivationFunction.call(this);
23 |
24 | this.evaluate = function (x) {
25 | return Math.exp(-Math.pow(x, 2));
26 | };
27 |
28 | this.evaluateDerivative = function (x) {
29 | return -2 * x * this.evaluate(x);
30 | };
31 |
32 | this.getLowerBound = function () {
33 | return 0;
34 | };
35 |
36 | this.getUpperBound = function () {
37 | return 1;
38 | };
39 |
40 | this.isMonotonic = function () {
41 | return false;
42 | };
43 |
44 | this.isDerivativeMonotonic = function () {
45 | return false;
46 | };
47 |
48 | this.isCenteredAroundZero = function () {
49 | return false;
50 | };
51 |
52 | }
53 |
54 | // create a prototype that inherits from the super class's prototype
55 | GaussianFunction.prototype = Object.create(ActivationFunction.prototype);
56 | // fix the constructor pointer so that it doesn't point to the super class
57 | GaussianFunction.prototype.constructor = GaussianFunction;
58 |
59 | module.exports = GaussianFunction;
60 |
--------------------------------------------------------------------------------
/src/activation/HyperbolicTangent.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 |  * HyperbolicTangent (also `tanh`), a sigmoid function that can be used as an activation function
13 | *
14 |  * This function is a scaled and shifted version of the logistic function (`tanh(x) = 2 * logistic(2 * x) - 1`), centered around zero
15 | *
16 | * Saturation for very low and very high values is a problem as the gradient used during backpropagation vanishes
17 | *
18 | * @constructor
19 | * @extends ActivationFunction
20 | */
21 | function HyperbolicTangent() {
22 |
23 | // call the super class's constructor
24 | ActivationFunction.call(this);
25 |
26 | this.evaluate = function (x) {
27 | return (Math.exp(x) - Math.exp(-x)) / (Math.exp(x) + Math.exp(-x));
28 | };
29 |
30 | this.evaluateDerivative = function (x) {
31 | return 1 - Math.pow(this.evaluate(x), 2);
32 | };
33 |
34 | this.getLowerBound = function () {
35 | return -1;
36 | };
37 |
38 | this.getUpperBound = function () {
39 | return 1;
40 | };
41 |
42 | this.isMonotonic = function () {
43 | return true;
44 | };
45 |
46 | this.isDerivativeMonotonic = function () {
47 | return false;
48 | };
49 |
50 | this.isCenteredAroundZero = function () {
51 | return true;
52 | };
53 |
54 | }
55 |
56 | // create a prototype that inherits from the super class's prototype
57 | HyperbolicTangent.prototype = Object.create(ActivationFunction.prototype);
58 | // fix the constructor pointer so that it doesn't point to the super class
59 | HyperbolicTangent.prototype.constructor = HyperbolicTangent;
60 |
61 | module.exports = HyperbolicTangent;
62 |
--------------------------------------------------------------------------------
/src/activation/Identity.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 | * Identity that can be used as an activation function
13 | *
14 | * This function is often used for the activation of the output layer only
15 | *
16 | * @constructor
17 | * @extends ActivationFunction
18 | */
19 | function Identity() {
20 |
21 | // call the super class's constructor
22 | ActivationFunction.call(this);
23 |
24 | this.evaluate = function (x) {
25 | return x;
26 | };
27 |
28 | this.evaluateDerivative = function (x) {
29 | return 1;
30 | };
31 |
32 | this.getLowerBound = function () {
33 | return Number.NEGATIVE_INFINITY;
34 | };
35 |
36 | this.getUpperBound = function () {
37 | return Number.POSITIVE_INFINITY;
38 | };
39 |
40 | this.isMonotonic = function () {
41 | return true;
42 | };
43 |
44 | this.isDerivativeMonotonic = function () {
45 | return true;
46 | };
47 |
48 | this.isCenteredAroundZero = function () {
49 | return true;
50 | };
51 |
52 | }
53 |
54 | // create a prototype that inherits from the super class's prototype
55 | Identity.prototype = Object.create(ActivationFunction.prototype);
56 | // fix the constructor pointer so that it doesn't point to the super class
57 | Identity.prototype.constructor = Identity;
58 |
59 | module.exports = Identity;
60 |
--------------------------------------------------------------------------------
/src/activation/LogisticFunction.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 |  * Logistic function, a sigmoid function that can be used as an activation function
13 | *
14 |  * The limited range of `(0, 1)` is often regarded as a desirable property
15 | *
16 | * The function is typically used to model probability
17 | *
18 | * Saturation for very low and very high values is a problem as the gradient used during backpropagation vanishes
19 | *
20 |  * Because the output of this function is not zero-centered, the hyperbolic tangent is usually preferred
21 | *
22 | * @constructor
23 | * @extends ActivationFunction
24 | */
25 | function LogisticFunction() {
26 |
27 | // call the super class's constructor
28 | ActivationFunction.call(this);
29 |
30 | this.evaluate = function (x) {
31 | return 1 / (1 + Math.exp(-x));
32 | };
33 |
34 | this.evaluateDerivative = function (x) {
35 | return this.evaluate(x) * (1 - this.evaluate(x));
36 | };
37 |
38 | this.getLowerBound = function () {
39 | return 0;
40 | };
41 |
42 | this.getUpperBound = function () {
43 | return 1;
44 | };
45 |
46 | this.isMonotonic = function () {
47 | return true;
48 | };
49 |
50 | this.isDerivativeMonotonic = function () {
51 | return false;
52 | };
53 |
54 | this.isCenteredAroundZero = function () {
55 | return false;
56 | };
57 |
58 | }
59 |
60 | // create a prototype that inherits from the super class's prototype
61 | LogisticFunction.prototype = Object.create(ActivationFunction.prototype);
62 | // fix the constructor pointer so that it doesn't point to the super class
63 | LogisticFunction.prototype.constructor = LogisticFunction;
64 |
65 | module.exports = LogisticFunction;
66 |
--------------------------------------------------------------------------------
/src/activation/RectifiedLinearUnit.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 | * Rectified Linear Unit (ReLU), also known as a "ramp function", that can be used as an activation function
13 | *
14 | * This function inhibits all input below the threshold of zero
15 | *
16 | * The major advantages are that this function prevents the vanishing gradient problem and is fast to compute
17 | *
18 | * The major downside of this function is that a significant share of neurons may irreversibly "die"
19 | *
20 | * The "dying ReLU" problem occurs especially with large learning rates and prevents neurons from activating ever again
21 | *
22 | * The "leaky" or parameterized versions of this function are an attempt to fix the "dying ReLU" problem
23 | *
24 | * @constructor
25 | * @extends ActivationFunction
26 | * @param {number} [parameter] - the parameter for a "leaky" or parameterized version, i.e. a small positive number
27 | */
28 | function RectifiedLinearUnit(parameter) {
29 |
30 | // call the super class's constructor
31 | ActivationFunction.call(this);
32 |
33 | /**
34 | * The parameter for a "leaky" or parameterized version of this function
35 | *
36 |  * Enabled when set to a non-zero value; small positive numbers such as `0.01` should be used
37 | *
38 | * @type {number}
39 | * @private
40 | */
41 | this._parameter = parameter || 0;
42 |
43 | this.evaluate = function (x) {
44 | if (x < 0) {
45 | return this._parameter * x;
46 | }
47 | else {
48 | return x;
49 | }
50 | };
51 |
52 | this.evaluateDerivative = function (x) {
53 | if (x < 0) {
54 | return this._parameter;
55 | }
56 | else {
57 | return 1;
58 | }
59 | };
60 |
61 | this.getLowerBound = function () {
62 | if (this._parameter > 0) {
63 | return Number.NEGATIVE_INFINITY;
64 | }
65 | else {
66 | return 0;
67 | }
68 | };
69 |
70 | this.getUpperBound = function () {
71 | return Number.POSITIVE_INFINITY;
72 | };
73 |
74 | this.isMonotonic = function () {
75 | return true;
76 | };
77 |
78 | this.isDerivativeMonotonic = function () {
79 | return true;
80 | };
81 |
82 | this.isCenteredAroundZero = function () {
83 | return false;
84 | };
85 |
86 | }
87 |
88 | // create a prototype that inherits from the super class's prototype
89 | RectifiedLinearUnit.prototype = Object.create(ActivationFunction.prototype);
90 | // fix the constructor pointer so that it doesn't point to the super class
91 | RectifiedLinearUnit.prototype.constructor = RectifiedLinearUnit;
92 |
93 | module.exports = RectifiedLinearUnit;
94 |
--------------------------------------------------------------------------------
/src/activation/SinusoidFunction.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var ActivationFunction = require("./ActivationFunction.js");
10 |
11 | /**
12 | * Sinusoid function that can be used as an activation function
13 | *
14 | * @constructor
15 | * @extends ActivationFunction
16 | */
17 | function SinusoidFunction() {
18 |
19 | // call the super class's constructor
20 | ActivationFunction.call(this);
21 |
22 | this.evaluate = function (x) {
23 | return Math.sin(x);
24 | };
25 |
26 | this.evaluateDerivative = function (x) {
27 | return Math.cos(x);
28 | };
29 |
30 | this.getLowerBound = function () {
31 | return -1;
32 | };
33 |
34 | this.getUpperBound = function () {
35 | return 1;
36 | };
37 |
38 | this.isMonotonic = function () {
39 | return false;
40 | };
41 |
42 | this.isDerivativeMonotonic = function () {
43 | return false;
44 | };
45 |
46 | this.isCenteredAroundZero = function () {
47 | return true;
48 | };
49 |
50 | }
51 |
52 | // create a prototype that inherits from the super class's prototype
53 | SinusoidFunction.prototype = Object.create(ActivationFunction.prototype);
54 | // fix the constructor pointer so that it doesn't point to the super class
55 | SinusoidFunction.prototype.constructor = SinusoidFunction;
56 |
57 | module.exports = SinusoidFunction;
58 |
--------------------------------------------------------------------------------
/src/header.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 |
--------------------------------------------------------------------------------
/src/main.js:
--------------------------------------------------------------------------------
1 | /*
2 | * JS-NeuralNetwork (https://github.com/delight-im/JS-NeuralNetwork)
3 | * Copyright (c) delight.im (https://www.delight.im/)
4 | * Licensed under the MIT License (https://opensource.org/licenses/MIT)
5 | */
6 |
7 | "use strict";
8 |
9 | var NeuralNetwork = require("./NeuralNetwork.js");
10 | var FeedforwardNeuralNetwork = require("./FeedforwardNeuralNetwork.js");
11 |
12 | var ArcTangent = require("./activation/ArcTangent.js");
13 | var BinaryStep = require("./activation/BinaryStep.js");
14 | var GaussianFunction = require("./activation/GaussianFunction.js");
15 | var HyperbolicTangent = require("./activation/HyperbolicTangent.js");
16 | var Identity = require("./activation/Identity.js");
17 | var LogisticFunction = require("./activation/LogisticFunction.js");
18 | var RectifiedLinearUnit = require("./activation/RectifiedLinearUnit.js");
19 | var SinusoidFunction = require("./activation/SinusoidFunction.js");
20 |
21 | module.exports = {
22 | Type: {
23 | Feedforward: FeedforwardNeuralNetwork
24 | },
25 | Activation: {
26 | ArcTangent: ArcTangent,
27 | BinaryStep: BinaryStep,
28 | GaussianFunction: GaussianFunction,
29 | HyperbolicTangent: HyperbolicTangent,
30 | Identity: Identity,
31 | LogisticFunction: LogisticFunction,
32 | RectifiedLinearUnit: RectifiedLinearUnit,
33 | SinusoidFunction: SinusoidFunction
34 | }
35 | };
36 |
--------------------------------------------------------------------------------
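
A minimal sketch of consuming these exports from the repository root; the constructor arguments of `Type.Feedforward` are documented elsewhere in this repository and omitted here:

```javascript
var NeuralNetwork = require("./src/main.js");

// the network type and the activation functions are exposed as namespaces
console.log(typeof NeuralNetwork.Type.Feedforward); // "function"
console.log(new NeuralNetwork.Activation.HyperbolicTangent().evaluate(0)); // 0
```
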