├── .gitignore ├── LICENSE ├── README.md ├── arduino ├── fritzing │ └── tlight_detect.fzz ├── learnfast │ ├── learnfast.pde │ └── neural.pde ├── neural │ ├── Neural.cpp │ └── Neural.h ├── tcsrgbsensor │ └── tcsrgbsensor.ino ├── tlight_detect │ ├── Neural.cpp │ ├── Neural.h │ └── tlight_detect.ino └── tlight_weights │ ├── Neural.cpp │ ├── Neural.h │ └── tlight_weights.ino ├── processing ├── and │ ├── and.pde │ ├── data │ │ └── Calibri-48.vlw │ └── neural.pde ├── fsxor │ ├── fsxor.pde │ └── neural.pde ├── neural │ └── neural.pde ├── nn_test │ ├── neural.pde │ └── nn_test.pde ├── or │ ├── data │ │ └── Calibri-48.vlw │ ├── elektor_or.pde │ └── neural.pde └── xor │ ├── data │ └── Calibri-48.vlw │ ├── neural.pde │ ├── xor.pde │ └── xorpde.old ├── trafficlight ├── additive │ └── additive.pde ├── computer_vision │ ├── computer_vision.pde │ └── data │ │ └── ArialMT-48.vlw ├── findcamera │ └── findcamera.pde ├── resources │ ├── traffic-light.jpg │ ├── traffic-light.pdf │ ├── traffic-light.png │ └── traffic-light.pptx └── tlight_detect │ ├── data │ └── ArialMT-48.vlw │ ├── neural.pde │ └── tlight_detect.pde └── workedexample ├── Matt Mazur Example - Ver 2.xlsx └── Matt Mazur Example.xlsx /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore file for Simple Neural Network project 2 | 3 | # Ignore PDF files generated by documentation 4 | *.pdf 5 | 6 | # Ignore image files 7 | *.png 8 | *.jpg 9 | *.bmp 10 | 11 | # Ignore temporary files generated by gedit 12 | *.*~ 13 | *~ 14 | 15 | # Ignore compressed and archive files 16 | *.zip 17 | *.gz 18 | *.7z 19 | *.tar 20 | 21 | #Ignore extraneous MS files 22 | *.csv 23 | *.xlsl 24 | *.xls 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Stuart Cording 4 | 5 | Permission is hereby granted, free of charge, to any 
person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # simple-neural-network 2 | Collection of PC and Arduino Neural Network Applications. 3 | 4 | The code provided here is a simple multilayer perceptron (MLP) neural network implementation coded from scratch and based upon an article by Matt Mazur (https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/). 
5 | 6 | It is accompanied by four articles written for Elektor: 7 | * Part 1: Artificial Neurons https://www.elektormagazine.com/articles/neural-networks-part-1-artificial-neurons 8 | * Part 2: Logical Neurons https://www.elektormagazine.com/articles/neural-networks-part-2-logical-neurons 9 | * Part 3: Practical Neurons https://www.elektormagazine.com/articles/neural-networks-part-3-practical-neurons/ 10 | * Part 4: Embedded Neurons https://www.elektormagazine.com/news/neural-networks-part-4-embedded-neurons 11 | 12 | The folders contain the following: 13 | 14 | * workedexample - This contains a spreadsheet that performs all the calculations undertaken in the Matt Mazur article. It can be used to explore the math of backpropagation. 15 | * processing - This contains a series of example projects for Processing (https://processing.org/) that use the MLP created. They test the MLP implementation as well as show how it learns various logic functions (AND, OR, XOR) while visualizing the weights during learning. 16 | * trafficlight - These projects show how a webcam and Processing can be used to 'learn' the colors of a traffic light. An example traffic light image is included. 17 | * arduino - This contains projects for an Arduino board together with the Adafruit TCS34725 (https://www.adafruit.com/product/1334) to perform traffic light color classification on a microcontroller. A 32-bit board is recommended (Arduino DUE, Arduino M0 Pro); 8-bit boards will struggle with the backpropagation. 18 | 19 | Hope you enjoy the code! Let me know your experiences with it. 
20 | -------------------------------------------------------------------------------- /arduino/fritzing/tlight_detect.fzz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/arduino/fritzing/tlight_detect.fzz -------------------------------------------------------------------------------- /arduino/learnfast/learnfast.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * This teaches the traffic light colors to the MLP, then 3 | * outputs the weights for use on an Arduino. 4 | */ 5 | boolean pause = false; 6 | 7 | boolean learningMode = false; 8 | boolean learnRed = false; 9 | boolean learnAmber = false; 10 | boolean learnGreen = false; 11 | boolean learnOther = false; 12 | 13 | int r; 14 | int g; 15 | int b; 16 | 17 | Neural network; 18 | 19 | void setup() { 20 | println("Configuring neural network..."); 21 | 22 | network = new Neural(3,6,4); 23 | network.setLearningRate(0.5); 24 | println("Inputs =", network.getNoOfInputNodes(), " Hidden = ", network.getNoOfHiddenNodes(), " Outputs = ", network.getNoOfOutputNodes()); 25 | network.setBiasInputToHidden(0.35); 26 | network.setBiasHiddenToOutput(0.60); 27 | 28 | /*********************************** 29 | ** 30 | ** TEACH THE BRAIN !!! 31 | ** 32 | **********************************/ 33 | network.turnLearningOn(); 34 | println("Neural network is learning..."); 35 | 36 | for (int loop = 0; loop < 90000; ++loop) { 37 | 38 | // Enter your RGB values here 39 | teachRed(149, 56, 61); 40 | teachAmber(123, 77, 61); 41 | teachGreen(67, 100, 90); 42 | 43 | teachOther(92, 90, 82); 44 | teachOther(92, 90, 75); 45 | teachOther(73, 93, 89); 46 | //teachOther(152, 167, 161); 47 | 48 | } 49 | 50 | network.turnLearningOff(); 51 | /*********************************** 52 | ** 53 | ** END OF TEACHING !!! 
54 | ** 55 | **********************************/ 56 | 57 | println("Input-to-hidden node weights"); 58 | network.displayInputToHiddenWeightsCurrent(); 59 | println(); 60 | println("Hidden-to-output node weights"); 61 | network.displayHiddenToOutputWeightsCurrent(); 62 | println(); 63 | 64 | println("Arduino sketch code:"); 65 | println(); 66 | 67 | for (int x = 0; x < network.getNoOfInputNodes(); ++x) { 68 | println(" // For Input Node " + x + ": "); 69 | for (int y = 0; y < network.getNoOfHiddenNodes(); ++y) { 70 | println(" network.setInputToHiddenWeight(", x,",", y,",", network.inputToHiddenWeights[x][y],");"); 71 | //print(inputToHiddenWeights[x][y], " "); 72 | } 73 | println(); 74 | } 75 | 76 | for (int x = 0; x < network.getNoOfHiddenNodes(); ++x) { 77 | println(" //For Hidden Node " + x + ": "); 78 | for (int y = 0; y < network.getNoOfOutputNodes(); ++y) { 79 | println(" network.setHiddenToOutputWeight(", x,",", y,",", network.hiddenToOutputWeights[x][y],");"); 80 | //print(hiddenToOutputWeights[x][y], " "); 81 | } 82 | println(); 83 | } 84 | 85 | println("Neural network is ready"); 86 | } 87 | 88 | void draw() { 89 | // No code required here 90 | } 91 | 92 | void teachRed(int r, int g, int b) { 93 | float newR, newG, newB; 94 | 95 | newR = (randomise(r) / 255.0); 96 | newG = (randomise(g) / 255.0); 97 | newB = (randomise(b) / 255.0); 98 | 99 | //println("Red:", newR, newG, newB); 100 | 101 | network.setInputNode(0, newR); 102 | network.setInputNode(1, newG); 103 | network.setInputNode(2, newB); 104 | 105 | network.setOutputNodeDesired(0, 0.99); 106 | network.setOutputNodeDesired(1, 0.01); 107 | network.setOutputNodeDesired(2, 0.01); 108 | network.setOutputNodeDesired(3, 0.01); 109 | 110 | network.calculateOutput(); 111 | } 112 | 113 | void teachAmber(int r, int g, int b) { 114 | float newR, newG, newB; 115 | 116 | newR = (randomise(r) / 255.0); 117 | newG = (randomise(g) / 255.0); 118 | newB = (randomise(b) / 255.0); 119 | 120 | //println("Amber:", newR, 
newG, newB); 121 | 122 | network.setInputNode(0, newR); 123 | network.setInputNode(1, newG); 124 | network.setInputNode(2, newB); 125 | 126 | network.setOutputNodeDesired(0, 0.01); 127 | network.setOutputNodeDesired(1, 0.99); 128 | network.setOutputNodeDesired(2, 0.01); 129 | network.setOutputNodeDesired(3, 0.01); 130 | 131 | network.calculateOutput(); 132 | } 133 | 134 | void teachGreen(int r, int g, int b) { 135 | float newR, newG, newB; 136 | 137 | newR = (randomise(r) / 255.0); 138 | newG = (randomise(g) / 255.0); 139 | newB = (randomise(b) / 255.0); 140 | 141 | network.setInputNode(0, newR); 142 | network.setInputNode(1, newG); 143 | network.setInputNode(2, newB); 144 | 145 | network.setOutputNodeDesired(0, 0.01); 146 | network.setOutputNodeDesired(1, 0.01); 147 | network.setOutputNodeDesired(2, 0.99); 148 | network.setOutputNodeDesired(3, 0.01); 149 | 150 | network.calculateOutput(); 151 | } 152 | 153 | void teachOther(int r, int g, int b) { 154 | float newR, newG, newB; 155 | 156 | newR = (randomise(r) / 255.0); 157 | newG = (randomise(g) / 255.0); 158 | newB = (randomise(b) / 255.0); 159 | 160 | network.setInputNode(0, newR); 161 | network.setInputNode(1, newG); 162 | network.setInputNode(2, newB); 163 | 164 | network.setOutputNodeDesired(0, 0.01); 165 | network.setOutputNodeDesired(1, 0.01); 166 | network.setOutputNodeDesired(2, 0.01); 167 | network.setOutputNodeDesired(3, 0.99); 168 | 169 | network.calculateOutput(); 170 | } 171 | 172 | int randomise(int value) { 173 | value += random(-4, 5); 174 | 175 | if (value > 255) { 176 | value = 255; 177 | } 178 | if (value < 0 ) { 179 | value = 0; 180 | } 181 | 182 | return value; 183 | } 184 | -------------------------------------------------------------------------------- /arduino/learnfast/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural 
network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the 
desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Ouput Weight [", y, x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 278 | 
} 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 | 
newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], "Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /arduino/neural/Neural.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | neural.h - Library for Neural Network. 3 | Created by Stuart Cording, Sept 9th, 2017. 4 | Released into the public domain. 5 | */ 6 | 7 | #include "Arduino.h" 8 | #include "Neural.h" 9 | 10 | static double sigmoid(double x) { 11 | return (1 / (1 + exp(-x))); 12 | } 13 | 14 | Neural::Neural(int inputs, int hidden, int outputs) 15 | { 16 | // Set all variables to zero to start that don't have to be defined here 17 | biasInputToHidden = 0.0; 18 | biasHiddenToOutput = 0.0; 19 | learningRate = 0.0; 20 | totalNetworkError = 0.0; 21 | 22 | // Note that we are not in learning mode 23 | learning = false; 24 | 25 | // Note that we are not in verbose mode 26 | verbose = false; 27 | 28 | // Set learning epoch to 0 29 | learningEpoch = 0; 30 | 31 | // Note the original number of nodes created 32 | noOfInputs = inputs; 33 | noOfHidden = hidden; 34 | noOfOutputs = outputs; 35 | 36 | // Create the desired number of input nodes and set them to zero 37 | inputNodeValues = new double [inputs]; 38 | for (int x = 0; x < inputs; ++x) { 39 | inputNodeValues[x] = 0.0; 40 | } 41 | 42 | // Create the desired number of hidden nodes and set them to zero 43 | hiddenNodeValues = new double [hidden]; 44 | for (int x = 0; x < hidden; ++x) { 45 | hiddenNodeValues[x] = 0.0; 46 | } 47 | 48 | // 
Create the desired number of output and desired output nodes and set them to zero 49 | outputNodeValues = new double [outputs]; 50 | desiredOutputNodeValues = new double [outputs]; 51 | for (int x = 0; x < inputs; ++x) { 52 | outputNodeValues[x] = 0.0; 53 | desiredOutputNodeValues[x] = 0.0; 54 | } 55 | 56 | // For each input node, create both old and new weights 57 | // for each hidden node 58 | inputToHiddenWeights = new double*[inputs]; 59 | for (int x = 0; x < inputs; ++x) { 60 | inputToHiddenWeights[x] = new double[hidden]; 61 | } 62 | 63 | newInputToHiddenWeights = new double*[inputs]; 64 | for (int x = 0; x < inputs; ++x) { 65 | newInputToHiddenWeights[x] = new double[hidden]; 66 | } 67 | 68 | for (int x = 0; x < inputs; ++x) { 69 | for (int y = 0; y < hidden; ++y) { 70 | static double fnum; 71 | fnum = (random(250, 750)) / 1000.0; 72 | // Apply starting random weights to current nodes 73 | inputToHiddenWeights[x][y] = fnum; 74 | // New weights can have 0.0 for now 75 | newInputToHiddenWeights[x][y] = 0.0; 76 | } 77 | } 78 | 79 | // For each hidden node, create both old and new weights 80 | // for each output node 81 | hiddenToOutputWeights = new double*[hidden]; 82 | for (int x = 0; x < hidden; ++x) { 83 | hiddenToOutputWeights[x] = new double[outputs]; 84 | } 85 | 86 | newHiddenToOutputWeights = new double*[hidden]; 87 | for (int x = 0; x < hidden; ++x) { 88 | newHiddenToOutputWeights[x] = new double[outputs]; 89 | } 90 | 91 | for (int x = 0; x < hidden; ++x) { 92 | for (int y = 0; y < outputs; ++y) { 93 | static double fnum; 94 | fnum = (random(250, 750)) / 1000.0; 95 | // Apply starting random weights to current nodes 96 | hiddenToOutputWeights[x][y] = fnum; 97 | // New weights can have 0.0 for now 98 | newHiddenToOutputWeights[x][y] = 0.0; 99 | } 100 | } 101 | } 102 | 103 | void Neural::calculateOutput() { 104 | double tempResult = 0.0; 105 | 106 | // Start by calculating the hidden layer node results for each input node 107 | // For each hidden node Hn: 
108 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 109 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 110 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 111 | // Sum the results for the weight * input for each input node 112 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 113 | if (verbose) { 114 | Serial.print("i["); 115 | Serial.print(y); 116 | Serial.print("] "); 117 | Serial.print(inputNodeValues[y]); 118 | Serial.print(" * iToHW["); 119 | Serial.print(y); 120 | Serial.print(x); 121 | Serial.print("] "); 122 | Serial.print(inputToHiddenWeights[y][x]); 123 | Serial.print(" += "); 124 | Serial.println(tempResult); 125 | } 126 | } 127 | 128 | // Add bias value result to sum 129 | tempResult += 1.0 * biasInputToHidden; 130 | if (verbose) { 131 | Serial.print("Bias: 1.0 * "); 132 | Serial.print(biasInputToHidden); 133 | Serial.print(" += "); 134 | Serial.println(tempResult); 135 | } 136 | 137 | // Squash result using sigmoid of sum 138 | hiddenNodeValues[x] = sigmoid(tempResult); 139 | if (verbose) { 140 | Serial.print("Sigmoid:"); 141 | Serial.println(hiddenNodeValues[x]); 142 | } 143 | 144 | // Reset sumation variable for next round 145 | tempResult = 0.0; 146 | } 147 | 148 | // Next calculating the output layer node results for each hidden node 149 | // For each output node On: 150 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... 
+ Obias * 1) 151 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 152 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 153 | 154 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 155 | if (verbose) { 156 | Serial.print("h["); 157 | Serial.print(y); 158 | Serial.print("] "); 159 | Serial.print(hiddenNodeValues[y]); 160 | Serial.print(" * hToOW["); 161 | Serial.print(y); 162 | Serial.print(" "); 163 | Serial.print(x); 164 | Serial.print("] "); 165 | Serial.print(hiddenToOutputWeights[y][x]); 166 | Serial.print(" += "); 167 | Serial.println(tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | Serial.print("Bias: 1.0 * "); 175 | Serial.print(biasHiddenToOutput); 176 | Serial.print(" += "); 177 | Serial.println(tempResult); 178 | } 179 | 180 | // Result goes into the output node 181 | outputNodeValues[x] = sigmoid(tempResult); 182 | if (verbose) { 183 | Serial.print("Sigmoid:"); 184 | Serial.println(outputNodeValues[x]); 185 | } 186 | 187 | // Reset sumation variable for next round 188 | tempResult = 0.0; 189 | } 190 | 191 | // Calculate total error 192 | // ERRORtotal = SUM 0.5 * (target - output)^2 193 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 194 | //tempResult += 0.5 * ((desiredOutputNodeValues[x] - outputNodeValues[x]) * 195 | // (desiredOutputNodeValues[x] - outputNodeValues[x])); 196 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 197 | if (verbose) { 198 | Serial.print("Error o["); 199 | Serial.print(x); 200 | Serial.print("]:"); 201 | Serial.println(tempResult); 202 | Serial.print(" : 0.5 * ("); 203 | Serial.print(desiredOutputNodeValues[x]); 204 | Serial.print("-"); 205 | Serial.print(outputNodeValues[x]); 206 | Serial.println(")^2"); 207 | 208 | } 209 | } 210 | 211 | if (verbose) { 212 | Serial.print("Total Error: "); 213 | Serial.println(tempResult); 214 | } 215 | 216 | totalNetworkError = tempResult; 217 | 218 | if 
(learning) { 219 | if (verbose) { 220 | Serial.println(">>> Executing learning loop..."); 221 | } 222 | _backPropagation(); 223 | if (verbose) { 224 | Serial.print(">>> Learning loop complete. Epoch = "); 225 | Serial.println(learningEpoch); 226 | } 227 | } 228 | } 229 | 230 | /* _backPropagation() 231 | * Uses network error to update weights when learning is 232 | * enabled. 233 | */ 234 | void Neural::_backPropagation() { 235 | double totalErrorChangeWRTOutput = 0.0; 236 | double outputChangeWRTNetInput = 0.0; 237 | double netInputChangeWRTWeight = 0.0; 238 | double errorTotalWRTHiddenNode = 0.0; // NOTE: Does not seem to be used 239 | //int x = 0; 240 | 241 | // Increment epoch 242 | ++learningEpoch; 243 | 244 | // Consider the output layer to calculate new weights for hidden-to-output layer 245 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 246 | if (verbose) { 247 | //Serial.println("Hidden to Output Weight Correction:"); 248 | } 249 | 250 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 251 | 252 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 253 | 254 | if (verbose) { 255 | //Serial.println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 256 | } 257 | 258 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 259 | 260 | if (verbose) { 261 | //Serial.println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 262 | } 263 | 264 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 265 | double weightChange = 0.0; 266 | 267 | netInputChangeWRTWeight = hiddenNodeValues[y]; 268 | 269 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 270 | 271 | if (verbose) { 272 | //Serial.println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 273 | } 274 | 275 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * 
weightChange); 276 | 277 | if (verbose) { 278 | //Serial.println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 279 | //Serial.println("New Weight [", y, x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 280 | } 281 | } 282 | } 283 | 284 | // Consider the hidden layer (based upon original weights) 285 | if (verbose) { 286 | //Serial.println("Input to Hidden Weight Correction:"); 287 | } 288 | 289 | // Need to consider for each hidden node 290 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 291 | // For each hidden node we need: 292 | // - totalErrorChangeWRTOutput 293 | // - outputChangeWRTNetInput 294 | // - hiddenToOutputWeights 295 | double totalErrorChangeWRTHidden = 0.0; 296 | double outputHiddenWRTnetHidden = 0.0; 297 | double totalErrorChangeWRTweight = 0.0; 298 | 299 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 300 | // totalErrorChangeWRTOutput 301 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 302 | if (verbose) { 303 | //Serial.println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 304 | } 305 | 306 | // outputChangeWRTNetInput 307 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 308 | if (verbose) { 309 | //Serial.println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 310 | } 311 | 312 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 313 | 314 | if (verbose) { 315 | //Serial.println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 316 | } 317 | } 318 | 319 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 320 | 321 | if (verbose) { 322 | //Serial.println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 323 | } 324 | 325 | if (verbose) { 326 | //Serial.println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 327 | } 328 | 329 | // For each 
input, calculate the weight change 330 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 331 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 332 | 333 | if (verbose) { 334 | //Serial.println("inputNodeValues[", y, "] =", inputNodeValues[y]); 335 | } 336 | 337 | if (verbose) { 338 | //Serial.println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 339 | } 340 | 341 | newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 342 | 343 | if (verbose) { 344 | //Serial.println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 345 | //Serial.println("newInputToHiddenWeights[", y, "][", x, "] =", newInputToHiddenWeights[y][x]); 346 | } 347 | } 348 | } 349 | 350 | 351 | // Update all weights to newly calculated values 352 | if (verbose) { 353 | //Serial.println("Updating weights."); 354 | } 355 | 356 | // Update the input-to-hidden weights 357 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 358 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 359 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 360 | } 361 | } 362 | 363 | // Update the hidden-to-output weights 364 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 365 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 366 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 367 | } 368 | } 369 | } 370 | 371 | void Neural::setLearningRate(double bias) { 372 | learningRate = bias; 373 | } 374 | 375 | double Neural::getLearningRate() { 376 | return learningRate; 377 | } 378 | 379 | void Neural::setBiasInputToHidden(double bias) { 380 | biasInputToHidden = bias; 381 | } 382 | 383 | double Neural::getBiasInputToHidden() { 384 | return biasInputToHidden; 385 | } 386 | 387 | void Neural::setBiasHiddenToOutput(double bias) { 388 | biasHiddenToOutput = bias; 389 | } 390 | 391 | double Neural::getBiasHiddenToOutput() { 392 | return biasHiddenToOutput; 393 | } 

// Enables learning (back-propagation on each calculateOutput() call)
// and restarts the epoch counter.
void Neural::turnLearningOn() {
  learning = true;
  // Also reset learning epoch
  learningEpoch = 0;
}

// Disables learning; calculateOutput() becomes forward-pass only.
void Neural::turnLearningOff() {
  learning = false;
}

// Sets one input node value. NOTE(review): 'node' is not range-checked.
void Neural::setInputNode(int node, double value) {
  inputNodeValues[node] = value;
}

double Neural::getInputNode(int node) {
  return inputNodeValues[node];
}

// Sets the training target for one output node.
void Neural::setOutputNodeDesired(int node, double value) {
  desiredOutputNodeValues[node] = value;
}

double Neural::getOutputNodeDesired(int node) {
  return desiredOutputNodeValues[node];
}

int Neural::getNoOfInputNodes() {
  return noOfInputs;
}

int Neural::getNoOfHiddenNodes() {
  return noOfHidden;
}

int Neural::getNoOfOutputNodes() {
  return noOfOutputs;
}

// Reads the result of the most recent forward pass.
double Neural::getOutputNode(int node) {
  return outputNodeValues[node];
}

// Prints the current input-to-hidden weight matrix over Serial.
void Neural::displayInputToHiddenWeightsCurrent() {
  for (int x = 0; x < getNoOfInputNodes(); ++x) {
    Serial.print("For Input Node ");
    Serial.print(x);
    Serial.print(": ");

    for (int y = 0; y < getNoOfHiddenNodes(); ++y) {
      Serial.print(inputToHiddenWeights[x][y]);
      Serial.print(" ");
    }

    Serial.println();
  }
}

// Prints the current hidden-to-output weight matrix over Serial.
void Neural::displayHiddenToOutputWeightsCurrent() {
  for (int x = 0; x < getNoOfHiddenNodes(); ++x) {
    Serial.print("For Hidden Node ");
    Serial.print(x);
    Serial.print(": ");

    for (int y = 0; y < getNoOfOutputNodes(); ++y) {
      Serial.print(hiddenToOutputWeights[x][y]);
      Serial.print(" ");
    }

    Serial.println();
  }
}

// Verbose mode prints step-by-step calculations over Serial.
void Neural::turnVerboseOn() {
  verbose = true;
}

void Neural::turnVerboseOff() {
  verbose = false;
}

// Total squared error from the most recent forward pass.
double Neural::getTotalNetworkError() {
  return totalNetworkError;
}

int Neural::getEpoch() {
  return learningEpoch;
}

// Direct weight injection (used to load pre-trained weights).
void Neural::setInputToHiddenWeight(int input, int hidden, double value) {
  inputToHiddenWeights[input][hidden] = value;
}

double Neural::getInputToHiddenWeight(int input, int hidden) {
  return inputToHiddenWeights[input][hidden];
}

void Neural::setHiddenToOutputWeight(int hidden, int output, double value) {
  hiddenToOutputWeights[hidden][output] = value;
}

double Neural::getHiddenToOutputWeight(int hidden, int output) {
  return hiddenToOutputWeights[hidden][output];
}

boolean Neural::getLearningStatus() {
  return learning;
}

// Prints all current input node values on one Serial line.
void Neural::displayInputNodes() {
  for (int x = 0; x < noOfInputs; ++x) {
    Serial.print(getInputNode(x));
    Serial.print(" ");
  }
  Serial.println();
}

// Prints all current hidden node activations on one Serial line.
void Neural::displayHiddenNodes() {
  for (int x = 0; x < getNoOfHiddenNodes(); ++x) {
    Serial.print(hiddenNodeValues[x]);
    Serial.print(" ");
  }
  Serial.println();
}

// Prints all current output node activations on one Serial line.
void Neural::displayOutputNodes() {
  for (int x = 0; x < getNoOfOutputNodes(); ++x) {
    Serial.print(outputNodeValues[x]);
    Serial.print(" ");
  }
  Serial.println();
}
--------------------------------------------------------------------------------
/arduino/neural/Neural.h:
--------------------------------------------------------------------------------
/*
  neural.h - Library for Neural Network.
  Created by Stuart Cording, Sept 9th, 2017.
  Released into the public domain.
*/
#ifndef Neural_h
#define Neural_h

#include "Arduino.h"

// Simple multilayer perceptron (MLP): one input, one hidden, and one
// output layer, sigmoid activation, trained by back-propagation.
class Neural
{
  private:
    double* inputNodeValues;          // current input layer values
    double* hiddenNodeValues;         // hidden layer activations
    double* outputNodeValues;         // output layer activations
    double* desiredOutputNodeValues;  // training targets
    int noOfInputs;
    int noOfHidden;
    int noOfOutputs;

    double** inputToHiddenWeights;
    double** newInputToHiddenWeights;   // staging area used during learning
    double** hiddenToOutputWeights;
    double** newHiddenToOutputWeights;  // staging area used during learning

    double biasInputToHidden;   // bias added to every hidden node's net input
    double biasHiddenToOutput;  // bias added to every output node's net input
    double learningRate;
    double totalNetworkError;   // squared error from the last forward pass
    int learningEpoch;

    boolean learning;  // when true, calculateOutput() also back-propagates

    boolean verbose;   // when true, step-by-step maths is printed to Serial

    void _backPropagation();

  public:
    Neural(int inputs, int hidden, int outputs);
    void calculateOutput();

    void setLearningRate(double bias);
    double getLearningRate();
    void setBiasInputToHidden(double bias);
    double getBiasInputToHidden();
    void setBiasHiddenToOutput(double bias);
    double getBiasHiddenToOutput();
    void turnLearningOn();
    void turnLearningOff();
    void setInputNode(int node, double value);
    double getInputNode(int node);
    void setOutputNodeDesired(int node, double value);
    double getOutputNodeDesired(int node);
    int getNoOfInputNodes();
    int getNoOfHiddenNodes();
    int getNoOfOutputNodes();
    double getOutputNode(int node);
    void displayInputToHiddenWeightsCurrent();
    void displayHiddenToOutputWeightsCurrent();
    void turnVerboseOn();
    void turnVerboseOff();
    double getTotalNetworkError();
    int getEpoch();
    void setInputToHiddenWeight(int input, int hidden, double value);
    double getInputToHiddenWeight(int input, int hidden);
    void setHiddenToOutputWeight(int hidden, int output, double value);
    double getHiddenToOutputWeight(int hidden, int output);
    boolean getLearningStatus();
    void displayInputNodes();
    void displayHiddenNodes();
    void displayOutputNodes();
};

#endif
--------------------------------------------------------------------------------
/arduino/tcsrgbsensor/tcsrgbsensor.ino:
--------------------------------------------------------------------------------
// Basic sanity-check sketch: continuously reads the TCS34725 RGB sensor
// and prints raw R/G/B values to the serial monitor.
#include
// NOTE(review): the include target above was lost in extraction (angle
// brackets eaten) — presumably <Wire.h>; confirm against the repository.
#include "Adafruit_TCS34725.h"

// Use pin 10 to control white LED
#define ledpin 10

// Use Adafruit TCS34725 library
Adafruit_TCS34725 tcs = Adafruit_TCS34725(TCS34725_INTEGRATIONTIME_50MS, TCS34725_GAIN_4X);

void setup() {
  Serial.begin(9600);
  Serial.println("TCS34725 Test!");

  // Turn off LED
  pinMode(ledpin, OUTPUT);
  digitalWrite(ledpin, LOW);

  if (tcs.begin()) {
    Serial.println("Sensor found - testing starts.");
  } else {
    Serial.println("No TCS34725 found ... check your connections");
    Serial.println("and then press reset button.");
    while (1); // halt!
  }

  digitalWrite(ledpin, HIGH); // turn on LED
}

void loop() {
  float red, green, blue;

  delay(60); // takes 50ms to read

  tcs.getRGB(&red, &green, &blue);

  Serial.print("R:\t"); Serial.print(int(red));
  Serial.print("\tG:\t"); Serial.print(int(green));
  Serial.print("\tB:\t"); Serial.println(int(blue));
}
--------------------------------------------------------------------------------
/arduino/tlight_detect/Neural.h:
--------------------------------------------------------------------------------
/*
  neural.h - Library for Neural Network.
  Created by Stuart Cording, Sept 9th, 2017.
  Released into the public domain.
*/
#ifndef Neural_h
#define Neural_h

#include "Arduino.h"

// MLP neural network (copy of arduino/neural/Neural.h kept local to this
// sketch because the Arduino IDE compiles per-sketch folders).
class Neural
{
  private:
    double* inputNodeValues;          // current input layer values
    double* hiddenNodeValues;         // hidden layer activations
    double* outputNodeValues;         // output layer activations
    double* desiredOutputNodeValues;  // training targets
    int noOfInputs;
    int noOfHidden;
    int noOfOutputs;

    double** inputToHiddenWeights;
    double** newInputToHiddenWeights;   // staging area used during learning
    double** hiddenToOutputWeights;
    double** newHiddenToOutputWeights;  // staging area used during learning

    double biasInputToHidden;
    double biasHiddenToOutput;
    double learningRate;
    double totalNetworkError;
    int learningEpoch;

    boolean learning;

    boolean verbose;

    void _backPropagation();

  public:
    Neural(int inputs, int hidden, int outputs);
    void calculateOutput();

    void setLearningRate(double bias);
    double getLearningRate();
    void setBiasInputToHidden(double bias);
    double getBiasInputToHidden();
    void setBiasHiddenToOutput(double bias);
    double getBiasHiddenToOutput();
    void turnLearningOn();
    void turnLearningOff();
    void setInputNode(int node, double value);
    double getInputNode(int node);
    void setOutputNodeDesired(int node, double value);
    double getOutputNodeDesired(int node);
    int getNoOfInputNodes();
    int getNoOfHiddenNodes();
    int getNoOfOutputNodes();
    double getOutputNode(int node);
    void displayInputToHiddenWeightsCurrent();
    void displayHiddenToOutputWeightsCurrent();
    void turnVerboseOn();
    void turnVerboseOff();
    double getTotalNetworkError();
    int getEpoch();
    void setInputToHiddenWeight(int input, int hidden, double value);
    double getInputToHiddenWeight(int input, int hidden);
    void setHiddenToOutputWeight(int hidden, int output, double value);
    double getHiddenToOutputWeight(int hidden, int output);
    boolean getLearningStatus();
    void displayInputNodes();
    void displayHiddenNodes();
    void displayOutputNodes();

    /*


    void seed(int x) {
        randomSeed(x);
    }


    */

};

#endif
--------------------------------------------------------------------------------
/arduino/tlight_detect/tlight_detect.ino:
--------------------------------------------------------------------------------
// Trains the MLP on-device at boot to classify traffic-light colours
// (red / amber / green / other) from TCS34725 RGB readings.
#include
// NOTE(review): include target lost in extraction — presumably <Wire.h>; confirm.
#include "Adafruit_TCS34725.h"
#include "Neural.h"

// Use pin 10 to control white LED
#define ledpin 10

// Use Adafruit TCS34725 library
Adafruit_TCS34725 tcs = Adafruit_TCS34725(TCS34725_INTEGRATIONTIME_50MS, TCS34725_GAIN_4X);

// Use Neural class: 3 inputs (R,G,B), 6 hidden, 4 outputs (one per class)
Neural network(3,6,4);

void setup() {
  Serial.begin(9600);
  Serial.println("Traffic Light with MLP Neural Network");

  // Turn off LED
  pinMode(ledpin, OUTPUT);
  digitalWrite(ledpin, LOW);

  // Seed the random number generator (floating analogue pin as entropy)
  randomSeed(analogRead(0));

  if (tcs.begin()) {
    Serial.println("RGB sensor found.");
  } else {
    Serial.println("No TCS34725 found ... check your connections");
    Serial.println("and then press reset button.");
    while (1); // halt!
  }

  Serial.println("Configuring neural network...");

  //network = new Neural(3,6,4);
  network.setLearningRate(0.5);
  Serial.print("Inputs = ");
  Serial.print(network.getNoOfInputNodes());
  Serial.print(" Hidden = ");
  Serial.print(network.getNoOfHiddenNodes());
  Serial.print(" Outputs = ");
  Serial.println(network.getNoOfOutputNodes());
  network.setBiasInputToHidden(0.35);
  network.setBiasHiddenToOutput(0.60);

  /***********************************
  **
  ** TEACH THE BRAIN !!!
49 | ** 50 | **********************************/ 51 | network.turnLearningOn(); 52 | Serial.println("Neural network is learning..."); 53 | 54 | for (int loop = 0; loop < 30000; ++loop) { 55 | 56 | teachRed(149, 56, 61); 57 | teachAmber(123, 77, 61); 58 | teachGreen(67, 100, 90); 59 | 60 | //teachOther(92, 90, 82); 61 | //teachOther(92, 90, 75); 62 | //teachOther(73, 93, 89); 63 | //teachOther(152, 167, 161); 64 | 65 | if (!(loop % 1000)) { 66 | Serial.print("."); 67 | } 68 | } 69 | Serial.println(); 70 | network.turnLearningOff(); 71 | /*********************************** 72 | ** 73 | ** END OF TEACHING !!! 74 | ** 75 | **********************************/ 76 | 77 | Serial.println("Neural network is ready"); 78 | 79 | digitalWrite(ledpin, HIGH); // turn on LED 80 | } 81 | 82 | void loop() { 83 | float r, g, b; 84 | 85 | delay(60); // takes 50ms to read 86 | 87 | tcs.getRGB(&r, &g, &b); 88 | 89 | // Apply RGB color to MLP and calculate outputs 90 | network.setInputNode(0, r / 255.0); 91 | network.setInputNode(1, g / 255.0); 92 | network.setInputNode(2, b / 255.0); 93 | network.calculateOutput(); 94 | 95 | // Now output traffic light color detected 96 | if (network.getOutputNode(0) > 0.90) { 97 | Serial.println("Red"); 98 | } 99 | if (network.getOutputNode(1) > 0.90) { 100 | Serial.println("Amber"); 101 | } 102 | if (network.getOutputNode(2) > 0.90) { 103 | Serial.println("Green"); 104 | } 105 | if (network.getOutputNode(3) > 0.90) { 106 | Serial.println("Other"); 107 | } 108 | } 109 | 110 | void teachRed(int r, int g, int b) { 111 | float newR, newG, newB; 112 | 113 | randomise(&r, &g, &b); 114 | 115 | newR = (r / 255.0); 116 | newG = (g / 255.0); 117 | newB = (b / 255.0); 118 | 119 | //println("Red:", newR, newG, newB); 120 | 121 | network.setInputNode(0, newR); 122 | network.setInputNode(1, newG); 123 | network.setInputNode(2, newB); 124 | 125 | network.setOutputNodeDesired(0, 0.99); 126 | network.setOutputNodeDesired(1, 0.01); 127 | network.setOutputNodeDesired(2, 
0.01); 128 | network.setOutputNodeDesired(3, 0.01); 129 | 130 | network.calculateOutput(); 131 | } 132 | 133 | void teachAmber(int r, int g, int b) { 134 | float newR, newG, newB; 135 | 136 | randomise(&r, &g, &b); 137 | 138 | newR = (r / 255.0); 139 | newG = (g / 255.0); 140 | newB = (b / 255.0); 141 | 142 | //println("Amber:", newR, newG, newB); 143 | 144 | network.setInputNode(0, newR); 145 | network.setInputNode(1, newG); 146 | network.setInputNode(2, newB); 147 | 148 | network.setOutputNodeDesired(0, 0.01); 149 | network.setOutputNodeDesired(1, 0.99); 150 | network.setOutputNodeDesired(2, 0.01); 151 | network.setOutputNodeDesired(3, 0.01); 152 | 153 | network.calculateOutput(); 154 | } 155 | 156 | void teachGreen(int r, int g, int b) { 157 | float newR, newG, newB; 158 | 159 | randomise(&r, &g, &b); 160 | 161 | newR = (r / 255.0); 162 | newG = (g / 255.0); 163 | newB = (b / 255.0); 164 | 165 | network.setInputNode(0, newR); 166 | network.setInputNode(1, newG); 167 | network.setInputNode(2, newB); 168 | 169 | network.setOutputNodeDesired(0, 0.01); 170 | network.setOutputNodeDesired(1, 0.01); 171 | network.setOutputNodeDesired(2, 0.99); 172 | network.setOutputNodeDesired(3, 0.01); 173 | 174 | network.calculateOutput(); 175 | } 176 | 177 | void teachOther(int r, int g, int b) { 178 | float newR, newG, newB; 179 | 180 | randomise(&r, &g, &b); 181 | 182 | newR = (r / 255.0); 183 | newG = (g / 255.0); 184 | newB = (b / 255.0); 185 | 186 | network.setInputNode(0, newR); 187 | network.setInputNode(1, newG); 188 | network.setInputNode(2, newB); 189 | 190 | network.setOutputNodeDesired(0, 0.01); 191 | network.setOutputNodeDesired(1, 0.01); 192 | network.setOutputNodeDesired(2, 0.01); 193 | network.setOutputNodeDesired(3, 0.99); 194 | 195 | network.calculateOutput(); 196 | } 197 | 198 | void randomise(int *r, int *g, int *b) { 199 | *r += random(-4, 5); 200 | *g += random(-4, 5); 201 | *b += random(-4, 5); 202 | 203 | if (*r > 255) { 204 | *r = 255; 205 | } 206 | if (*r 
< 0 ) { 207 | *r = 0; 208 | } 209 | if (*g > 255) { 210 | *g = 255; 211 | } 212 | if (*g < 0 ) { 213 | *g = 0; 214 | } 215 | if (*b > 255) { 216 | *b = 255; 217 | } 218 | if (*b < 0 ) { 219 | *b = 0; 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /arduino/tlight_weights/Neural.h: -------------------------------------------------------------------------------- 1 | /* 2 | neural.h - Library for Neural Network. 3 | Created by Stuart Cording, Sept 9th, 2017. 4 | Released into the public domain. 5 | */ 6 | #ifndef Neural_h 7 | #define Neural_h 8 | 9 | #include "Arduino.h" 10 | 11 | class Neural 12 | { 13 | private: 14 | double* inputNodeValues; 15 | double* hiddenNodeValues; 16 | double* outputNodeValues; 17 | double* desiredOutputNodeValues; 18 | int noOfInputs; 19 | int noOfHidden; 20 | int noOfOutputs; 21 | 22 | double** inputToHiddenWeights; 23 | double** newInputToHiddenWeights; 24 | double** hiddenToOutputWeights; 25 | double** newHiddenToOutputWeights; 26 | 27 | double biasInputToHidden; 28 | double biasHiddenToOutput; 29 | double learningRate; 30 | double totalNetworkError; 31 | int learningEpoch; 32 | 33 | boolean learning; 34 | 35 | boolean verbose; 36 | 37 | void _backPropagation(); 38 | 39 | public: 40 | Neural(int inputs, int hidden, int outputs); 41 | void calculateOutput(); 42 | 43 | void setLearningRate(double bias); 44 | double getLearningRate(); 45 | void setBiasInputToHidden(double bias); 46 | double getBiasInputToHidden(); 47 | void setBiasHiddenToOutput(double bias); 48 | double getBiasHiddenToOutput(); 49 | void turnLearningOn(); 50 | void turnLearningOff(); 51 | void setInputNode(int node, double value); 52 | double getInputNode(int node); 53 | void setOutputNodeDesired(int node, double value); 54 | double getOutputNodeDesired(int node); 55 | int getNoOfInputNodes(); 56 | int getNoOfHiddenNodes(); 57 | int getNoOfOutputNodes(); 58 | double getOutputNode(int node); 59 | void 
displayInputToHiddenWeightsCurrent();
    void displayHiddenToOutputWeightsCurrent();
    void turnVerboseOn();
    void turnVerboseOff();
    double getTotalNetworkError();
    int getEpoch();
    void setInputToHiddenWeight(int input, int hidden, double value);
    double getInputToHiddenWeight(int input, int hidden);
    void setHiddenToOutputWeight(int hidden, int output, double value);
    double getHiddenToOutputWeight(int hidden, int output);
    boolean getLearningStatus();
    void displayInputNodes();
    void displayHiddenNodes();
    void displayOutputNodes();

    /*


    void seed(int x) {
        randomSeed(x);
    }


    */

};

#endif
--------------------------------------------------------------------------------
/arduino/tlight_weights/tlight_weights.ino:
--------------------------------------------------------------------------------
// Traffic-light classifier that SKIPS on-device training: pre-trained
// weights (captured from a previous training run) are loaded directly
// via importWeights().  testpin frames calculateOutput() for timing
// measurement with a scope/logic analyser.
#include
// NOTE(review): include target lost in extraction — presumably <Wire.h>; confirm.
#include "Adafruit_TCS34725.h"
#include "Neural.h"

// Use pin 10 to control white LED
#define ledpin 10
#define testpin 9

// Use Adafruit TCS34725 library
Adafruit_TCS34725 tcs = Adafruit_TCS34725(TCS34725_INTEGRATIONTIME_50MS, TCS34725_GAIN_4X);

// Use Neural class: 3 inputs, 6 hidden, 4 outputs
Neural network(3,6,4);

void setup() {
  Serial.begin(9600);
  Serial.println("Traffic Light with MLP Neural Network");

  // Turn off LED
  pinMode(ledpin, OUTPUT);
  digitalWrite(ledpin, LOW);
  pinMode(testpin, OUTPUT);
  digitalWrite(testpin, LOW);

  // Seed the random number generator
  randomSeed(analogRead(0));

  if (tcs.begin()) {
    Serial.println("RGB sensor found.");
  } else {
    Serial.println("No TCS34725 found ... check your connections");
    Serial.println("and then press reset button.");
    while (1); // halt!
  }

  Serial.println("Configuring neural network...");

  //network = new Neural(3,6,4);
  network.setLearningRate(0.5);
  Serial.print("Inputs = ");
  Serial.print(network.getNoOfInputNodes());
  Serial.print(" Hidden = ");
  Serial.print(network.getNoOfHiddenNodes());
  Serial.print(" Outputs = ");
  Serial.println(network.getNoOfOutputNodes());
  network.setBiasInputToHidden(0.35);
  network.setBiasHiddenToOutput(0.60);

  /***********************************
  **
  ** PROGRAM THE BRAIN !!!
  **
  **********************************/
  importWeights();

  Serial.println("Neural network is ready");

  digitalWrite(ledpin, HIGH); // turn on LED
}

void loop() {
  float r, g, b;

  delay(60); // takes 50ms to read

  tcs.getRGB(&r, &g, &b);

  // Apply RGB color to MLP (normalised to 0..1) and calculate outputs
  network.setInputNode(0, r / 255.0);
  network.setInputNode(1, g / 255.0);
  network.setInputNode(2, b / 255.0);

  // testpin high while the forward pass runs (for timing measurement)
  digitalWrite(testpin, HIGH);
  network.calculateOutput();
  digitalWrite(testpin, LOW);

  // Now output traffic light color detected (0.98 confidence threshold)
  if (network.getOutputNode(0) > 0.98) {
    Serial.println("Red");
  }
  if (network.getOutputNode(1) > 0.98) {
    Serial.println("Amber");
  }
  if (network.getOutputNode(2) > 0.98) {
    Serial.println("Green");
  }
  if (network.getOutputNode(3) > 0.98) {
    Serial.println("Other");
  }
}

// Loads the pre-trained weight matrices (values dumped from a previous
// on-device training run of the tlight_detect sketch).
void importWeights() {
  // For Input Node 0:
  network.setInputToHiddenWeight( 0 , 0 , 0.9894259 );
  network.setInputToHiddenWeight( 0 , 1 , -0.75018144 );
  network.setInputToHiddenWeight( 0 , 2 , 48.61366 );
  network.setInputToHiddenWeight( 0 , 3 , -3.345678 );
  network.setInputToHiddenWeight( 0 , 4 , 9.416907 );
  network.setInputToHiddenWeight( 0 , 5 , -23.454737 );

  // For Input Node 1:
  network.setInputToHiddenWeight( 1 , 0 , 2.1327987 );
  network.setInputToHiddenWeight( 1 , 1 , 5.392093 );
  network.setInputToHiddenWeight( 1 , 2 , -36.850338 );
  network.setInputToHiddenWeight( 1 , 3 , 12.039759 );
  network.setInputToHiddenWeight( 1 , 4 , -18.738537 );
  network.setInputToHiddenWeight( 1 , 5 , 15.558427 );

  // For Input Node 2:
  network.setInputToHiddenWeight( 2 , 0 , 1.3367374 );
  network.setInputToHiddenWeight( 2 , 1 , -0.74704653 );
  network.setInputToHiddenWeight( 2 , 2 , -1.242378 );
  network.setInputToHiddenWeight( 2 , 3 , -5.9497995 );
  network.setInputToHiddenWeight( 2 , 4 , -1.0344149 );
  network.setInputToHiddenWeight( 2 , 5 , 15.218534 );

  //For Hidden Node 0:
  network.setHiddenToOutputWeight( 0 , 0 , -2.0546117 );
  network.setHiddenToOutputWeight( 0 , 1 , -1.203141 );
  network.setHiddenToOutputWeight( 0 , 2 , -2.7587035 );
  network.setHiddenToOutputWeight( 0 , 3 , -9.748996 );

  //For Hidden Node 1:
  network.setHiddenToOutputWeight( 1 , 0 , -3.9066978 );
  network.setHiddenToOutputWeight( 1 , 1 , 1.2856442 );
  network.setHiddenToOutputWeight( 1 , 2 , -0.48529842 );
  network.setHiddenToOutputWeight( 1 , 3 , -10.828738 );

  //For Hidden Node 2:
  network.setHiddenToOutputWeight( 2 , 0 , 2.6418133 );
  network.setHiddenToOutputWeight( 2 , 1 , 4.8058596 );
  network.setHiddenToOutputWeight( 2 , 2 , -25.040785 );
  network.setHiddenToOutputWeight( 2 , 3 , 21.380386 );

  //For Hidden Node 3:
  network.setHiddenToOutputWeight( 3 , 0 , -7.608626 );
  network.setHiddenToOutputWeight( 3 , 1 , 6.0782804 );
  network.setHiddenToOutputWeight( 3 , 2 , 4.0631976 );
  network.setHiddenToOutputWeight( 3 , 3 , -12.329156 );

  //For Hidden Node 4:
  network.setHiddenToOutputWeight( 4 , 0 , 11.230279 );
  network.setHiddenToOutputWeight( 4 , 1 , -15.336227 );
  network.setHiddenToOutputWeight( 4 , 2 , -11.472162 );
  network.setHiddenToOutputWeight( 4 , 3 , -7.3535438 );

  //For Hidden Node 5:
  network.setHiddenToOutputWeight( 5 , 0 , -14.677024 );
  network.setHiddenToOutputWeight( 5 , 1 , -16.876846 );
  network.setHiddenToOutputWeight( 5 , 2 , 7.690963 );
  network.setHiddenToOutputWeight( 5 , 3 , 20.697523 );

}
--------------------------------------------------------------------------------
/processing/and/and.pde:
--------------------------------------------------------------------------------
/*
 * This code trains a neural network to implement the AND function.
 * It visualises the network. The weights between the node are shown
 * in colour (black +ve; brown -ve) and thickness (thin low value;
 * thick high value).
 */
Neural network;
PFont textFont;

PrintWriter errorOutput;

int learnAnd = 0;                 // which of the 4 AND input patterns is next
float averageError = 100.0;       // rolling mean error over the last 4 patterns
float[] averageErrorArray;        // one error sample per input pattern
int averageErrorPointer = 0;      // ring-buffer write index

void setup() {
  size(640, 480);

  // Output total network error occasionally to a CSV file.
  errorOutput = createWriter("and-error.csv");

  textFont = loadFont("Calibri-48.vlw");

  // We'll use two inputs, four hidden nodes, and one output node.
  network = new Neural(2,4,1);

  // Set learning rate here
  network.setLearningRate(0.5);

  println(network.getNoOfInputNodes(), " ", network.getNoOfHiddenNodes(), " ", network.getNoOfOutputNodes());

  // Set network biasing here
  network.setBiasInputToHidden(0.25);
  network.setBiasHiddenToOutput(0.3);

  network.displayOutputNodes();

  println(network.getTotalNetworkError());

  network.turnLearningOn();

  // Set up average error ring buffer (start high so learning doesn't
  // terminate before real samples arrive)
  averageErrorArray = new float [4];
  for (int x = 0; x < 4; ++x) {
    averageErrorArray[x] = 100.0;
  }
}

void draw() {
  background(180);

  if (network.getLearningStatus()) {
    // Replace line 57 with line 55 to stop learning after a certain number of epochs
    // if (network.getEpoch() > 12000) {
    // If we are learning and have achieved better than 0.05% error...
    if (averageError < 0.0005) {
      network.turnLearningOff();
      // Close file
      errorOutput.flush(); // Writes the remaining data to the file
      errorOutput.close();
      // Slow the sketch right down for the testing phase
      frameRate(0.5);
    }

    // Set up AND inputs and expected output (0.01/0.99 stand in for 0/1
    // because the sigmoid can never reach exactly 0 or 1)
    if (learnAnd == 0) {
      network.setInputNode(0, 0.01);
      network.setInputNode(1, 0.01);
      network.setOutputNodeDesired(0, 0.01);
    } else if (learnAnd == 1) {
      network.setInputNode(0, 0.01);
      network.setInputNode(1, 0.99);
      network.setOutputNodeDesired(0, 0.01);
    } else if (learnAnd == 2) {
      network.setInputNode(0, 0.99);
      network.setInputNode(1, 0.01);
      network.setOutputNodeDesired(0, 0.01);
    } else { // learnAnd == 3
      network.setInputNode(0, 0.99);
      network.setInputNode(1, 0.99);
      network.setOutputNodeDesired(0, 0.99);
    }

    // Calculate the output for the inputs given
    network.calculateOutput();

    // Calculate average error over the last four patterns
    averageErrorArray[averageErrorPointer] = network.getTotalNetworkError();
    averageError =
(averageErrorArray[0] + averageErrorArray[1] + averageErrorArray[1] + averageErrorArray[3]) / 4.0; 90 | ++averageErrorPointer; 91 | if (averageErrorPointer >= 4) { 92 | averageErrorPointer = 0; 93 | } 94 | 95 | if ((network.getEpoch() % 50) == 0){ 96 | print(network.getEpoch()); 97 | print(","); 98 | print(network.getTotalNetworkError()); 99 | print(","); 100 | println(averageError); 101 | 102 | // Write to file 103 | errorOutput.print(network.getEpoch()); 104 | errorOutput.print(","); 105 | errorOutput.print(network.getTotalNetworkError()); 106 | errorOutput.print(","); 107 | errorOutput.println(averageError); 108 | errorOutput.flush(); 109 | } 110 | 111 | // Output current error to main output 112 | { 113 | float strError; 114 | 115 | textAlign(LEFT, CENTER); 116 | strError = averageError * 100.0; 117 | textSize(24); 118 | text("Total Network Error: " + nf(strError,2,4) + "%", 40, 460); 119 | 120 | strokeWeight(10); 121 | stroke(0); 122 | textAlign(CENTER, CENTER); 123 | } 124 | 125 | // Increment to next input combination (00, 01, 10, 11) 126 | ++learnAnd; 127 | 128 | if (learnAnd > 3) { 129 | learnAnd = 0; 130 | } 131 | } else { 132 | // Switch between differnt AND input patterns to show result of learning 133 | 134 | // Set up AND inputs 135 | if (learnAnd == 0) { 136 | network.setInputNode(0, 0.01); 137 | network.setInputNode(1, 0.01); 138 | } else if (learnAnd == 1) { 139 | network.setInputNode(0, 0.01); 140 | network.setInputNode(1, 0.99); 141 | } else if (learnAnd == 2) { 142 | network.setInputNode(0, 0.99); 143 | network.setInputNode(1, 0.01); 144 | } else { // learnAnd == 3 145 | network.setInputNode(0, 0.99); 146 | network.setInputNode(1, 0.99); 147 | } 148 | 149 | network.calculateOutput(); 150 | print(learnAnd); 151 | print(" : "); 152 | println(network.getOutputNode(0)); 153 | 154 | // Increment to next input combination (00, 01, 10, 11) 155 | ++learnAnd; 156 | if (learnAnd > 3) { 157 | learnAnd = 0; 158 | } 159 | } 160 | 161 | // What follows 
outputs the rest of the display including heading, 162 | // depiction of nodes, and the weights as lines. 163 | // Heading 164 | textFont(textFont); 165 | if (network.getLearningStatus()) { 166 | String strEpoch = str(network.getEpoch()); 167 | textAlign(CENTER, CENTER); 168 | textSize(48); 169 | text("Learning - AND", width/2, 40); 170 | textSize(24); 171 | text("Epoch: "+strEpoch, width/2, 80); 172 | } else { 173 | text("Testing - AND", width/2, 40); 174 | } 175 | 176 | strokeWeight(10); 177 | 178 | //ItoH 179 | { 180 | float value = 0.0; 181 | 182 | value = 3.0 * network.getInputToHiddenWeight(0, 0); 183 | if (value < 0) { 184 | stroke(204, 102, 0); 185 | } else { 186 | stroke(0); 187 | } 188 | value = abs(value); 189 | strokeWeight(value); 190 | line(160, 240, 320, 160); 191 | 192 | value = 3.0 * network.getInputToHiddenWeight(0, 1); 193 | if (value < 0) { 194 | stroke(204, 102, 0); 195 | } else { 196 | stroke(0); 197 | } 198 | value = abs(value); 199 | strokeWeight(value); 200 | line(160, 240, 320, 240); 201 | 202 | value = 3.0 * network.getInputToHiddenWeight(0, 2); 203 | if (value < 0) { 204 | stroke(204, 102, 0); 205 | } else { 206 | stroke(0); 207 | } 208 | value = abs(value); 209 | strokeWeight(value); 210 | line(160, 240, 320, 320); 211 | 212 | value = 3.0 * network.getInputToHiddenWeight(0, 3); 213 | if (value < 0) { 214 | stroke(204, 102, 0); 215 | } else { 216 | stroke(0); 217 | } 218 | value = abs(value); 219 | strokeWeight(value); 220 | line(160, 240, 320, 400); 221 | 222 | 223 | 224 | value = 3.0 * network.getInputToHiddenWeight(1, 0); 225 | if (value < 0) { 226 | stroke(204, 102, 0); 227 | } else { 228 | stroke(0); 229 | } 230 | value = abs(value); 231 | strokeWeight(value); 232 | line(160, 320, 320, 160); 233 | 234 | value = 3.0 * network.getInputToHiddenWeight(1, 1); 235 | if (value < 0) { 236 | stroke(204, 102, 0); 237 | } else { 238 | stroke(0); 239 | } 240 | value = abs(value); 241 | strokeWeight(value); 242 | line(160, 320, 320, 240); 243 | 
244 | value = 3.0 * network.getInputToHiddenWeight(1, 2); 245 | if (value < 0) { 246 | stroke(204, 102, 0); 247 | } else { 248 | stroke(0); 249 | } 250 | value = abs(value); 251 | strokeWeight(value); 252 | line(160, 320, 320, 320); 253 | 254 | value = 3.0 * network.getInputToHiddenWeight(1, 3); 255 | if (value < 0) { 256 | stroke(204, 102, 0); 257 | } else { 258 | stroke(0); 259 | } 260 | value = abs(value); 261 | strokeWeight(value); 262 | line(160, 320, 320, 400); 263 | } 264 | 265 | //HtoO 266 | { 267 | float value = 0.0; 268 | 269 | value = 3.0 * network.getHiddenToOutputWeight(0, 0); 270 | if (value < 0) { 271 | stroke(204, 102, 0); 272 | } else { 273 | stroke(0); 274 | } 275 | value = abs(value); 276 | strokeWeight(value); 277 | line(320, 160, 480, 280); 278 | 279 | value = 3.0 * network.getHiddenToOutputWeight(1, 0); 280 | if (value < 0) { 281 | stroke(204, 102, 0); 282 | } else { 283 | stroke(0); 284 | } 285 | value = abs(value); 286 | strokeWeight(value); 287 | line(320, 240, 480, 280); 288 | 289 | value = 3.0 * network.getHiddenToOutputWeight(2, 0); 290 | if (value < 0) { 291 | stroke(204, 102, 0); 292 | } else { 293 | stroke(0); 294 | } 295 | value = abs(value); 296 | strokeWeight(value); 297 | line(320, 320, 480, 280); 298 | 299 | value = 3.0 * network.getHiddenToOutputWeight(3, 0); 300 | if (value < 0) { 301 | stroke(204, 102, 0); 302 | } else { 303 | stroke(0); 304 | } 305 | value = abs(value); 306 | strokeWeight(value); 307 | line(320, 400, 480, 280); 308 | } 309 | 310 | // Input 311 | strokeWeight(10); 312 | stroke(0); 313 | ellipse(160, 240, 55, 55); 314 | ellipse(160, 320, 55, 55); 315 | 316 | // Hidden 317 | strokeWeight(10); 318 | stroke(0); 319 | ellipse(320, 160, 55, 55); 320 | ellipse(320, 240, 55, 55); 321 | ellipse(320, 320, 55, 55); 322 | ellipse(320, 400, 55, 55); 323 | 324 | // Output 325 | ellipse(480, 280, 55, 55); 326 | 327 | textSize(48); 328 | 329 | // Input Node Text 330 | if (network.getInputNode(0) > 0.9) { 331 | text("1", 100, 
240); 332 | } else { 333 | text("0", 100, 240); 334 | } 335 | if (network.getInputNode(1) > 0.9) { 336 | text("1", 100, 320); 337 | } else { 338 | text("0", 100, 320); 339 | } 340 | 341 | // Output Node Text 342 | if (network.getOutputNode(0) > 0.9) { 343 | text("1", 550, 280); 344 | } else { 345 | text("0", 550, 280); 346 | } 347 | } 348 | 349 | void keyPressed() { 350 | errorOutput.flush(); // Writes the remaining data to the file 351 | errorOutput.close(); // Finishes the file 352 | exit(); // Stops the program 353 | } 354 | -------------------------------------------------------------------------------- /processing/and/data/Calibri-48.vlw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/processing/and/data/Calibri-48.vlw -------------------------------------------------------------------------------- /processing/and/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 
8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // 
Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Output Weight[", y, "][", x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 
278 | } 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 
| newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], ", Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /processing/fsxor/fsxor.pde: -------------------------------------------------------------------------------- 1 | /* Fast XOR NN to test learning rate impact 2 | * This does away with the visualization of 3 | * the MLP during learning and operation. 4 | * The output error is saved during learning, 5 | * so try experimenting with learning rates. 6 | * Otherwise, just use as a basis for your 7 | * own code. 
8 | */ 9 | 10 | Neural network; 11 | 12 | PrintWriter errorOutput; 13 | 14 | int learnXor = 0; 15 | float averageError = 100.0; 16 | float[] averageErrorArray; 17 | int averageErrorPointer = 0; 18 | 19 | void setup() { 20 | errorOutput = createWriter("xor-error.csv"); 21 | 22 | network = new Neural(2,4,1); 23 | 24 | network.setLearningRate(0.5); 25 | 26 | println(network.getNoOfInputNodes(), " ", network.getNoOfHiddenNodes(), " ", network.getNoOfOutputNodes()); 27 | 28 | network.setBiasInputToHidden(0.3); 29 | network.setBiasHiddenToOutput(0.6); 30 | 31 | network.displayOutputNodes(); 32 | 33 | println(network.getTotalNetworkError()); 34 | 35 | network.turnLearningOn(); 36 | 37 | // Set up average error array 38 | averageErrorArray = new float [4]; 39 | for (int x = 0; x < 4; ++x) { 40 | averageErrorArray[x] = 100.0; 41 | } 42 | } 43 | 44 | void draw() { 45 | background(180); 46 | 47 | if (network.getLearningStatus()) { 48 | // Replace line 51 with line 49 to stop learning after a certain number of epochs 49 | // if (network.getEpoch() > 12000) { 50 | // If we are learning and have achieved better than 0.05% error... 
51 | if (averageError < 0.0005) { 52 | network.turnLearningOff(); 53 | // Close file 54 | errorOutput.flush(); // Writes the remaining data to the file 55 | errorOutput.close(); 56 | } 57 | 58 | // Set up XOR inputs and expected ouput 59 | if (learnXor == 0) { 60 | network.setInputNode(0, 0.01); 61 | network.setInputNode(1, 0.01); 62 | network.setOutputNodeDesired(0, 0.01); 63 | } else if (learnXor == 1) { 64 | network.setInputNode(0, 0.01); 65 | network.setInputNode(1, 0.99); 66 | network.setOutputNodeDesired(0, 0.99); 67 | } else if (learnXor == 2) { 68 | network.setInputNode(0, 0.99); 69 | network.setInputNode(1, 0.01); 70 | network.setOutputNodeDesired(0, 0.99); 71 | } else { // learnXor == 3 72 | network.setInputNode(0, 0.99); 73 | network.setInputNode(1, 0.99); 74 | network.setOutputNodeDesired(0, 0.01); 75 | } 76 | 77 | network.calculateOutput(); 78 | 79 | // Calculate average error 80 | averageErrorArray[averageErrorPointer] = network.getTotalNetworkError(); 81 | averageError = (averageErrorArray[0] + averageErrorArray[1] + averageErrorArray[1] + averageErrorArray[3]) / 4.0; 82 | ++averageErrorPointer; 83 | if (averageErrorPointer >= 4) { 84 | averageErrorPointer = 0; 85 | } 86 | 87 | // Write network error every 50 epochs to CSV file. 88 | if ((network.getEpoch() % 50) == 0) { 89 | print(network.getEpoch()); 90 | print(","); 91 | print(network.getTotalNetworkError()); 92 | print(","); 93 | println(averageError); 94 | 95 | // Write to file 96 | errorOutput.print(network.getEpoch()); 97 | errorOutput.print(","); 98 | errorOutput.print(network.getTotalNetworkError()); 99 | errorOutput.print(","); 100 | errorOutput.println(averageError); 101 | errorOutput.flush(); 102 | } 103 | 104 | // Increment to next input combination (00, 01, 10, 11) 105 | ++learnXor; 106 | 107 | if (learnXor > 3) { 108 | learnXor = 0; 109 | } 110 | } else { 111 | // With learning completed, 112 | // switch between different XOR input patterns 113 | // and display output. 
114 | 115 | // Set up XOR inputs 116 | if (learnXor == 0) { 117 | network.setInputNode(0, 0.01); 118 | network.setInputNode(1, 0.01); 119 | } else if (learnXor == 1) { 120 | network.setInputNode(0, 0.01); 121 | network.setInputNode(1, 0.99); 122 | } else if (learnXor == 2) { 123 | network.setInputNode(0, 0.99); 124 | network.setInputNode(1, 0.01); 125 | } else { // learnXor == 3 126 | network.setInputNode(0, 0.99); 127 | network.setInputNode(1, 0.99); 128 | } 129 | 130 | network.calculateOutput(); 131 | print(learnXor); 132 | print(" : "); 133 | println(network.getOutputNode(0)); 134 | 135 | // Increment to next input combination (00, 01, 10, 11) 136 | ++learnXor; 137 | if (learnXor > 3) { 138 | learnXor = 0; 139 | } 140 | } 141 | } 142 | 143 | void keyPressed() { 144 | errorOutput.flush(); // Writes the remaining data to the file 145 | errorOutput.close(); // Finishes the file 146 | exit(); // Stops the program 147 | } 148 | -------------------------------------------------------------------------------- /processing/fsxor/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 
8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // 
Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Output Weight[", y, "][", x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 
278 | } 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 
| newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], ", Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /processing/neural/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 
8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // 
Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Output Weight[", y, "][", x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 
278 | } 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 
| newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], ", Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /processing/nn_test/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 
8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // 
Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Output Weight[", y, "][", x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 
278 | } 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 
| newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], ", Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /processing/nn_test/nn_test.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * This code demonstrates the example by Matt Mazur: 3 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 4 | * Code by Stuart Cording 5 | * aka codinghead@gmail.com 6 | */ 7 | 8 | Neural network; 9 | 10 | void setup() { 11 | // This neural network uses two inputs, two hidden nodes, and two output nodes. 12 | network = new Neural(2,2,2); 13 | 14 | // Set learning rate here 15 | network.setLearningRate(0.5); 16 | 17 | // Set Neural class to be 'verbose' so we can see what it is doing 18 | network.turnVerboseOn(); 19 | 20 | println("Matt Mazur Neural Network Backpropagation Example"); 21 | println("-------------------------------------------------"); 22 | println(); 23 | println("Structure is:"); 24 | println("Input nodes: ", network.getNoOfInputNodes(), "; Hidden nodes: ", network.getNoOfHiddenNodes(), 25 | "; Output nodes: ", network.getNoOfOutputNodes()); 26 | 27 | // Set network biasing here 28 | // b1 = 0.35 29 | network.setBiasInputToHidden(0.35); 30 | // b2 = 0.60 31 | network.setBiasHiddenToOutput(0.6); 32 | 33 | // The Neural class constructor gives the weights random value. 34 | // Here we use the values from Matt Mazur's example. 
35 | // We start with the input-to-hidden weights: 36 | // w1 = 0.15 (i1 to h1) 37 | network.setInputToHiddenWeight(0, 0, 0.15); 38 | // w3 = 0.25 (i1 to h2) 39 | network.setInputToHiddenWeight(0, 1, 0.25); 40 | // w2 = 0.20 (i2 to h1) 41 | network.setInputToHiddenWeight(1, 0, 0.2); 42 | // w4 = 0.30 (i2 to h2) 43 | network.setInputToHiddenWeight(1, 1, 0.30); 44 | 45 | // Next we configure the hidden-to-output weights: 46 | // w5 = 0.40 (h1 to o1) 47 | network.setHiddenToOutputWeight(0, 0, 0.4); 48 | // w7 = 0.50 (h1 to o2) 49 | network.setHiddenToOutputWeight(0, 1, 0.5); 50 | // w6 = 0.45 (h2 to o1) 51 | network.setHiddenToOutputWeight(1, 0, 0.45); 52 | // w8 = 0.55 (h2 to o2) 53 | network.setHiddenToOutputWeight(1, 1, 0.55); 54 | 55 | // Configure the inputs 56 | // i1 = 0.05 57 | network.setInputNode(0, 0.05); 58 | // i2 = 0.10 59 | network.setInputNode(1, 0.1); 60 | 61 | // Now declare the values we like to achieve at the outputs for this 62 | // input combination 63 | // o1 should be 0.01 64 | network.setOutputNodeDesired(0, 0.01); 65 | // o2 should be 0.99 66 | network.setOutputNodeDesired(1, 0.99); 67 | 68 | // We now perform a forwardpass using the configured inputs, weights 69 | // and bias value. Verbose is on, so you will see the working 70 | println(); 71 | println("Calculating values for o1 and o2..."); 72 | println(); 73 | network.calculateOutput(); 74 | println(); 75 | 76 | // Let's summarise the current state 77 | println("...forwardpass complete. 
Results:"); 78 | println("For i1 = ", network.getInputNode(0), " and i2 = ", network.getInputNode(1)); 79 | println("o1 = ", network.getOutputNode(0), " (but we want: ", network.getOutputNodeDesired(0), ")"); 80 | println("o2 = ", network.getOutputNode(1), " (but we want: ", network.getOutputNodeDesired(1), ")"); 81 | println(); 82 | println("Total network error is: ", network.getTotalNetworkError()); 83 | println(); 84 | 85 | // Now we'll perform a learning cycle using backpropagation 86 | // This enables a backpropagation cycle everytime the calculateOuput() method is called 87 | network.turnLearningOn(); 88 | 89 | println("Learning is ON"); 90 | println("Calculating outputs again and performing backpropagation..."); 91 | println(); 92 | network.calculateOutput(); 93 | 94 | // We'll turn learning off again... 95 | network.turnLearningOff(); 96 | // ...and run another forwardpass without verbose on: 97 | network.turnVerboseOff(); 98 | network.calculateOutput(); 99 | 100 | println("...backpropagation complete. 
Results:"); 101 | println("For i1 = ", network.getInputNode(0), " and i2 = ", network.getInputNode(1)); 102 | println("o1 = ", network.getOutputNode(0), " (but we want: ", network.getOutputNodeDesired(0), ")"); 103 | println("o2 = ", network.getOutputNode(1), " (but we want: ", network.getOutputNodeDesired(1), ")"); 104 | println(); 105 | println("Total network error is: ", network.getTotalNetworkError()); 106 | println(); 107 | } 108 | 109 | void draw() { 110 | } 111 | -------------------------------------------------------------------------------- /processing/or/data/Calibri-48.vlw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/processing/or/data/Calibri-48.vlw -------------------------------------------------------------------------------- /processing/or/elektor_or.pde: -------------------------------------------------------------------------------- 1 | Neural network; 2 | PFont textFont; 3 | 4 | PrintWriter errorOutput; 5 | 6 | float[] errorGraph = new float[20000]; 7 | int errorGraphCount = 0; 8 | 9 | int learnOr = 0; 10 | 11 | void setup() { 12 | size(640, 480); 13 | 14 | errorOutput = createWriter("or-error.csv"); 15 | 16 | textFont = loadFont("Calibri-48.vlw"); 17 | //frameRate(30); 18 | 19 | network = new Neural(2,4,1); 20 | 21 | network.setLearningRate(0.45); 22 | 23 | println(network.getNoOfInputNodes(), " ", network.getNoOfHiddenNodes(), " ", network.getNoOfOutputNodes()); 24 | 25 | network.setBiasInputToHidden(0.25); 26 | network.setBiasHiddenToOutput(0.3); 27 | 28 | network.displayOutputNodes(); 29 | 30 | println(network.getTotalNetworkError()); 31 | 32 | network.turnLearningOn(); 33 | 34 | for (int loop = 0; loop < 20000; ++loop) { 35 | errorGraph[loop] = 0.0; 36 | } 37 | } 38 | 39 | void draw() { 40 | background(180); 41 | 42 | if (network.getLearningStatus()) { 43 | // If we are learning and have achieved < 
40000 cycles... 44 | if (network.getEpoch() > 30000) { 45 | network.turnLearningOff(); 46 | // Close file 47 | errorOutput.flush(); // Writes the remaining data to the file 48 | errorOutput.close(); 49 | frameRate(0.5); 50 | } 51 | 52 | // Set up OR inputs 53 | if (learnOr == 0) { 54 | network.setInputNode(0, 0.01); 55 | network.setInputNode(1, 0.01); 56 | network.setOutputNodeDesired(0, 0.01); 57 | } else if (learnOr == 1) { 58 | network.setInputNode(0, 0.01); 59 | network.setInputNode(1, 0.99); 60 | network.setOutputNodeDesired(0, 0.99); 61 | } else if (learnOr == 2) { 62 | network.setInputNode(0, 0.99); 63 | network.setInputNode(1, 0.01); 64 | network.setOutputNodeDesired(0, 0.99); 65 | } else { // learnOr == 3 66 | network.setInputNode(0, 0.99); 67 | network.setInputNode(1, 0.99); 68 | network.setOutputNodeDesired(0, 0.99); 69 | } 70 | 71 | network.calculateOutput(); 72 | 73 | //print(network.getEpoch()); 74 | //print(" : "); 75 | //print(learnOr); 76 | //print(" : "); 77 | //println(network.getTotalNetworkError()); 78 | 79 | if ((network.getEpoch() % 50) == 0) { 80 | print(network.getEpoch()); 81 | print(","); 82 | println(network.getTotalNetworkError()); 83 | 84 | // Write to file 85 | errorOutput.print(network.getEpoch()); 86 | errorOutput.print(","); 87 | errorOutput.println(network.getTotalNetworkError()); 88 | errorOutput.flush(); 89 | } 90 | // Output current error 91 | { 92 | float strError; 93 | 94 | textAlign(LEFT, CENTER); 95 | strError = network.getTotalNetworkError() * 100.0; 96 | textSize(24); 97 | text("Error: " + nf(strError,2,4) + "%", 40, 460); 98 | 99 | strokeWeight(10); 100 | stroke(0); 101 | textAlign(CENTER, CENTER); 102 | } 103 | 104 | // Increment to next input combination (00, 01, 10, 11) 105 | ++learnOr; 106 | 107 | if (learnOr > 3) { 108 | learnOr = 0; 109 | } 110 | } else { 111 | // Switch between differnt AND input patterns 112 | 113 | // Set up AND inputs 114 | if (learnOr == 0) { 115 | network.setInputNode(0, 0.01); 116 | 
network.setInputNode(1, 0.01); 117 | } else if (learnOr == 1) { 118 | network.setInputNode(0, 0.01); 119 | network.setInputNode(1, 0.99); 120 | } else if (learnOr == 2) { 121 | network.setInputNode(0, 0.99); 122 | network.setInputNode(1, 0.01); 123 | } else { // learnOr == 3 124 | network.setInputNode(0, 0.99); 125 | network.setInputNode(1, 0.99); 126 | } 127 | 128 | network.calculateOutput(); 129 | print(learnOr); 130 | print(" : "); 131 | println(network.getOutputNode(0)); 132 | 133 | // Increment to next input combination (00, 01, 10, 11) 134 | ++learnOr; 135 | if (learnOr > 3) { 136 | learnOr = 0; 137 | } 138 | } 139 | 140 | // Heading 141 | textFont(textFont); 142 | if (network.getLearningStatus()) { 143 | String strEpoch = str(network.getEpoch()); 144 | textAlign(CENTER, CENTER); 145 | textSize(48); 146 | text("Learning - OR", width/2, 40); 147 | textSize(24); 148 | text("Epoch: "+strEpoch, width/2, 80); 149 | } else { 150 | text("Testing - OR", width/2, 40); 151 | } 152 | 153 | strokeWeight(10); 154 | 155 | //ItoH 156 | { 157 | float value = 0.0; 158 | 159 | value = 3.0 * network.getInputToHiddenWeight(0, 0); 160 | if (value < 0) { 161 | stroke(204, 102, 0); 162 | } else { 163 | stroke(0); 164 | } 165 | value = abs(value); 166 | strokeWeight(value); 167 | line(160, 240, 320, 160); 168 | 169 | value = 3.0 * network.getInputToHiddenWeight(0, 1); 170 | if (value < 0) { 171 | stroke(204, 102, 0); 172 | } else { 173 | stroke(0); 174 | } 175 | value = abs(value); 176 | strokeWeight(value); 177 | line(160, 240, 320, 240); 178 | 179 | value = 3.0 * network.getInputToHiddenWeight(0, 2); 180 | if (value < 0) { 181 | stroke(204, 102, 0); 182 | } else { 183 | stroke(0); 184 | } 185 | value = abs(value); 186 | strokeWeight(value); 187 | line(160, 240, 320, 320); 188 | 189 | value = 3.0 * network.getInputToHiddenWeight(0, 3); 190 | if (value < 0) { 191 | stroke(204, 102, 0); 192 | } else { 193 | stroke(0); 194 | } 195 | value = abs(value); 196 | strokeWeight(value); 197 | 
line(160, 240, 320, 400); 198 | 199 | 200 | 201 | value = 3.0 * network.getInputToHiddenWeight(1, 0); 202 | if (value < 0) { 203 | stroke(204, 102, 0); 204 | } else { 205 | stroke(0); 206 | } 207 | value = abs(value); 208 | strokeWeight(value); 209 | line(160, 320, 320, 160); 210 | 211 | value = 3.0 * network.getInputToHiddenWeight(1, 1); 212 | if (value < 0) { 213 | stroke(204, 102, 0); 214 | } else { 215 | stroke(0); 216 | } 217 | value = abs(value); 218 | strokeWeight(value); 219 | line(160, 320, 320, 240); 220 | 221 | value = 3.0 * network.getInputToHiddenWeight(1, 2); 222 | if (value < 0) { 223 | stroke(204, 102, 0); 224 | } else { 225 | stroke(0); 226 | } 227 | value = abs(value); 228 | strokeWeight(value); 229 | line(160, 320, 320, 320); 230 | 231 | value = 3.0 * network.getInputToHiddenWeight(1, 3); 232 | if (value < 0) { 233 | stroke(204, 102, 0); 234 | } else { 235 | stroke(0); 236 | } 237 | value = abs(value); 238 | strokeWeight(value); 239 | line(160, 320, 320, 400); 240 | } 241 | 242 | //HtoO 243 | { 244 | float value = 0.0; 245 | 246 | value = 3.0 * network.getHiddenToOutputWeight(0, 0); 247 | if (value < 0) { 248 | stroke(204, 102, 0); 249 | } else { 250 | stroke(0); 251 | } 252 | value = abs(value); 253 | strokeWeight(value); 254 | line(320, 160, 480, 280); 255 | 256 | value = 3.0 * network.getHiddenToOutputWeight(1, 0); 257 | if (value < 0) { 258 | stroke(204, 102, 0); 259 | } else { 260 | stroke(0); 261 | } 262 | value = abs(value); 263 | strokeWeight(value); 264 | line(320, 240, 480, 280); 265 | 266 | value = 3.0 * network.getHiddenToOutputWeight(2, 0); 267 | if (value < 0) { 268 | stroke(204, 102, 0); 269 | } else { 270 | stroke(0); 271 | } 272 | value = abs(value); 273 | strokeWeight(value); 274 | line(320, 320, 480, 280); 275 | 276 | value = 3.0 * network.getHiddenToOutputWeight(3, 0); 277 | if (value < 0) { 278 | stroke(204, 102, 0); 279 | } else { 280 | stroke(0); 281 | } 282 | value = abs(value); 283 | strokeWeight(value); 284 | line(320, 
400, 480, 280); 285 | } 286 | 287 | // Input 288 | strokeWeight(10); 289 | stroke(0); 290 | ellipse(160, 240, 55, 55); 291 | ellipse(160, 320, 55, 55); 292 | 293 | // Hidden 294 | strokeWeight(10); 295 | stroke(0); 296 | ellipse(320, 160, 55, 55); 297 | ellipse(320, 240, 55, 55); 298 | ellipse(320, 320, 55, 55); 299 | ellipse(320, 400, 55, 55); 300 | 301 | // Output 302 | ellipse(480, 280, 55, 55); 303 | 304 | textSize(48); 305 | 306 | // Input Node Text 307 | if (network.getInputNode(0) > 0.9) { 308 | text("1", 100, 240); 309 | } else { 310 | text("0", 100, 240); 311 | } 312 | if (network.getInputNode(1) > 0.9) { 313 | text("1", 100, 320); 314 | } else { 315 | text("0", 100, 320); 316 | } 317 | 318 | // Output Node Text 319 | if (network.getOutputNode(0) > 0.9) { 320 | text("1", 550, 280); 321 | } else { 322 | text("0", 550, 280); 323 | } 324 | } 325 | 326 | void keyPressed() { 327 | errorOutput.flush(); // Writes the remaining data to the file 328 | errorOutput.close(); // Finishes the file 329 | exit(); // Stops the program 330 | } -------------------------------------------------------------------------------- /processing/xor/data/Calibri-48.vlw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/processing/xor/data/Calibri-48.vlw -------------------------------------------------------------------------------- /processing/xor/xor.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * This code trains a neural network to implement the XOR function. 3 | * It visualises the network. The weights between the node are shown 4 | * in colour (black +ve; brown -ve) and thickness (thin low value; 5 | * thick high value). 
6 | */ 7 | Neural network; 8 | PFont textFont; 9 | 10 | PrintWriter errorOutput; 11 | 12 | int learnXor = 0; 13 | float averageError = 100.0; 14 | float[] averageErrorArray; 15 | int averageErrorPointer = 0; 16 | 17 | void setup() { 18 | size(640, 480); 19 | 20 | // Output total network error occaisionally to a CSV file. 21 | errorOutput = createWriter("xor-error.csv"); 22 | 23 | textFont = loadFont("Calibri-48.vlw"); 24 | 25 | // We'll use two inputs, four hidden nodes, and one output node. 26 | network = new Neural(2,4,1); 27 | 28 | // Set learning rate here 29 | network.setLearningRate(0.5); 30 | 31 | println(network.getNoOfInputNodes(), " ", network.getNoOfHiddenNodes(), " ", network.getNoOfOutputNodes()); 32 | 33 | // Set network biasing here 34 | network.setBiasInputToHidden(0.25); 35 | network.setBiasHiddenToOutput(0.3); 36 | 37 | network.displayOutputNodes(); 38 | 39 | println(network.getTotalNetworkError()); 40 | 41 | network.turnLearningOn(); 42 | 43 | // Set up average error array 44 | averageErrorArray = new float [4]; 45 | for (int x = 0; x < 4; ++x) { 46 | averageErrorArray[x] = 100.0; 47 | } 48 | } 49 | 50 | void draw() { 51 | background(180); 52 | 53 | if (network.getLearningStatus()) { 54 | // Replace line 57 with line 55 to stop learning after a certain number of epochs 55 | // if (network.getEpoch() > 12000) { 56 | // If we are learning and have achieved better than 0.05% error... 
57 | if (averageError < 0.0005) { 58 | network.turnLearningOff(); 59 | // Close file 60 | errorOutput.flush(); // Writes the remaining data to the file 61 | errorOutput.close(); 62 | frameRate(0.5); 63 | } 64 | 65 | // Set up XOR inputs and expected output 66 | if (learnXor == 0) { 67 | network.setInputNode(0, 0.01); 68 | network.setInputNode(1, 0.01); 69 | network.setOutputNodeDesired(0, 0.01); 70 | } else if (learnXor == 1) { 71 | network.setInputNode(0, 0.01); 72 | network.setInputNode(1, 0.99); 73 | network.setOutputNodeDesired(0, 0.99); 74 | } else if (learnXor == 2) { 75 | network.setInputNode(0, 0.99); 76 | network.setInputNode(1, 0.01); 77 | network.setOutputNodeDesired(0, 0.99); 78 | } else { // learnXor == 3 79 | network.setInputNode(0, 0.99); 80 | network.setInputNode(1, 0.99); 81 | network.setOutputNodeDesired(0, 0.01); 82 | } 83 | 84 | // Calculate the output for the inputs given 85 | network.calculateOutput(); 86 | 87 | // Calculate average error 88 | averageErrorArray[averageErrorPointer] = network.getTotalNetworkError(); 89 | averageError = (averageErrorArray[0] + averageErrorArray[1] + averageErrorArray[2] + averageErrorArray[3]) / 4.0; 90 | ++averageErrorPointer; 91 | if (averageErrorPointer >= 4) { 92 | averageErrorPointer = 0; 93 | } 94 | 95 | if ((network.getEpoch() % 50) == 0){ 96 | print(network.getEpoch()); 97 | print(","); 98 | print(network.getTotalNetworkError()); 99 | print(","); 100 | println(averageError); 101 | 102 | // Write to file 103 | errorOutput.print(network.getEpoch()); 104 | errorOutput.print(","); 105 | errorOutput.print(network.getTotalNetworkError()); 106 | errorOutput.print(","); 107 | errorOutput.println(averageError); 108 | errorOutput.flush(); 109 | } 110 | 111 | // Output current error to main output 112 | { 113 | float strError; 114 | 115 | textAlign(LEFT, CENTER); 116 | strError = averageError * 100.0; 117 | textSize(24); 118 | text("Total Network Error: " + nf(strError,2,4) + "%", 40, 460); 119 | 120 | 
strokeWeight(10); 121 | stroke(0); 122 | textAlign(CENTER, CENTER); 123 | } 124 | 125 | // Increment to next input combination (00, 01, 10, 11) 126 | ++learnXor; 127 | 128 | if (learnXor > 3) { 129 | learnXor = 0; 130 | } 131 | } else { 132 | // Switch between differnt AND input patterns to show result of learning 133 | 134 | // Set up AND inputs 135 | if (learnXor == 0) { 136 | network.setInputNode(0, 0.01); 137 | network.setInputNode(1, 0.01); 138 | } else if (learnXor == 1) { 139 | network.setInputNode(0, 0.01); 140 | network.setInputNode(1, 0.99); 141 | } else if (learnXor == 2) { 142 | network.setInputNode(0, 0.99); 143 | network.setInputNode(1, 0.01); 144 | } else { // learnXor == 3 145 | network.setInputNode(0, 0.99); 146 | network.setInputNode(1, 0.99); 147 | } 148 | 149 | network.calculateOutput(); 150 | print(learnXor); 151 | print(" : "); 152 | println(network.getOutputNode(0)); 153 | 154 | // Increment to next input combination (00, 01, 10, 11) 155 | ++learnXor; 156 | if (learnXor > 3) { 157 | learnXor = 0; 158 | } 159 | } 160 | 161 | // What follows outputs the rest of the display including heading, 162 | // depiction of nodes, and the weights as lines. 
163 | // Heading 164 | textFont(textFont); 165 | if (network.getLearningStatus()) { 166 | String strEpoch = str(network.getEpoch()); 167 | textAlign(CENTER, CENTER); 168 | textSize(48); 169 | text("Learning - XOR", width/2, 40); 170 | textSize(24); 171 | text("Epoch: "+strEpoch, width/2, 80); 172 | } else { 173 | text("Testing - XOR", width/2, 40); 174 | } 175 | 176 | strokeWeight(10); 177 | 178 | //ItoH 179 | { 180 | float value = 0.0; 181 | 182 | value = 3.0 * network.getInputToHiddenWeight(0, 0); 183 | if (value < 0) { 184 | stroke(204, 102, 0); 185 | } else { 186 | stroke(0); 187 | } 188 | value = abs(value); 189 | strokeWeight(value); 190 | line(160, 240, 320, 160); 191 | 192 | value = 3.0 * network.getInputToHiddenWeight(0, 1); 193 | if (value < 0) { 194 | stroke(204, 102, 0); 195 | } else { 196 | stroke(0); 197 | } 198 | value = abs(value); 199 | strokeWeight(value); 200 | line(160, 240, 320, 240); 201 | 202 | value = 3.0 * network.getInputToHiddenWeight(0, 2); 203 | if (value < 0) { 204 | stroke(204, 102, 0); 205 | } else { 206 | stroke(0); 207 | } 208 | value = abs(value); 209 | strokeWeight(value); 210 | line(160, 240, 320, 320); 211 | 212 | value = 3.0 * network.getInputToHiddenWeight(0, 3); 213 | if (value < 0) { 214 | stroke(204, 102, 0); 215 | } else { 216 | stroke(0); 217 | } 218 | value = abs(value); 219 | strokeWeight(value); 220 | line(160, 240, 320, 400); 221 | 222 | 223 | 224 | value = 3.0 * network.getInputToHiddenWeight(1, 0); 225 | if (value < 0) { 226 | stroke(204, 102, 0); 227 | } else { 228 | stroke(0); 229 | } 230 | value = abs(value); 231 | strokeWeight(value); 232 | line(160, 320, 320, 160); 233 | 234 | value = 3.0 * network.getInputToHiddenWeight(1, 1); 235 | if (value < 0) { 236 | stroke(204, 102, 0); 237 | } else { 238 | stroke(0); 239 | } 240 | value = abs(value); 241 | strokeWeight(value); 242 | line(160, 320, 320, 240); 243 | 244 | value = 3.0 * network.getInputToHiddenWeight(1, 2); 245 | if (value < 0) { 246 | stroke(204, 102, 
0); 247 | } else { 248 | stroke(0); 249 | } 250 | value = abs(value); 251 | strokeWeight(value); 252 | line(160, 320, 320, 320); 253 | 254 | value = 3.0 * network.getInputToHiddenWeight(1, 3); 255 | if (value < 0) { 256 | stroke(204, 102, 0); 257 | } else { 258 | stroke(0); 259 | } 260 | value = abs(value); 261 | strokeWeight(value); 262 | line(160, 320, 320, 400); 263 | } 264 | 265 | //HtoO 266 | { 267 | float value = 0.0; 268 | 269 | value = 3.0 * network.getHiddenToOutputWeight(0, 0); 270 | if (value < 0) { 271 | stroke(204, 102, 0); 272 | } else { 273 | stroke(0); 274 | } 275 | value = abs(value); 276 | strokeWeight(value); 277 | line(320, 160, 480, 280); 278 | 279 | value = 3.0 * network.getHiddenToOutputWeight(1, 0); 280 | if (value < 0) { 281 | stroke(204, 102, 0); 282 | } else { 283 | stroke(0); 284 | } 285 | value = abs(value); 286 | strokeWeight(value); 287 | line(320, 240, 480, 280); 288 | 289 | value = 3.0 * network.getHiddenToOutputWeight(2, 0); 290 | if (value < 0) { 291 | stroke(204, 102, 0); 292 | } else { 293 | stroke(0); 294 | } 295 | value = abs(value); 296 | strokeWeight(value); 297 | line(320, 320, 480, 280); 298 | 299 | value = 3.0 * network.getHiddenToOutputWeight(3, 0); 300 | if (value < 0) { 301 | stroke(204, 102, 0); 302 | } else { 303 | stroke(0); 304 | } 305 | value = abs(value); 306 | strokeWeight(value); 307 | line(320, 400, 480, 280); 308 | } 309 | 310 | // Input 311 | strokeWeight(10); 312 | stroke(0); 313 | ellipse(160, 240, 55, 55); 314 | ellipse(160, 320, 55, 55); 315 | 316 | // Hidden 317 | strokeWeight(10); 318 | stroke(0); 319 | ellipse(320, 160, 55, 55); 320 | ellipse(320, 240, 55, 55); 321 | ellipse(320, 320, 55, 55); 322 | ellipse(320, 400, 55, 55); 323 | 324 | // Output 325 | ellipse(480, 280, 55, 55); 326 | 327 | textSize(48); 328 | 329 | // Input Node Text 330 | if (network.getInputNode(0) > 0.9) { 331 | text("1", 100, 240); 332 | } else { 333 | text("0", 100, 240); 334 | } 335 | if (network.getInputNode(1) > 0.9) { 336 | 
text("1", 100, 320); 337 | } else { 338 | text("0", 100, 320); 339 | } 340 | 341 | // Output Node Text 342 | if (network.getOutputNode(0) > 0.9) { 343 | text("1", 550, 280); 344 | } else { 345 | text("0", 550, 280); 346 | } 347 | } 348 | 349 | void keyPressed() { 350 | errorOutput.flush(); // Writes the remaining data to the file 351 | errorOutput.close(); // Finishes the file 352 | exit(); // Stops the program 353 | } 354 | -------------------------------------------------------------------------------- /processing/xor/xorpde.old: -------------------------------------------------------------------------------- 1 | Neural network; 2 | PFont textFont; 3 | 4 | PrintWriter errorOutput; 5 | 6 | float[] errorGraph = new float[20000]; 7 | int errorGraphCount = 0; 8 | 9 | int learnXor = 0; 10 | 11 | void setup() { 12 | size(640, 480); 13 | 14 | errorOutput = createWriter("xor-error.csv"); 15 | 16 | textFont = loadFont("Calibri-48.vlw"); 17 | //frameRate(30); 18 | 19 | network = new Neural(2,4,1); 20 | 21 | network.setLearningRate(0.1); 22 | 23 | println(network.getNoOfInputNodes(), " ", network.getNoOfHiddenNodes(), " ", network.getNoOfOutputNodes()); 24 | 25 | network.setBiasInputToHidden(0.3); 26 | network.setBiasHiddenToOutput(0.6); 27 | 28 | network.displayOutputNodes(); 29 | 30 | println(network.getTotalNetworkError()); 31 | 32 | network.turnLearningOn(); 33 | 34 | for (int loop = 0; loop < 20000; ++loop) { 35 | errorGraph[loop] = 0.0; 36 | } 37 | } 38 | 39 | void draw() { 40 | background(180); 41 | 42 | if (network.getLearningStatus()) { 43 | // If we are learning and have achieved < 40000 cycles... 
44 | if (network.getEpoch() > 30000) { 45 | network.turnLearningOff(); 46 | // Close file 47 | errorOutput.flush(); // Writes the remaining data to the file 48 | errorOutput.close(); 49 | frameRate(0.5); 50 | } 51 | 52 | // Set up XOR inputs 53 | if (learnXor == 0) { 54 | network.setInputNode(0, 0.01); 55 | network.setInputNode(1, 0.01); 56 | network.setOutputNodeDesired(0, 0.01); 57 | } else if (learnXor == 1) { 58 | network.setInputNode(0, 0.01); 59 | network.setInputNode(1, 0.99); 60 | network.setOutputNodeDesired(0, 0.99); 61 | } else if (learnXor == 2) { 62 | network.setInputNode(0, 0.99); 63 | network.setInputNode(1, 0.01); 64 | network.setOutputNodeDesired(0, 0.99); 65 | } else { // learnXor == 3 66 | network.setInputNode(0, 0.99); 67 | network.setInputNode(1, 0.99); 68 | network.setOutputNodeDesired(0, 0.01); 69 | } 70 | 71 | network.calculateOutput(); 72 | 73 | //print(network.getEpoch()); 74 | //print(" : "); 75 | //print(learnXor); 76 | //print(" : "); 77 | //println(network.getTotalNetworkError()); 78 | 79 | if ((network.getEpoch() % 50) == 0) { 80 | print(network.getEpoch()); 81 | print(","); 82 | println(network.getTotalNetworkError()); 83 | 84 | // Write to file 85 | errorOutput.print(network.getEpoch()); 86 | errorOutput.print(","); 87 | errorOutput.println(network.getTotalNetworkError()); 88 | errorOutput.flush(); 89 | } 90 | // Output current error 91 | { 92 | float strError; 93 | 94 | textAlign(LEFT, CENTER); 95 | strError = network.getTotalNetworkError() * 100.0; 96 | textSize(24); 97 | text("Error: " + nf(strError,2,4) + "%", 40, 460); 98 | 99 | strokeWeight(10); 100 | stroke(0); 101 | textAlign(CENTER, CENTER); 102 | } 103 | 104 | // Increment to next input combination (00, 01, 10, 11) 105 | ++learnXor; 106 | 107 | if (learnXor > 3) { 108 | learnXor = 0; 109 | } 110 | } else { 111 | // Switch between differnt XOR input patterns 112 | 113 | // Set up XOR inputs 114 | if (learnXor == 0) { 115 | network.setInputNode(0, 0.01); 116 | 
network.setInputNode(1, 0.01); 117 | } else if (learnXor == 1) { 118 | network.setInputNode(0, 0.01); 119 | network.setInputNode(1, 0.99); 120 | } else if (learnXor == 2) { 121 | network.setInputNode(0, 0.99); 122 | network.setInputNode(1, 0.01); 123 | } else { // learnXor == 3 124 | network.setInputNode(0, 0.99); 125 | network.setInputNode(1, 0.99); 126 | } 127 | 128 | network.calculateOutput(); 129 | print(learnXor); 130 | print(" : "); 131 | println(network.getOutputNode(0)); 132 | 133 | // Increment to next input combination (00, 01, 10, 11) 134 | ++learnXor; 135 | if (learnXor > 3) { 136 | learnXor = 0; 137 | } 138 | } 139 | 140 | // Heading 141 | textFont(textFont); 142 | if (network.getLearningStatus()) { 143 | String strEpoch = str(network.getEpoch()); 144 | textAlign(CENTER, CENTER); 145 | textSize(48); 146 | text("Learning - XOR", width/2, 40); 147 | textSize(24); 148 | text("Epoch: "+strEpoch, width/2, 80); 149 | } else { 150 | text("Testing - XOR", width/2, 40); 151 | } 152 | 153 | strokeWeight(10); 154 | 155 | //ItoH 156 | { 157 | float value = 0.0; 158 | 159 | value = 3.0 * network.getInputToHiddenWeight(0, 0); 160 | if (value < 0) { 161 | stroke(204, 102, 0); 162 | } else { 163 | stroke(0); 164 | } 165 | value = abs(value); 166 | strokeWeight(value); 167 | line(160, 240, 320, 160); 168 | 169 | value = 3.0 * network.getInputToHiddenWeight(0, 1); 170 | if (value < 0) { 171 | stroke(204, 102, 0); 172 | } else { 173 | stroke(0); 174 | } 175 | value = abs(value); 176 | strokeWeight(value); 177 | line(160, 240, 320, 240); 178 | 179 | value = 3.0 * network.getInputToHiddenWeight(0, 2); 180 | if (value < 0) { 181 | stroke(204, 102, 0); 182 | } else { 183 | stroke(0); 184 | } 185 | value = abs(value); 186 | strokeWeight(value); 187 | line(160, 240, 320, 320); 188 | 189 | value = 3.0 * network.getInputToHiddenWeight(0, 3); 190 | if (value < 0) { 191 | stroke(204, 102, 0); 192 | } else { 193 | stroke(0); 194 | } 195 | value = abs(value); 196 | 
strokeWeight(value); 197 | line(160, 240, 320, 400); 198 | 199 | 200 | 201 | value = 3.0 * network.getInputToHiddenWeight(1, 0); 202 | if (value < 0) { 203 | stroke(204, 102, 0); 204 | } else { 205 | stroke(0); 206 | } 207 | value = abs(value); 208 | strokeWeight(value); 209 | line(160, 320, 320, 160); 210 | 211 | value = 3.0 * network.getInputToHiddenWeight(1, 1); 212 | if (value < 0) { 213 | stroke(204, 102, 0); 214 | } else { 215 | stroke(0); 216 | } 217 | value = abs(value); 218 | strokeWeight(value); 219 | line(160, 320, 320, 240); 220 | 221 | value = 3.0 * network.getInputToHiddenWeight(1, 2); 222 | if (value < 0) { 223 | stroke(204, 102, 0); 224 | } else { 225 | stroke(0); 226 | } 227 | value = abs(value); 228 | strokeWeight(value); 229 | line(160, 320, 320, 320); 230 | 231 | value = 3.0 * network.getInputToHiddenWeight(1, 3); 232 | if (value < 0) { 233 | stroke(204, 102, 0); 234 | } else { 235 | stroke(0); 236 | } 237 | value = abs(value); 238 | strokeWeight(value); 239 | line(160, 320, 320, 400); 240 | } 241 | 242 | //HtoO 243 | { 244 | float value = 0.0; 245 | 246 | value = 3.0 * network.getHiddenToOutputWeight(0, 0); 247 | if (value < 0) { 248 | stroke(204, 102, 0); 249 | } else { 250 | stroke(0); 251 | } 252 | value = abs(value); 253 | strokeWeight(value); 254 | line(320, 160, 480, 280); 255 | 256 | value = 3.0 * network.getHiddenToOutputWeight(1, 0); 257 | if (value < 0) { 258 | stroke(204, 102, 0); 259 | } else { 260 | stroke(0); 261 | } 262 | value = abs(value); 263 | strokeWeight(value); 264 | line(320, 240, 480, 280); 265 | 266 | value = 3.0 * network.getHiddenToOutputWeight(2, 0); 267 | if (value < 0) { 268 | stroke(204, 102, 0); 269 | } else { 270 | stroke(0); 271 | } 272 | value = abs(value); 273 | strokeWeight(value); 274 | line(320, 320, 480, 280); 275 | 276 | value = 3.0 * network.getHiddenToOutputWeight(3, 0); 277 | if (value < 0) { 278 | stroke(204, 102, 0); 279 | } else { 280 | stroke(0); 281 | } 282 | value = abs(value); 283 | 
strokeWeight(value); 284 | line(320, 400, 480, 280); 285 | } 286 | 287 | // Input 288 | strokeWeight(10); 289 | stroke(0); 290 | ellipse(160, 240, 55, 55); 291 | ellipse(160, 320, 55, 55); 292 | 293 | // Hidden 294 | strokeWeight(10); 295 | stroke(0); 296 | ellipse(320, 160, 55, 55); 297 | ellipse(320, 240, 55, 55); 298 | ellipse(320, 320, 55, 55); 299 | ellipse(320, 400, 55, 55); 300 | 301 | // Output 302 | ellipse(480, 280, 55, 55); 303 | 304 | textSize(48); 305 | 306 | // Input Node Text 307 | if (network.getInputNode(0) > 0.9) { 308 | text("1", 100, 240); 309 | } else { 310 | text("0", 100, 240); 311 | } 312 | if (network.getInputNode(1) > 0.9) { 313 | text("1", 100, 320); 314 | } else { 315 | text("0", 100, 320); 316 | } 317 | 318 | // Output Node Text 319 | if (network.getOutputNode(0) > 0.9) { 320 | text("1", 550, 280); 321 | } else { 322 | text("0", 550, 280); 323 | } 324 | } 325 | 326 | void keyPressed() { 327 | errorOutput.flush(); // Writes the remaining data to the file 328 | errorOutput.close(); // Finishes the file 329 | exit(); // Stops the program 330 | } 331 | -------------------------------------------------------------------------------- /trafficlight/additive/additive.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Simple project that display RGB 3 | * additive color mixing 4 | * 5 | */ 6 | 7 | int r; 8 | int g; 9 | int b; 10 | int mode; 11 | int speed = 1; 12 | 13 | void setup() { 14 | size(500, 500); 15 | background(0); 16 | 17 | r = 0; 18 | g = 0; 19 | b = 0; 20 | 21 | mode = 0; 22 | } 23 | 24 | void draw() { 25 | background(0); 26 | 27 | switch(mode) { 28 | case 0: 29 | r += speed; 30 | if (r >= 255) { 31 | r = 255; 32 | mode = 1; 33 | } 34 | break; 35 | 36 | case 1: 37 | r -= speed; 38 | if (r <= 0) { 39 | r = 0; 40 | mode = 10; 41 | } 42 | break; 43 | 44 | case 2: 45 | r += speed; 46 | g += speed; 47 | if (r >= 255) { 48 | r = 255; 49 | g = 255; 50 | mode = 3; 51 | } 52 | break; 53 | 54 | 
case 3: 55 | r -= speed; 56 | g -= speed; 57 | if (r <= 0) { 58 | r = 0; 59 | g = 0; 60 | mode = 4; 61 | } 62 | break; 63 | 64 | case 4: 65 | r += speed; 66 | b += speed; 67 | if (r >= 255) { 68 | r = 255; 69 | b = 255; 70 | mode = 5; 71 | } 72 | break; 73 | 74 | case 5: 75 | r -= speed; 76 | b -= speed; 77 | if (r <= 0) { 78 | r = 0; 79 | b = 0; 80 | mode = 6; 81 | } 82 | break; 83 | 84 | case 6: 85 | g += speed; 86 | b += speed; 87 | if (g >= 255) { 88 | g = 255; 89 | b = 255; 90 | mode = 7; 91 | } 92 | break; 93 | 94 | case 7: 95 | g -= speed; 96 | b -= speed; 97 | if (g <= 0) { 98 | g = 0; 99 | b = 0; 100 | mode = 8; 101 | } 102 | break; 103 | 104 | case 8: 105 | r += speed; 106 | g += speed; 107 | b += speed; 108 | if (r >= 255) { 109 | r = 255; 110 | g = 255; 111 | b = 255; 112 | mode = 9; 113 | } 114 | break; 115 | 116 | case 9: 117 | r -= speed; 118 | g -= speed; 119 | b -= speed; 120 | if (r <= 0) { 121 | r = 0; 122 | g = 0; 123 | b = 0; 124 | mode = 0; 125 | } 126 | break; 127 | 128 | case 10: 129 | g += speed; 130 | if (g >= 255) { 131 | g = 255; 132 | mode = 11; 133 | } 134 | break; 135 | 136 | case 11: 137 | g -= speed; 138 | if (g <= 0) { 139 | g = 0; 140 | mode = 12; 141 | } 142 | break; 143 | 144 | case 12: 145 | b += speed; 146 | if (b >= 255) { 147 | b = 255; 148 | mode = 13; 149 | } 150 | break; 151 | 152 | case 13: 153 | b -= speed; 154 | if (b <= 0) { 155 | b = 0; 156 | mode = 2; 157 | } 158 | break; 159 | 160 | default: 161 | 162 | } 163 | 164 | noFill(); 165 | 166 | stroke(255); 167 | ellipse(250, 200, 250, 250); 168 | ellipse(150, 300, 250, 250); 169 | ellipse(350, 300, 250, 250); 170 | 171 | blendMode(ADD); 172 | noStroke(); 173 | 174 | fill(r, 0, 0); 175 | ellipse(250, 200, 250, 250); 176 | fill(0, g, 0); 177 | ellipse(150, 300, 250, 250); 178 | fill(0, 0, b); 179 | ellipse(350, 300, 250, 250); 180 | 181 | fill (255); 182 | textSize(30); 183 | textAlign(RIGHT); 184 | 185 | text(r, 280, 65); 186 | text(g, 100, 450); 187 | text(b, 465, 450); 
188 | 189 | textAlign(CENTER); 190 | text("R", 250, 30); 191 | text("G", 70, 485); 192 | text("B", 435, 485); 193 | 194 | delay(10); 195 | 196 | println(mode, r, g, b); 197 | } 198 | -------------------------------------------------------------------------------- /trafficlight/computer_vision/computer_vision.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Use to collect RGB colors sensed by camera 3 | * 4 | * Use "findcamera" project to determine correct code 5 | * for line 57 6 | */ 7 | 8 | import processing.video.*; 9 | 10 | Capture cam; 11 | PFont font; 12 | 13 | boolean pause = false; 14 | 15 | int r; 16 | int g; 17 | int b; 18 | 19 | void setup() { 20 | size(1280, 480); 21 | // Replace the following line with the result from 'findcamera.pde' 22 | cam = new Capture(this, 640, 480, "Logitech Webcam 500", 30); 23 | 24 | println("Starting capture"); 25 | 26 | // Start capturing the images from the camera 27 | cam.start(); 28 | 29 | font = loadFont("ArialMT-48.vlw"); 30 | textFont(font); 31 | } 32 | 33 | void draw() { 34 | background(0); 35 | 36 | if (cam.available() == true && pause == false) { 37 | cam.read(); 38 | cam.loadPixels(); 39 | } 40 | 41 | // Ouput image 42 | set(0, 0, cam); 43 | 44 | // Overlay testing points 45 | rectMode(CENTER); 46 | noFill(); 47 | strokeWeight(5); 48 | 49 | // Centre 50 | stroke(250, 10, 10); 51 | rect(640/2, 480/2, 20, 20); 52 | 53 | // Centre left/right 54 | stroke(200, 200, 200); 55 | rect((640/2) - 80, (480/2), 20, 5); 56 | rect((640/2) + 80, (480/2), 20, 5); 57 | 58 | // Centre above/below 59 | rect((640/2), (480/2) - 80, 5, 20); 60 | rect((640/2), (480/2) + 80, 5, 20); 61 | 62 | // Circle for centering sign 63 | translate((640/2), (480/2)); 64 | dashedCircle(140, 6, 4); 65 | translate((-640/2), (-480/2)); 66 | 67 | int averageR = 0; 68 | int averageG = 0; 69 | int averageB = 0; 70 | int loopCount = 0; 71 | 72 | // This averages the color of all pixels seen in the red square 
73 | for (int i = ((480 / 2) - 5); i <= ((480 / 2) + 5); ++i) { 74 | for (int j = ((640 / 2) - 5); j <= ((640 / 2) + 5); ++j) { 75 | ++loopCount; 76 | 77 | int wantedPixel = (640 * i) + j - 1; 78 | 79 | int pixelcolour = cam.pixels[wantedPixel]; 80 | averageR += (pixelcolour >> 16) & 0xff; 81 | averageG += (pixelcolour >> 8) & 0xff; 82 | averageB += pixelcolour & 0xff; 83 | } 84 | } 85 | 86 | r = averageR / loopCount; 87 | g = averageG / loopCount; 88 | b = averageB / loopCount; 89 | 90 | // Display pixel as a square 91 | rectMode(CENTER); 92 | fill(r, g, b); 93 | stroke(255, 255, 255); 94 | strokeWeight(1); 95 | rect(640+(50), 40, 40, 40); 96 | 97 | // Display RGB color weights as squares 98 | fill(r, 0, 0); 99 | rect(640+(50), 100, 40, 40); 100 | fill(0, g, 0); 101 | rect(640+(50), 160, 40, 40); 102 | fill(0, 0, b); 103 | rect(640+(50), 220, 40, 40); 104 | 105 | fill(200, 200, 200); 106 | text("Color seen", 640+(100), 60); 107 | text("R = " + r, 640+(100), 120); 108 | text("G = " + g, 640+(100), 180); 109 | text("B = " + b, 640+(100), 240); 110 | text("'p' - pause // 's' - start", 640+(100), 440); 111 | } 112 | 113 | // Handle key presses 114 | void keyPressed() { 115 | if (key == 'p' || key == 'P') { 116 | // Check we aren't currently paused 117 | if (pause == false) { 118 | pause = true; 119 | } 120 | } 121 | 122 | if (key == 's' || key == 'S') { 123 | // Check we are paused 124 | if (pause == true) { 125 | pause = false; 126 | } 127 | } 128 | } 129 | 130 | void dashedCircle(float radius, int dashWidth, int dashSpacing) { 131 | int steps = 200; 132 | int dashPeriod = dashWidth + dashSpacing; 133 | boolean lastDashed = false; 134 | for(int i = 0; i < steps; i++) { 135 | boolean curDashed = (i % dashPeriod) < dashWidth; 136 | if(curDashed && !lastDashed) { 137 | beginShape(); 138 | } 139 | if(!curDashed && lastDashed) { 140 | endShape(); 141 | } 142 | if(curDashed) { 143 | float theta = map(i, 0, steps, 0, TWO_PI); 144 | vertex(cos(theta) * radius, sin(theta) * 
radius); 145 | } 146 | lastDashed = curDashed; 147 | } 148 | if(lastDashed) { 149 | endShape(); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /trafficlight/computer_vision/data/ArialMT-48.vlw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/computer_vision/data/ArialMT-48.vlw -------------------------------------------------------------------------------- /trafficlight/findcamera/findcamera.pde: -------------------------------------------------------------------------------- 1 | // Use video library 2 | import processing.video.*; 3 | 4 | int cameraIndex = -1; 5 | 6 | // Declare a capture object. 7 | Capture cam; 8 | 9 | void setup() { 10 | size(320, 240); 11 | 12 | println("Waiting for list of cameras:"); 13 | println(); 14 | printArray(Capture.list()); 15 | println(); 16 | println("Select camera with 0 - 9"); 17 | } 18 | 19 | void draw() { 20 | if (cameraIndex == -1) { 21 | // Wait for camera selection 22 | } else if (cameraIndex < 10) { 23 | // Initialize Capture object. 24 | cam = new Capture(this, 320, 240, Capture.list()[cameraIndex], 30); 25 | // Start the capturing process. 
26 | cam.start(); 27 | cameraIndex = 100; 28 | } else if (cameraIndex > 10) { 29 | // If camera is selected, simply stream image 30 | image(cam, 0, 0); 31 | } 32 | } 33 | 34 | // Collects new image from camera when available 35 | void captureEvent(Capture video) { 36 | cam.read(); 37 | } 38 | 39 | void keyPressed() { 40 | // User input for chosen camera that outputs line of code to select 41 | // correct camera for traffic line example 42 | if (key >= '0' && key <= '9') { 43 | cameraIndex = key - '0'; 44 | println("Code for line 22 (computer_vision) and line 30 (tlight_detect):"); 45 | String chosenCamera = " cam = new Capture(this, 640, 480,\""+Capture.list()[cameraIndex]+"\", 30);"; 46 | println(chosenCamera); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /trafficlight/resources/traffic-light.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/resources/traffic-light.jpg -------------------------------------------------------------------------------- /trafficlight/resources/traffic-light.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/resources/traffic-light.pdf -------------------------------------------------------------------------------- /trafficlight/resources/traffic-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/resources/traffic-light.png -------------------------------------------------------------------------------- /trafficlight/resources/traffic-light.pptx: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/resources/traffic-light.pptx -------------------------------------------------------------------------------- /trafficlight/tlight_detect/data/ArialMT-48.vlw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/trafficlight/tlight_detect/data/ArialMT-48.vlw -------------------------------------------------------------------------------- /trafficlight/tlight_detect/neural.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * Neural class for Processing 3 | * Stuart Cording aka codinghead 4 | * 5 | * This code implements a simple neural network as a multilayer perceptron (MLP). 6 | * It supports an input layer, single hidden layer, and output layer. 7 | * The number of nodes in each layer can be defined by the user. 
8 | * The code was developed based upon the post "A Step by Step Backpropgation 9 | * Example" by Matt Mazur: 10 | * https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ 11 | */ 12 | class Neural { 13 | private float[] inputNodeValues; 14 | private float[] hiddenNodeValues; 15 | private float[] outputNodeValues; 16 | private float[] desiredOutputNodeValues; 17 | private int noOfInputs; 18 | private int noOfHidden; 19 | private int noOfOutputs; 20 | 21 | private float[][] inputToHiddenWeights; 22 | private float[][] newInputToHiddenWeights; 23 | private float[][] hiddenToOutputWeights; 24 | private float[][] newHiddenToOutputWeights; 25 | 26 | private float biasInputToHidden; 27 | private float biasHiddenToOutput; 28 | private float learningRate; 29 | private float totalNetworkError; 30 | private int learningEpoch; 31 | 32 | private boolean learning; 33 | 34 | private boolean verbose; 35 | 36 | // Network is created by defining number of inputs, hidden nodes and outputs 37 | Neural(int inputs, int hidden, int outputs) { 38 | // Set all variables to zero to start that don't have to be defined here 39 | biasInputToHidden = 0.0; 40 | biasHiddenToOutput = 0.0; 41 | learningRate = 0.0; 42 | totalNetworkError = 0.0; 43 | 44 | // Note that we are not in learning mode 45 | learning = false; 46 | 47 | // Note that we are not in verbose mode 48 | verbose = false; 49 | 50 | // Set learning epoch to 0 51 | learningEpoch = 0; 52 | 53 | // Note the original number of nodes created 54 | noOfInputs = inputs; 55 | noOfHidden = hidden; 56 | noOfOutputs = outputs; 57 | 58 | // Create the desired number of input nodes and set them to zero 59 | inputNodeValues = new float [inputs]; 60 | for (int x = 0; x < inputs; ++x) { 61 | inputNodeValues[x] = 0.0; 62 | } 63 | 64 | // Create the desired number of hidden nodes and set them to zero 65 | hiddenNodeValues = new float [hidden]; 66 | for (int x = 0; x < hidden; ++x) { 67 | hiddenNodeValues[x] = 0.0; 68 | } 69 | 70 | // 
Create the desired number of output and desired output nodes and 71 | // set them to zero 72 | // Note: outputNodeValues stores the output of the MLP. The 73 | // desiredOutputNodeValues are the values we want to 74 | // achieve for the given input values. 75 | outputNodeValues = new float [outputs]; 76 | desiredOutputNodeValues = new float [outputs]; 77 | for (int x = 0; x < outputs; ++x) { 78 | outputNodeValues[x] = 0.0; 79 | desiredOutputNodeValues[x] = 0.0; 80 | } 81 | 82 | // For each input node, create both current and new weights 83 | // for each hidden node 84 | // Note: The new weights are used during learning 85 | inputToHiddenWeights = new float [inputs][hidden]; 86 | newInputToHiddenWeights = new float [inputs][hidden]; 87 | 88 | for (int x = 0; x < inputs; ++x) { 89 | for (int y = 0; y < hidden; ++y) { 90 | // Apply starting random weights to current nodes 91 | inputToHiddenWeights[x][y] = random(0.25, 0.75); 92 | // New weights can have 0.0 for now 93 | newInputToHiddenWeights[x][y] = 0.0; 94 | } 95 | } 96 | 97 | // For each hidden node, create both current and new weights 98 | // for each output node 99 | // Note: The new weights are used during learning 100 | hiddenToOutputWeights = new float [hidden][outputs]; 101 | newHiddenToOutputWeights = new float [hidden][outputs]; 102 | 103 | for (int x = 0; x < hidden; ++x) { 104 | for (int y = 0; y < outputs; ++y) { 105 | // Apply starting random weights to current nodes 106 | hiddenToOutputWeights[x][y] = random(0.25, 0.75); 107 | // New weights can have 0.0 for now 108 | newHiddenToOutputWeights[x][y] = 0.0; 109 | } 110 | } 111 | } 112 | 113 | /* calculateOuput() 114 | * Uses the weights of the MLP to calculate new output. 115 | * Requires that user has defined their desired input values 116 | * and trained the network. 
117 | */ 118 | void calculateOutput() { 119 | float tempResult = 0.0; 120 | 121 | // Start by calculating the hidden layer node results for each input node 122 | // For each hidden node Hn: 123 | // Hn = sigmoid (wn * in + w(n+1) * i(n+1) ... + Hbias * 1) 124 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 125 | if (verbose) { 126 | println("Input-to-hidden to calculate hidden node output:"); 127 | } 128 | // Start by calculating (wn * in + w(n+1) * i(n+1) ... 129 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 130 | // Sum the results for the weight * input for each input node 131 | tempResult += inputNodeValues[y] * inputToHiddenWeights[y][x]; 132 | if (verbose) { 133 | println("i[", y,"] ", inputNodeValues[y], " * ", "iToHW[", y, x,"] ",inputToHiddenWeights[y][x], " += ", tempResult); 134 | } 135 | } 136 | 137 | // Add bias value result to sum 138 | tempResult += 1.0 * biasInputToHidden; 139 | if (verbose) { 140 | println("Bias: 1.0 * ", biasInputToHidden, " += ", tempResult); 141 | } 142 | 143 | // Squash result using sigmoid of sum 144 | hiddenNodeValues[x] = sigmoid(tempResult); 145 | if (verbose) { 146 | println("Output of hidden node:"); 147 | println("Sigmoid:", hiddenNodeValues[x]); 148 | println(); 149 | } 150 | 151 | // Reset sumation variable for next round 152 | tempResult = 0.0; 153 | } 154 | 155 | // Next calculate the output layer node results for each hidden node 156 | // For each output node On: 157 | // On = sigmoid (wn * Hn + w(n+1) * Hn(n+1) ... + Obias * 1) 158 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 159 | if (verbose) { 160 | println("Hidden-to-output to calculate output node result:"); 161 | } 162 | // Start by calulating (wn * Hn + w(n+1) * Hn(n+1) ... 
163 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 164 | 165 | tempResult += hiddenNodeValues[y] * hiddenToOutputWeights[y][x]; 166 | if (verbose) { 167 | println("h[", y,"] ", hiddenNodeValues[y], " * ", "hToOW[", y, x,"] ",hiddenToOutputWeights[y][x], " += ", tempResult); 168 | } 169 | } 170 | 171 | // Add bias value 172 | tempResult += 1.0 * biasHiddenToOutput; 173 | if (verbose) { 174 | println("Bias: 1.0 * ", biasHiddenToOutput, " += ", tempResult); 175 | } 176 | 177 | // Result goes into the output node 178 | outputNodeValues[x] = sigmoid(tempResult); 179 | if (verbose) { 180 | println("Result for output node:"); 181 | println("Sigmoid:", outputNodeValues[x]); 182 | println(); 183 | } 184 | 185 | // Reset sumation variable for next round 186 | tempResult = 0.0; 187 | } 188 | 189 | // Calculate total error 190 | // ERRORtotal = SUM 0.5 * (target - output)^2 191 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 192 | tempResult += 0.5 * sq(desiredOutputNodeValues[x] - outputNodeValues[x]); 193 | if (verbose) { 194 | println("Determine error between output and desired output values:"); 195 | print("Error o[", x, "]:", tempResult); 196 | println(" : 0.5 * (", desiredOutputNodeValues[x], "-", outputNodeValues[x],")^2"); 197 | println(); 198 | } 199 | } 200 | 201 | if (verbose) { 202 | println("Total Error: ", tempResult); 203 | println(); 204 | } 205 | 206 | totalNetworkError = tempResult; 207 | 208 | if (learning) { 209 | if (verbose) { 210 | println(); 211 | println(">>> Executing learning loop..."); 212 | } 213 | backPropagation(); 214 | if (verbose) { 215 | println(); 216 | println(">>> Learning loop complete. Epoch = ", learningEpoch); 217 | println(); 218 | } 219 | } 220 | } 221 | 222 | /* backPropagation() 223 | * Uses network error to update weights when learning is 224 | * enabled. 
225 | */ 226 | private void backPropagation() { 227 | float totalErrorChangeWRTOutput = 0.0; 228 | float outputChangeWRTNetInput = 0.0; 229 | float netInputChangeWRTWeight = 0.0; 230 | float errorTotalWRTHiddenNode = 0.0; 231 | 232 | // Increment epoch 233 | ++learningEpoch; 234 | 235 | // Consider the output layer to calculate new weights for hidden-to-output layer 236 | // newWeightN = wn - learningRate * (ErrorTotal / impactOfwn) 237 | if (verbose) { 238 | println(); 239 | println("Hidden to Output Weight Correction:"); 240 | } 241 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 242 | 243 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[x] - outputNodeValues[x]); 244 | if (verbose) { 245 | println("totalErrChangeWRTOutput [", x,"] =", totalErrorChangeWRTOutput); 246 | } 247 | 248 | outputChangeWRTNetInput = outputNodeValues[x] * (1 - outputNodeValues[x]); 249 | if (verbose) { 250 | println("outputChangeWRTNetInput [", x,"] =", outputChangeWRTNetInput); 251 | println(); 252 | } 253 | 254 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 255 | float weightChange = 0.0; 256 | 257 | netInputChangeWRTWeight = hiddenNodeValues[y]; 258 | 259 | weightChange = totalErrorChangeWRTOutput * outputChangeWRTNetInput * netInputChangeWRTWeight; 260 | 261 | if (verbose) { 262 | println("weightChange =", weightChange, " :", totalErrorChangeWRTOutput, "*", outputChangeWRTNetInput, "*", netInputChangeWRTWeight); 263 | } 264 | 265 | newHiddenToOutputWeights[y][x] = hiddenToOutputWeights[y][x] - (learningRate * weightChange); 266 | 267 | if (verbose) { 268 | println("Calculating", hiddenToOutputWeights[y][x], "-", learningRate, "*", weightChange); 269 | println("New Hidden-To-Ouput Weight [", y, x, "] =", newHiddenToOutputWeights[y][x], ", Old Weight =", hiddenToOutputWeights[y][x]); 270 | println(); 271 | } 272 | } 273 | } 274 | 275 | // Consider the hidden layer (based upon original weights) 276 | if (verbose) { 277 | println("Input to Hidden Weight Correction:"); 278 | 
} 279 | 280 | // Need to consider for each hidden node 281 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 282 | // For each hidden node we need: 283 | // - totalErrorChangeWRTOutput 284 | // - outputChangeWRTNetInput 285 | // - hiddenToOutputWeights 286 | float totalErrorChangeWRTHidden = 0.0; 287 | float outputHiddenWRTnetHidden = 0.0; 288 | float totalErrorChangeWRTweight = 0.0; 289 | 290 | for (int y = 0; y < getNoOfOutputNodes(); ++ y) { 291 | if (verbose) { 292 | println(); 293 | println("Calculating hidden node ", x," for output ", y); 294 | } 295 | 296 | // totalErrorChangeWRTOutput 297 | totalErrorChangeWRTOutput = -(desiredOutputNodeValues[y] - outputNodeValues[y]); 298 | if (verbose) { 299 | println("totalErrChangeWRTOutput [", y,"] =", totalErrorChangeWRTOutput); 300 | } 301 | 302 | // outputChangeWRTNetInput 303 | outputChangeWRTNetInput = outputNodeValues[y] * (1 - outputNodeValues[y]); 304 | if (verbose) { 305 | println("outputChangeWRTNetInput [", y,"] =", outputChangeWRTNetInput); 306 | } 307 | 308 | totalErrorChangeWRTHidden += totalErrorChangeWRTOutput * outputChangeWRTNetInput * hiddenToOutputWeights[x][y]; 309 | 310 | if (verbose) { 311 | println("totalErrorChangeWRTHidden[", x, "] =", totalErrorChangeWRTHidden); 312 | println(); 313 | } 314 | } 315 | 316 | outputHiddenWRTnetHidden = (hiddenNodeValues[x]) * (1 - hiddenNodeValues[x]); 317 | 318 | if (verbose) { 319 | println(); 320 | println("hiddenNodeValues[", x, "] =", hiddenNodeValues[x]); 321 | println("outputHiddenWRTnetHidden[", x, "] =", outputHiddenWRTnetHidden); 322 | } 323 | 324 | // For each input, calculate the weight change 325 | for (int y = 0; y < getNoOfInputNodes(); ++y) { 326 | totalErrorChangeWRTweight = totalErrorChangeWRTHidden * outputHiddenWRTnetHidden * inputNodeValues[y]; 327 | 328 | if (verbose) { 329 | println("inputNodeValues[", y, "] =", inputNodeValues[y]); 330 | println("totalErrorChangeWRTweight[", x, "] =", totalErrorChangeWRTweight); 331 | } 332 | 333 | 
newInputToHiddenWeights[y][x] = inputToHiddenWeights[y][x] - (learningRate * totalErrorChangeWRTweight); 334 | 335 | if (verbose) { 336 | println("inputToHiddenWeights[", y, "][", x, "] =", inputToHiddenWeights[y][x]); 337 | println("New Input-To-Hidden Weight[", y, "][", x, "] =", newInputToHiddenWeights[y][x], "Old Weight =", inputToHiddenWeights[y][x]); 338 | println(); 339 | } 340 | } 341 | } 342 | 343 | 344 | // Update all weights to newly calculated values 345 | if (verbose) { 346 | println("Updating weights."); 347 | } 348 | 349 | // Update the input-to-hidden weights 350 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 351 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 352 | inputToHiddenWeights[x][y] = newInputToHiddenWeights[x][y]; 353 | } 354 | } 355 | // Update the hidden-to-output weights 356 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 357 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 358 | hiddenToOutputWeights[x][y] = newHiddenToOutputWeights[x][y]; 359 | } 360 | } 361 | } 362 | 363 | void setBiasInputToHidden(float bias) { 364 | biasInputToHidden = bias; 365 | } 366 | 367 | float getBiasInputToHidden() { 368 | return biasInputToHidden; 369 | } 370 | 371 | void setBiasHiddenToOutput(float bias) { 372 | biasHiddenToOutput = bias; 373 | } 374 | 375 | float getBiasHiddenToOutput() { 376 | return biasHiddenToOutput; 377 | } 378 | 379 | void setLearningRate(float rate) { 380 | learningRate = rate; 381 | } 382 | 383 | float getLearningRate() { 384 | return learningRate; 385 | } 386 | 387 | float getTotalNetworkError() { 388 | return totalNetworkError; 389 | } 390 | 391 | int getNoOfInputNodes() { 392 | return noOfInputs; 393 | } 394 | 395 | int getNoOfHiddenNodes() { 396 | return noOfHidden; 397 | } 398 | 399 | int getNoOfOutputNodes() { 400 | return noOfOutputs; 401 | } 402 | 403 | void setInputNode(int node, float value) { 404 | inputNodeValues[node] = value; 405 | } 406 | 407 | float getInputNode(int node) { 408 | return 
inputNodeValues[node]; 409 | } 410 | 411 | void setOutputNodeDesired(int node, float value) { 412 | desiredOutputNodeValues[node] = value; 413 | } 414 | 415 | float getOutputNodeDesired(int node) { 416 | return desiredOutputNodeValues[node]; 417 | } 418 | 419 | float getOutputNode(int node) { 420 | return outputNodeValues[node]; 421 | } 422 | 423 | void setInputToHiddenWeight(int input, int hidden, float value) { 424 | inputToHiddenWeights[input][hidden] = value; 425 | } 426 | 427 | float getInputToHiddenWeight(int input, int hidden) { 428 | return inputToHiddenWeights[input][hidden]; 429 | } 430 | 431 | void setHiddenToOutputWeight(int hidden, int output, float value) { 432 | hiddenToOutputWeights[hidden][output] = value; 433 | } 434 | 435 | float getHiddenToOutputWeight(int hidden, int output) { 436 | return hiddenToOutputWeights[hidden][output]; 437 | } 438 | 439 | int getEpoch() { 440 | return learningEpoch; 441 | } 442 | 443 | void turnLearningOn() { 444 | learning = true; 445 | } 446 | 447 | void turnLearningOff() { 448 | learning = false; 449 | } 450 | 451 | void turnVerboseOn() { 452 | verbose = true; 453 | } 454 | 455 | void turnVerboseOff() { 456 | verbose = false; 457 | } 458 | 459 | boolean getLearningStatus() { 460 | return learning; 461 | } 462 | 463 | void displayInputNodes() { 464 | for (int x = 0; x < noOfInputs; ++x) { 465 | print(getInputNode(x), " "); 466 | } 467 | println(); 468 | } 469 | 470 | void displayInputToHiddenWeightsCurrent() { 471 | for (int x = 0; x < getNoOfInputNodes(); ++x) { 472 | print("For Input Node " + x + ": "); 473 | for (int y = 0; y < getNoOfHiddenNodes(); ++y) { 474 | print(inputToHiddenWeights[x][y], " "); 475 | } 476 | println(); 477 | } 478 | } 479 | 480 | void displayHiddenToOutputWeightsCurrent() { 481 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 482 | print("For Hidden Node " + x + ": "); 483 | for (int y = 0; y < getNoOfOutputNodes(); ++y) { 484 | print(hiddenToOutputWeights[x][y], " "); 485 | } 486 | 
println(); 487 | } 488 | } 489 | 490 | void displayHiddenNodes() { 491 | for (int x = 0; x < getNoOfHiddenNodes(); ++x) { 492 | print(hiddenNodeValues[x], " "); 493 | } 494 | println(); 495 | } 496 | 497 | void displayOutputNodes() { 498 | for (int x = 0; x < getNoOfOutputNodes(); ++x) { 499 | print(outputNodeValues[x], " "); 500 | } 501 | println(); 502 | } 503 | 504 | void seed(int x) { 505 | randomSeed(x); 506 | } 507 | } 508 | 509 | float sigmoid(float x) { 510 | return (1 / (1 + exp(-x))); 511 | } 512 | -------------------------------------------------------------------------------- /trafficlight/tlight_detect/tlight_detect.pde: -------------------------------------------------------------------------------- 1 | /* 2 | * This teaches the traffic light colors to the MLP, then 3 | * tests if they are correctly classified. 4 | * 5 | * Focus red box on color of interest. 6 | */ 7 | 8 | import processing.video.*; 9 | 10 | Capture cam; 11 | PFont font; 12 | 13 | boolean pause = false; 14 | 15 | boolean learningMode = false; 16 | boolean learnRed = false; 17 | boolean learnAmber = false; 18 | boolean learnGreen = false; 19 | boolean learnOther = false; 20 | 21 | int r; 22 | int g; 23 | int b; 24 | 25 | Neural network; 26 | 27 | void setup() { 28 | size(1280, 480); 29 | 30 | cam = new Capture(this, 640, 480, "Logitech Webcam 500", 30); 31 | 32 | // Start capturing the images from the camera 33 | cam.start(); 34 | 35 | font = loadFont("ArialMT-48.vlw"); 36 | textFont(font); 37 | 38 | println("Configuring neural network..."); 39 | 40 | network = new Neural(3,6,4); 41 | network.setLearningRate(0.5); 42 | println("Inputs =", network.getNoOfInputNodes(), " Hidden = ", network.getNoOfHiddenNodes(), " Outputs = ", network.getNoOfOutputNodes()); 43 | network.setBiasInputToHidden(0.35); 44 | network.setBiasHiddenToOutput(0.60); 45 | 46 | /*********************************** 47 | ** 48 | ** TEACH THE BRAIN !!! 
49 | ** 50 | **********************************/ 51 | network.turnLearningOn(); 52 | println("Neural network is learning..."); 53 | 54 | for (int loop = 0; loop < 90000; ++loop) { 55 | 56 | //teachRed(220, 56, 8); 57 | //teachAmber(216, 130, 11); 58 | //teachGreen(123, 150, 128); 59 | 60 | //teachOther(163, 160, 121); 61 | //teachOther(76, 72, 35); 62 | //teachOther(175, 167, 138); 63 | //teachOther(152, 167, 161); 64 | 65 | } 66 | 67 | network.turnLearningOff(); 68 | /*********************************** 69 | ** 70 | ** END OF TEACHING !!! 71 | ** 72 | **********************************/ 73 | 74 | println("Neural network is ready"); 75 | } 76 | 77 | void draw() { 78 | background(0); 79 | 80 | if (cam.available() == true && pause == false) { 81 | //println("Cam available"); 82 | cam.read(); 83 | cam.loadPixels(); 84 | } 85 | 86 | // Output image 87 | set(0, 0, cam); 88 | 89 | // Display testing points 90 | rectMode(CENTER); 91 | noFill(); 92 | strokeWeight(5); 93 | 94 | // Centre 95 | stroke(250, 10, 10); 96 | rect(640/2, 480/2, 20, 20); 97 | 98 | // Centre left/right 99 | stroke(204, 102, 0); 100 | rect((640/2) - 80, (480/2), 20, 5); 101 | rect((640/2) + 80, (480/2), 20, 5); 102 | 103 | // Centre above/below 104 | rect((640/2), (480/2) - 80, 5, 20); 105 | rect((640/2), (480/2) + 80, 5, 20); 106 | 107 | // Circle for centering sign 108 | translate((640/2), (480/2)); 109 | dashedCircle(140, 6, 4); 110 | translate((-640/2), (-480/2)); 111 | 112 | int averageR = 0; 113 | int averageG = 0; 114 | int averageB = 0; 115 | int loopCount = 0; 116 | 117 | // This averages the color of all pixels seen in the red square 118 | for (int i = ((480 / 2) - 5); i <= ((480 / 2) + 5); ++i) { 119 | for (int j = ((640 / 2) - 5); j <= ((640 / 2) + 5); ++j) { 120 | ++loopCount; 121 | 122 | int wantedPixel = (640 * i) + j - 1; 123 | 124 | int pixelcolour = cam.pixels[wantedPixel]; 125 | averageR += (pixelcolour >> 16) & 0xff; 126 | averageG += (pixelcolour >> 8) & 0xff; 127 | averageB += 
pixelcolour & 0xff; 128 | } 129 | } 130 | 131 | r = averageR / loopCount; 132 | g = averageG / loopCount; 133 | b = averageB / loopCount; 134 | 135 | // Display pixel as a square 136 | rectMode(CENTER); 137 | fill(r, g, b); 138 | stroke(255, 255, 255); 139 | strokeWeight(1); 140 | rect(640+(50), 40, 40, 40); 141 | 142 | // Display RGB colours weights as squares 143 | fill(r, 0, 0); 144 | rect(640+(50), 100, 40, 40); 145 | fill(0, g, 0); 146 | rect(640+(50), 160, 40, 40); 147 | fill(0, 0, b); 148 | rect(640+(50), 220, 40, 40); 149 | 150 | fill(200, 200, 200); 151 | text("Colour seen", 640+(100), 60); 152 | text("R = " + r, 640+(100), 120); 153 | text("G = " + g, 640+(100), 180); 154 | text("B = " + b, 640+(100), 240); 155 | 156 | // Now try to detect colour seen 157 | network.setInputNode(0, (float) r / 255.0); 158 | network.setInputNode(1, (float) g / 255.0); 159 | network.setInputNode(2, (float) b / 255.0); 160 | 161 | network.calculateOutput(); 162 | 163 | // println(network.getOutputNode(0), 164 | // network.getOutputNode(1), 165 | // network.getOutputNode(2), 166 | // network.getOutputNode(3)); 167 | 168 | 169 | // Display detected colours as circles 170 | // Red 171 | if (network.getOutputNode(0) > 0.90) { 172 | fill(200, 200, 200); 173 | text("Red", 640+(100), 320); 174 | 175 | fill(r, g, b); 176 | } else { 177 | fill(0, 0, 0); 178 | } 179 | ellipse(640+(50), 300, 40, 40); 180 | 181 | // Amber 182 | if (network.getOutputNode(1) > 0.90) { 183 | fill(200, 200, 200); 184 | text("Amber", 640+(100), 380); 185 | 186 | fill(r, g, b); 187 | } else { 188 | fill(0, 0, 0); 189 | } 190 | ellipse(640+(50), 360, 40, 40); 191 | 192 | // Green 193 | if (network.getOutputNode(2) > 0.90) { 194 | fill(200, 200, 200); 195 | text("Green", 640+(100), 440); 196 | 197 | fill(r, g, b); 198 | } else { 199 | fill(0, 0, 0); 200 | } 201 | ellipse(640+(50), 420, 40, 40); 202 | 203 | // Other 204 | if (network.getOutputNode(3) > 0.90) { 205 | fill(200, 200, 200); 206 | text("Other", 
640+(400), 440); 207 | 208 | fill(r, g, b); 209 | } else { 210 | fill(0, 0, 0); 211 | } 212 | ellipse(640+(360), 420, 40, 40); 213 | 214 | } 215 | 216 | // Handle key presses 217 | void keyPressed() { 218 | if (key == 'p' || key == 'P') { 219 | // Check we aren't currently paused 220 | if (pause == false) { 221 | pause = true; 222 | } 223 | } 224 | 225 | if (key == 's' || key == 'S') { 226 | // Check we are paused 227 | if (pause == true) { 228 | pause = false; 229 | } 230 | } 231 | } 232 | 233 | void dashedCircle(float radius, int dashWidth, int dashSpacing) { 234 | int steps = 200; 235 | int dashPeriod = dashWidth + dashSpacing; 236 | boolean lastDashed = false; 237 | for(int i = 0; i < steps; i++) { 238 | boolean curDashed = (i % dashPeriod) < dashWidth; 239 | if(curDashed && !lastDashed) { 240 | beginShape(); 241 | } 242 | if(!curDashed && lastDashed) { 243 | endShape(); 244 | } 245 | if(curDashed) { 246 | float theta = map(i, 0, steps, 0, TWO_PI); 247 | vertex(cos(theta) * radius, sin(theta) * radius); 248 | } 249 | lastDashed = curDashed; 250 | } 251 | if(lastDashed) { 252 | endShape(); 253 | } 254 | } 255 | 256 | void teachRed(int r, int g, int b) { 257 | float newR, newG, newB; 258 | 259 | newR = (randomise(r) / 255.0); 260 | newG = (randomise(g) / 255.0); 261 | newB = (randomise(b) / 255.0); 262 | 263 | //println("Red:", newR, newG, newB); 264 | 265 | network.setInputNode(0, newR); 266 | network.setInputNode(1, newG); 267 | network.setInputNode(2, newB); 268 | 269 | network.setOutputNodeDesired(0, 0.99); 270 | network.setOutputNodeDesired(1, 0.01); 271 | network.setOutputNodeDesired(2, 0.01); 272 | network.setOutputNodeDesired(3, 0.01); 273 | 274 | network.calculateOutput(); 275 | } 276 | 277 | void teachAmber(int r, int g, int b) { 278 | float newR, newG, newB; 279 | 280 | newR = (randomise(r) / 255.0); 281 | newG = (randomise(g) / 255.0); 282 | newB = (randomise(b) / 255.0); 283 | 284 | //println("Amber:", newR, newG, newB); 285 | 286 | 
network.setInputNode(0, newR); 287 | network.setInputNode(1, newG); 288 | network.setInputNode(2, newB); 289 | 290 | network.setOutputNodeDesired(0, 0.01); 291 | network.setOutputNodeDesired(1, 0.99); 292 | network.setOutputNodeDesired(2, 0.01); 293 | network.setOutputNodeDesired(3, 0.01); 294 | 295 | network.calculateOutput(); 296 | } 297 | 298 | void teachGreen(int r, int g, int b) { 299 | float newR, newG, newB; 300 | 301 | newR = (randomise(r) / 255.0); 302 | newG = (randomise(g) / 255.0); 303 | newB = (randomise(b) / 255.0); 304 | 305 | network.setInputNode(0, newR); 306 | network.setInputNode(1, newG); 307 | network.setInputNode(2, newB); 308 | 309 | network.setOutputNodeDesired(0, 0.01); 310 | network.setOutputNodeDesired(1, 0.01); 311 | network.setOutputNodeDesired(2, 0.99); 312 | network.setOutputNodeDesired(3, 0.01); 313 | 314 | network.calculateOutput(); 315 | } 316 | 317 | void teachOther(int r, int g, int b) { 318 | float newR, newG, newB; 319 | 320 | newR = (randomise(r) / 255.0); 321 | newG = (randomise(g) / 255.0); 322 | newB = (randomise(b) / 255.0); 323 | 324 | network.setInputNode(0, newR); 325 | network.setInputNode(1, newG); 326 | network.setInputNode(2, newB); 327 | 328 | network.setOutputNodeDesired(0, 0.01); 329 | network.setOutputNodeDesired(1, 0.01); 330 | network.setOutputNodeDesired(2, 0.01); 331 | network.setOutputNodeDesired(3, 0.99); 332 | 333 | network.calculateOutput(); 334 | } 335 | 336 | int randomise(int value) { 337 | value += random(-4, 5); 338 | 339 | if (value > 255) { 340 | value = 255; 341 | } 342 | if (value < 0 ) { 343 | value = 0; 344 | } 345 | return value; 346 | } 347 | -------------------------------------------------------------------------------- /workedexample/Matt Mazur Example - Ver 2.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/workedexample/Matt Mazur Example - 
Ver 2.xlsx -------------------------------------------------------------------------------- /workedexample/Matt Mazur Example.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codinghead/simple-neural-network/1f93533c429f76dab289eaf4dd8a4afc39b127af/workedexample/Matt Mazur Example.xlsx --------------------------------------------------------------------------------