├── CNAME ├── _config.yml ├── hc2.png ├── graphe_relu.png ├── graphe_tanh.png ├── graphe_gaussian.png ├── graphe_sigmoid.png ├── graphe_softplus.png ├── graphe_heaviside.png ├── hc2-deep-learning.png ├── xor.py ├── LICENSE.md ├── test points.py ├── README.md └── microMLP.py /CNAME: -------------------------------------------------------------------------------- 1 | micromlp.hc2.fr -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-midnight -------------------------------------------------------------------------------- /hc2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/hc2.png -------------------------------------------------------------------------------- /graphe_relu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_relu.png -------------------------------------------------------------------------------- /graphe_tanh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_tanh.png -------------------------------------------------------------------------------- /graphe_gaussian.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_gaussian.png -------------------------------------------------------------------------------- /graphe_sigmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_sigmoid.png -------------------------------------------------------------------------------- /graphe_softplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_softplus.png -------------------------------------------------------------------------------- /graphe_heaviside.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/graphe_heaviside.png -------------------------------------------------------------------------------- /hc2-deep-learning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jczic/MicroMLP/HEAD/hc2-deep-learning.png -------------------------------------------------------------------------------- /xor.py: -------------------------------------------------------------------------------- 1 | 2 | # -*- coding: utf-8 -*- 3 | 4 | from microMLP import MicroMLP 5 | 6 | mlp = MicroMLP.Create( neuronsByLayers = [2, 2, 1], 7 | activationFuncName = MicroMLP.ACTFUNC_GAUSSIAN, 8 | layersAutoConnectFunction = MicroMLP.LayersFullConnect ) 9 | 10 | nnFalse = MicroMLP.NNValue.FromBool(False) 11 | nnTrue = MicroMLP.NNValue.FromBool(True) 12 | 13 | mlp.AddExample( [nnFalse, nnFalse], [nnFalse] ) 14 | mlp.AddExample( [nnFalse, nnTrue ], [nnTrue ] ) 15 | mlp.AddExample( [nnTrue , nnTrue ], [nnFalse] ) 16 | mlp.AddExample( [nnTrue , nnFalse], [nnTrue ] ) 17 | 18 | learnCount = mlp.LearnExamples() 19 | 20 | print( "LEARNED :" ) 21 | print( " - False xor False = %s" % mlp.Predict([nnFalse, nnFalse])[0].AsBool ) 22 | print( " - False xor True = %s" % 
mlp.Predict([nnFalse, nnTrue] )[0].AsBool ) 23 | print( " - True xor True = %s" % mlp.Predict([nnTrue , nnTrue] )[0].AsBool ) 24 | print( " - True xor False = %s" % mlp.Predict([nnTrue , nnFalse])[0].AsBool ) -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright © 2018 Jean-Christophe Bos & HC² (www.hc2.fr) 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /test points.py: -------------------------------------------------------------------------------- 1 | 2 | # -*- coding: utf-8 -*- 3 | 4 | from microMLP import MicroMLP 5 | from tkinter import * 6 | from threading import * 7 | 8 | # ---------------------------------------------------------------- 9 | 10 | width = 800 # Window/Canvas width 11 | height = 500 # Window/Canvas height 12 | examples = [ ] 13 | 14 | # ---------------------------------------------------------------- 15 | 16 | def rgb2hex(rgb): 17 | return '#%02x%02x%02x' % rgb 18 | 19 | # ---------------------------------------------------------------- 20 | 21 | def addExample(x, y) : 22 | examples.append((x, y)) 23 | can.create_oval( x-7, y-7, x+7, y+7, 24 | fill = '#3366AA', 25 | outline = '#AA3366', 26 | width = 2 ) 27 | 28 | # ---------------------------------------------------------------- 29 | 30 | class processThread(Thread) : 31 | 32 | def run(self) : 33 | evt = Event() 34 | line = None 35 | while not evt.wait(0.010) : 36 | if len(examples) >= 2 : 37 | for i in range(30) : 38 | for ex in examples : 39 | mlp.Learn( [ MicroMLP.NNValue.FromAnalogSignal(ex[0]/width) ], 40 | [ MicroMLP.NNValue.FromAnalogSignal(ex[1]/height) ] ) 41 | pts = [ ] 42 | for x in range(0, width, 10) : 43 | out = mlp.Predict([MicroMLP.NNValue.FromAnalogSignal(x/width)]) 44 | y = out[0].AsFloat * height 45 | pts.append((x, y)) 46 | can.delete(line) 47 | line = can.create_line(pts, fill='#3366AA') 48 | 49 | # ---------------------------------------------------------------- 50 | 51 | def onCanvasClick(evt) : 52 | addExample(evt.x, evt.y) 53 | 54 | # ---------------------------------------------------------------- 55 | 56 | mlp = MicroMLP.Create( neuronsByLayers = [1, 15, 15, 1], 57 | activationFuncName = MicroMLP.ACTFUNC_GAUSSIAN, 58 | layersAutoConnectFunction = MicroMLP.LayersFullConnect ) 59 | 60 | mainWindow = Tk() 61 | mainWindow.title('microMLP - test points') 62 | 
mainWindow.geometry('%sx%s' % (width, height)) 63 | mainWindow.resizable(False, False) 64 | 65 | can = Canvas( mainWindow, 66 | width = width, 67 | height = height, 68 | bg = 'white', 69 | borderwidth = 0 ) 70 | can.bind('<Button-1>', onCanvasClick) 71 | can.pack() 72 | 73 | pc = processThread() 74 | pc.daemon = True 75 | pc.start() 76 | 77 | mainWindow.mainloop() 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## MicroMLP is a micro artificial neural network multilayer perceptron (principally used on ESP32 and [Pycom](http://www.pycom.io) modules) 2 | 3 | ![HC²](hc2.png "HC²") 4 | 5 | #### Very easy to integrate and very light with one file only : 6 | - `"microMLP.py"` 7 | 8 | #### MicroMLP features : 9 | - Modifiable multilayer and connections structure 10 | - Integrated bias on neurons 11 | - Plasticity of the connections included 12 | - Activation functions by layer 13 | - Parameters Alpha, Eta and Gain 14 | - Managing set of examples and learning 15 | - QLearning functions to use reinforcement learning 16 | - Save and load all structure to/from json file 17 | - Various activation functions : 18 | - Heaviside binary step 19 | - Logistic (sigmoid or soft step) 20 | - Hyperbolic tangent 21 | - SoftPlus rectifier 22 | - ReLU (rectified linear unit) 23 | - Gaussian function 24 | 25 | #### Use deep learning for : 26 | - Signal processing (speech processing, identification, filtering) 27 | - Image processing (compression, recognition, patterns) 28 | - Control (diagnosis, quality control, robotics) 29 | - Optimization (planning, traffic regulation, finance) 30 | - Simulation (black box simulation) 31 | - Classification (DNA analysis) 32 | - Approximation (unknown function, complex function) 33 | 34 |

![HC²](hc2-deep-learning.png "Deep learning") 35 | 36 |

37 | 38 | ### Using *MicroMLP* static functions : 39 | 40 | | Name | Function | 41 | | - | - | 42 | | Create | `mlp = MicroMLP.Create(neuronsByLayers, activationFuncName, layersAutoConnectFunction=None, useBiasValue=1.0)` | 43 | | LoadFromFile | `mlp = MicroMLP.LoadFromFile(filename)` | 44 | 45 | ### Using *MicroMLP* to quickly create a neural network : 46 | ```python 47 | from microMLP import MicroMLP 48 | mlp = MicroMLP.Create([3, 10, 2], "Sigmoid", MicroMLP.LayersFullConnect) 49 | ``` 50 | 51 | ### Using *MicroMLP* main class : 52 | 53 | | Name | Function | 54 | | - | - | 55 | | Constructor | `mlp = MicroMLP()` | 56 | | GetLayer | `layer = mlp.GetLayer(layerIndex)` | 57 | | GetLayerIndex | `idx = mlp.GetLayerIndex(layer)` | 58 | | RemoveLayer | `mlp.RemoveLayer(layer)` | 59 | | GetInputLayer | `inputLayer = mlp.GetInputLayer()` | 60 | | GetOutputLayer | `outputLayer = mlp.GetOutputLayer()` | 61 | | Learn | `ok = mlp.Learn(inputVectorNNValues, targetVectorNNValues)` | 62 | | Test | `ok = mlp.Test(inputVectorNNValues, targetVectorNNValues)` | 63 | | Predict | `outputVectorNNValues = mlp.Predict(inputVectorNNValues)` | 64 | | QLearningLearnForChosenAction | `ok = mlp.QLearningLearnForChosenAction(stateVectorNNValues, rewardNNValue, pastStateVectorNNValues, chosenActionIndex, terminalState=True, discountFactorNNValue=None)` | 65 | | QLearningPredictBestActionIndex | `bestActionIndex = mlp.QLearningPredictBestActionIndex(stateVectorNNValues)` | 66 | | SaveToFile | `ok = mlp.SaveToFile(filename)` | 67 | | AddExample | `ok = mlp.AddExample(inputVectorNNValues, targetVectorNNValues)` | 68 | | ClearExamples | `mlp.ClearExamples()` | 69 | | LearnExamples | `learnCount = mlp.LearnExamples(maxSeconds=30, maxCount=None, stopWhenLearned=True, printMAEAverage=True)` | 70 | 71 | | Property | Example | Read/Write | 72 | | - | - | - | 73 | | Layers | `mlp.Layers` | get | 74 | | LayersCount | `mlp.LayersCount` | get | 75 | | IsNetworkComplete | `mlp.IsNetworkComplete` | get | 76 | | MSE | `mlp.MSE` | get | 77 | | MAE | `mlp.MAE` | get | 78 | | MSEPercent | `mlp.MSEPercent` | get | 79 | | MAEPercent | `mlp.MAEPercent` | get | 80 | | ExamplesCount | `mlp.ExamplesCount` | get | 81 | 82 | ### Using *MicroMLP* to learn the XOr problem (with hyperbolic tangent) : 83 | ```python 84 | from microMLP import MicroMLP 85 | 86 | mlp = MicroMLP.Create( neuronsByLayers = [2, 2, 1], 87 | activationFuncName = MicroMLP.ACTFUNC_TANH, 88 | layersAutoConnectFunction = MicroMLP.LayersFullConnect ) 89 | 90 | nnFalse = MicroMLP.NNValue.FromBool(False) 91 | nnTrue = MicroMLP.NNValue.FromBool(True) 92 | 93 | mlp.AddExample( [nnFalse, nnFalse], [nnFalse] ) 94 | mlp.AddExample( [nnFalse, nnTrue ], [nnTrue ] ) 95 | mlp.AddExample( [nnTrue , nnTrue ], [nnFalse] ) 96 | mlp.AddExample( [nnTrue , nnFalse], [nnTrue ] ) 97 | 98 | learnCount = mlp.LearnExamples() 99 | 100 | print( "LEARNED :" ) 101 | print( " - False xor False = %s" % mlp.Predict([nnFalse, nnFalse])[0].AsBool ) 102 | print( " - False xor True = %s" % mlp.Predict([nnFalse, nnTrue] )[0].AsBool ) 103 | print( " - True xor True = %s" % mlp.Predict([nnTrue , nnTrue] )[0].AsBool ) 104 | print( " - True xor False = %s" % mlp.Predict([nnTrue , nnFalse])[0].AsBool ) 105 | 106 | if mlp.SaveToFile("mlp.json") : 107 | print( "MicroMLP structure saved!" ) 108 | ```
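### Reloading a saved network (sketch) :

Once saved with `SaveToFile`, the whole structure (layers, connections, weights and biases) can be restored with `LoadFromFile`. A minimal sketch, assuming the `"mlp.json"` file written by the example above:

```python
from microMLP import MicroMLP

mlp = MicroMLP.LoadFromFile("mlp.json")

nnFalse = MicroMLP.NNValue.FromBool(False)
nnTrue  = MicroMLP.NNValue.FromBool(True)

# The restored network predicts directly, without learning again
print( " - True xor False = %s" % mlp.Predict([nnTrue, nnFalse])[0].AsBool )
```

The QLearning methods listed in the main class table use the same vectors of `NNValue`. Below is a hedged sketch of a single reinforcement-learning step; the state sizes, values and discount factor are illustrative only, not taken from this repository:

```python
from microMLP import MicroMLP

# Hypothetical setup : a 4-value state vector and 2 possible actions
mlp = MicroMLP.Create([4, 8, 2], MicroMLP.ACTFUNC_SIGMOID, MicroMLP.LayersFullConnect)

pastState = [MicroMLP.NNValue.FromAnalogSignal(v) for v in (0.1, 0.2, 0.3, 0.4)]
actionIdx = mlp.QLearningPredictBestActionIndex(pastState)

# After the chosen action is applied, the environment gives a new state and a reward
newState = [MicroMLP.NNValue.FromAnalogSignal(v) for v in (0.5, 0.6, 0.7, 0.8)]
reward   = MicroMLP.NNValue.FromAnalogSignal(1.0)

ok = mlp.QLearningLearnForChosenAction( newState,
                                        reward,
                                        pastState,
                                        actionIdx,
                                        terminalState         = False,
                                        discountFactorNNValue = MicroMLP.NNValue.FromAnalogSignal(0.9) )
```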
109 | 110 | | Variable | Description | Default | 111 | | - | - | - | 112 | | `mlp.Eta` | Weighting of the error correction | 0.30 | 113 | | `mlp.Alpha` | Strength of connections plasticity | 0.75 | 114 | | `mlp.Gain` | Network learning gain | 0.99 | 115 | | `mlp.CorrectLearnedMAE` | MAE threshold under which the set of examples is considered learned | 0.02 | 116 | 117 | | Graph | Activation function name | Const | Detail | 118 | | - | - | - | - | 119 | | ![HC²](graphe_heaviside.png "Heaviside binary step") | `"Heaviside"` | MicroMLP.ACTFUNC_HEAVISIDE | Heaviside binary step | 120 | | ![HC²](graphe_sigmoid.png "Logistic (sigmoid or soft step)") | `"Sigmoid"` | MicroMLP.ACTFUNC_SIGMOID | Logistic (sigmoid or soft step) | 121 | | ![HC²](graphe_tanh.png "Hyperbolic tangent") | `"TanH"` | MicroMLP.ACTFUNC_TANH | Hyperbolic tangent | 122 | | ![HC²](graphe_softplus.png "SoftPlus rectifier") | `"SoftPlus"` | MicroMLP.ACTFUNC_SOFTPLUS | SoftPlus rectifier | 123 | | ![HC²](graphe_relu.png "Rectified linear unit") | `"ReLU"` | MicroMLP.ACTFUNC_RELU | Rectified linear unit | 124 | | ![HC²](graphe_gaussian.png "Gaussian function") | `"Gaussian"` | MicroMLP.ACTFUNC_GAUSSIAN | Gaussian function | 125 | 126 | | Layers auto-connect function | Detail | 127 | | - | - | 128 | | `MicroMLP.LayersFullConnect` | Network fully connected | 129 | 130 | ### Using *MicroMLP.Layer* class : 131 | 132 | | Name | Function | 133 | | - | - | 134 | | Constructor | `layer = MicroMLP.Layer(parentMicroMLP, activationFuncName=None, neuronsCount=0)` | 135 | | GetLayerIndex | `idx = layer.GetLayerIndex()` | 136 | | GetNeuron | `neuron = layer.GetNeuron(neuronIndex)` | 137 | | GetNeuronIndex | `idx = layer.GetNeuronIndex(neuron)` | 138 | | AddNeuron | `layer.AddNeuron(neuron)` | 139 | | RemoveNeuron | `layer.RemoveNeuron(neuron)` | 140 | | GetMeanSquareError | `mse = layer.GetMeanSquareError()` | 141 | | GetMeanAbsoluteError | `mae = layer.GetMeanAbsoluteError()` | 142 | | GetMeanSquareErrorAsPercent | `mseP = layer.GetMeanSquareErrorAsPercent()` | 143 | | GetMeanAbsoluteErrorAsPercent | `maeP = layer.GetMeanAbsoluteErrorAsPercent()` | 144 | | Remove | `layer.Remove()` | 145 | 146 | | Property | Example | Read/Write | 147 | | - | - | - | 148 | | ParentMicroMLP | `layer.ParentMicroMLP` | get | 149 | | ActivationFuncName | `layer.ActivationFuncName` | get | 150 | | Neurons | `layer.Neurons` | get | 151 | | NeuronsCount | `layer.NeuronsCount` | get | 152 | 153 | ### Using *MicroMLP.InputLayer(Layer)* class : 154 | 155 | | Name | Function | 156 | | - | - | 157 | | Constructor | `inputLayer = MicroMLP.InputLayer(parentMicroMLP, neuronsCount=0)` | 158 | | SetInputVectorNNValues | `ok = inputLayer.SetInputVectorNNValues(inputVectorNNValues)` | 159 | 160 | ### Using *MicroMLP.OutputLayer(Layer)* class : 161 | 162 | | Name | Function | 163 | | - | - | 164 | | Constructor | `outputLayer = MicroMLP.OutputLayer(parentMicroMLP, activationFuncName, neuronsCount=0)` | 165 | | GetOutputVectorNNValues | `outputVectorNNValues = outputLayer.GetOutputVectorNNValues()` | 166 | | ComputeTargetLayerError | `ok = outputLayer.ComputeTargetLayerError(targetVectorNNValues)` | 167 |
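### Building a network by hand (sketch) :

The layer constructors above register themselves on their parent `MicroMLP` instance, so a network equivalent to the one produced by `MicroMLP.Create` can also be assembled manually. A minimal sketch for illustration:

```python
from microMLP import MicroMLP

mlp      = MicroMLP()
inLayer  = MicroMLP.InputLayer(mlp, neuronsCount=2)
hidLayer = MicroMLP.Layer(mlp, MicroMLP.ACTFUNC_SIGMOID, neuronsCount=3)
outLayer = MicroMLP.OutputLayer(mlp, MicroMLP.ACTFUNC_SIGMOID, neuronsCount=1)

# Fully connect consecutive layers, then put a bias on every non-input neuron
MicroMLP.LayersFullConnect(inLayer, hidLayer)
MicroMLP.LayersFullConnect(hidLayer, outLayer)
for n in hidLayer.Neurons + outLayer.Neurons :
    MicroMLP.Bias(n)

print( mlp.IsNetworkComplete )   # True : first layer is an InputLayer, last an OutputLayer
```

`MicroMLP.Connection` and `MicroMLP.Bias` are documented further below; `Create` performs these same steps internally.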
168 | ### Using *MicroMLP.Neuron* class : 169 | 170 | | Name | Function | 171 | | - | - | 172 | | Constructor | `neuron = MicroMLP.Neuron(parentLayer)` | 173 | | GetNeuronIndex | `idx = neuron.GetNeuronIndex()` | 174 | | GetInputConnections | `connections = neuron.GetInputConnections()` | 175 | | GetOutputConnections | `connections = neuron.GetOutputConnections()` | 176 | | AddInputConnection | `neuron.AddInputConnection(connection)` | 177 | | AddOutputConnection | `neuron.AddOutputConnection(connection)` | 178 | | RemoveInputConnection | `neuron.RemoveInputConnection(connection)` | 179 | | RemoveOutputConnection | `neuron.RemoveOutputConnection(connection)` | 180 | | SetBias | `neuron.SetBias(bias)` | 181 | | GetBias | `neuron.GetBias()` | 182 | | SetOutputNNValue | `neuron.SetOutputNNValue(nnvalue)` | 183 | | ComputeOutput | `neuron.ComputeOutput()` | 184 | | ComputeError | `neuron.ComputeError(targetNNValue=None)` | 185 | | Remove | `neuron.Remove()` | 186 | 187 | | Property | Example | Read/Write | 188 | | - | - | - | 189 | | ParentLayer | `neuron.ParentLayer` | get | 190 | | ComputedOutput | `neuron.ComputedOutput` | get | 191 | | ComputedDeltaError | `neuron.ComputedDeltaError` | get | 192 | | ComputedSignalError | `neuron.ComputedSignalError` | get | 193 | 194 | ### Using *MicroMLP.Connection* class : 195 | 196 | | Name | Function | 197 | | - | - | 198 | | Constructor | `connection = MicroMLP.Connection(neuronSrc, neuronDst, weight=None)` | 199 | | UpdateWeight | `connection.UpdateWeight(eta, alpha)` | 200 | | Remove | `connection.Remove()` | 201 | 202 | | Property | Example | Read/Write | 203 | | - | - | - | 204 | | NeuronSrc | `connection.NeuronSrc` | get | 205 | | NeuronDst | `connection.NeuronDst` | get | 206 | | Weight | `connection.Weight` | get | 207 | 208 | ### Using *MicroMLP.Bias* class : 209 | 210 | | Name | Function | 211 | | - | - | 212 | | Constructor | `bias = MicroMLP.Bias(neuronDst, value=1.0, weight=None)` | 213 | | UpdateWeight | `bias.UpdateWeight(eta, alpha)` | 214 | | Remove | `bias.Remove()` | 215 | 216 | | Property | Example | Read/Write | 217 | | - | - | - | 218 | | NeuronDst | `bias.NeuronDst` | get | 219 | | Value | `bias.Value` | get | 220 | | Weight | `bias.Weight` | get | 221 | 222 | ### Using *MicroMLP.NNValue* static functions : 223 | 224 | | Name | Function | 225 | | - | - | 226 | | FromPercent | `nnvalue = MicroMLP.NNValue.FromPercent(value)` | 227 | | NewPercent | `nnvalue = MicroMLP.NNValue.NewPercent()` | 228 | | FromByte | `nnvalue = MicroMLP.NNValue.FromByte(value)` | 229 | | NewByte | `nnvalue = MicroMLP.NNValue.NewByte()` | 230 | | FromBool | `nnvalue = MicroMLP.NNValue.FromBool(value)` | 231 | | NewBool | `nnvalue = MicroMLP.NNValue.NewBool()` | 232 | | FromAnalogSignal | `nnvalue = MicroMLP.NNValue.FromAnalogSignal(value)` | 233 | | NewAnalogSignal | `nnvalue = MicroMLP.NNValue.NewAnalogSignal()` | 234 | 235 | ### Using *MicroMLP.NNValue* class : 236 | 237 | | Name | Function | 238 | | - | - | 239 | | Constructor | `nnvalue = MicroMLP.NNValue(minValue, maxValue, value)` | 240 | 241 | | Property | Example | Read/Write | 242 | | - | - | - | 243 | | AsFloat | `nnvalue.AsFloat = 639.513` | get / set | 244 | | AsInt | `nnvalue.AsInt = 12345` | get / set | 245 | | AsPercent | `nnvalue.AsPercent = 65` | get / set | 246 | | AsByte | `nnvalue.AsByte = b'\x75'` | get / set | 247 | | AsBool | `nnvalue.AsBool = True` | get / set | 248 | | AsAnalogSignal | `nnvalue.AsAnalogSignal = 0.39472` | get / set | 249 | 250 | 251 | 252 | ### By JC`zic for [HC²](https://www.hc2.fr) ;') 253 | 254 | *Keep it simple, stupid* :+1: 255 | -------------------------------------------------------------------------------- /microMLP.py: -------------------------------------------------------------------------------- 1 | """ 2 | The MIT License (MIT) 3 | Copyright © 2018 Jean-Christophe Bos 4 | """ 5 | 6 | 7 | from math import exp, log 8 | from json import load, dumps
9 | from time import time 10 | 11 | try : 12 | from machine import rng 13 | except : 14 | from random import random 15 | 16 | class MicroMLP : 17 | 18 | ACTFUNC_HEAVISIDE = 'Heaviside' 19 | ACTFUNC_SIGMOID = 'Sigmoid' 20 | ACTFUNC_TANH = 'TanH' 21 | ACTFUNC_SOFTPLUS = 'SoftPlus' 22 | ACTFUNC_RELU = 'ReLU' 23 | ACTFUNC_GAUSSIAN = 'Gaussian' 24 | 25 | Eta = 0.30 26 | Alpha = 0.75 27 | Gain = 0.99 28 | 29 | CorrectLearnedMAE = 0.02 30 | 31 | # ------------------------------------------------------------------------- 32 | # --( Class : NNValue )---------------------------------------------------- 33 | # ------------------------------------------------------------------------- 34 | 35 | class NNValue : 36 | 37 | # -[ Static functions ]--------------------------------- 38 | 39 | @staticmethod 40 | def FromPercent(value) : 41 | return MicroMLP.NNValue(0, 100, value) 42 | @staticmethod 43 | def NewPercent() : 44 | return MicroMLP.NNValue.FromPercent(0) 45 | 46 | @staticmethod 47 | def FromByte(value) : 48 | return MicroMLP.NNValue(0, 255, ord(value)) 49 | @staticmethod 50 | def NewByte() : 51 | return MicroMLP.NNValue.FromByte(b'\x00') 52 | 53 | @staticmethod 54 | def FromBool(value) : 55 | return MicroMLP.NNValue(0, 1, 1 if value else 0) 56 | @staticmethod 57 | def NewBool() : 58 | return MicroMLP.NNValue.FromBool(False) 59 | 60 | @staticmethod 61 | def FromAnalogSignal(value) : 62 | return MicroMLP.NNValue(0, 1, value) 63 | @staticmethod 64 | def NewAnalogSignal() : 65 | return MicroMLP.NNValue.FromAnalogSignal(0) 66 | 67 | # -[ Constructor ]-------------------------------------- 68 | 69 | def __init__(self, minValue, maxValue, value) : 70 | if maxValue - minValue <= 0 : 71 | raise Exception('MicroMLP.NNValue : "maxValue" must be greater than "minValue".') 72 | self._minValue = minValue 73 | self._maxValue = maxValue 74 | self._value = 0.0 75 | self._setScaledValue(minValue, maxValue, value) 76 | 77 | # -[ Private functions ]-------------------------------- 78 | 79 | def _setScaledValue(self, minValue, maxValue, value) : 80 | if value <= minValue : self._value = 0.0 81 | elif value >= maxValue : self._value = 1.0 82 | else : self._value = float(value - minValue) / (maxValue - minValue) 83 | 84 | # -[ Properties ]--------------------------------------- 85 | 86 | @property 87 | def AsFloat(self) : 88 | return self._minValue + (self._value * (self._maxValue - self._minValue)) 89 | @AsFloat.setter 90 | def AsFloat(self, value) : 91 | self._setScaledValue(self._minValue, self._maxValue, value) 92 | 93 | @property 94 | def AsInt(self) : 95 | return int(round(self.AsFloat)) 96 | @AsInt.setter 97 | def AsInt(self, value) : 98 | self._setScaledValue(self._minValue, self._maxValue, value) 99 | 100 | @property 101 | def AsPercent(self) : 102 | return self._value * 100 103 | @AsPercent.setter 104 | def AsPercent(self, value) : 105 | self._setScaledValue(0, 100, value) 106 | 107 | @property 108 | def AsByte(self) : 109 | return chr(int(round(self._value * 255))) 110 | @AsByte.setter 111 | def AsByte(self, value) : 112 | self._setScaledValue(0, 255, ord(value)) 113 | 114 | @property 115 | def AsBool(self) : 116 | return self._value >= 0.5 117 | @AsBool.setter 118 | def AsBool(self, value) : 119 | self._setScaledValue(0, 1, 1 if value else 0) 120 | 121 | @property 122 | def AsAnalogSignal(self) : 123 | return self._value 124 | @AsAnalogSignal.setter 125 | def AsAnalogSignal(self, value) : 126 | self._setScaledValue(0, 1, value) 127 | 128 | # 
------------------------------------------------------------------------- 129 | # ------------------------------------------------------------------------- 130 | # ------------------------------------------------------------------------- 131 | 132 | 133 | # ------------------------------------------------------------------------- 134 | # --( Class : Connection )------------------------------------------------- 135 | # ------------------------------------------------------------------------- 136 | 137 | class Connection : 138 | 139 | # -[ Constructor ]-------------------------------------- 140 | 141 | def __init__(self, neuronSrc, neuronDst, weight=None) : 142 | neuronSrc.AddOutputConnection(self) 143 | neuronDst.AddInputConnection(self) 144 | self._neuronSrc = neuronSrc 145 | self._neuronDst = neuronDst 146 | self._weight = weight if weight else MicroMLP.RandomNetworkWeight() 147 | self._momentumDeltaWeight = 0.0 148 | 149 | # -[ Public functions ]--------------------------------- 150 | 151 | def UpdateWeight(self, eta, alpha) : 152 | deltaWeight = eta \ 153 | * self._neuronSrc.ComputedOutput \ 154 | * self._neuronDst.ComputedSignalError 155 | self._weight += deltaWeight + (alpha * self._momentumDeltaWeight) 156 | self._momentumDeltaWeight = deltaWeight 157 | 158 | def Remove(self) : 159 | if self._neuronSrc and self._neuronDst : 160 | nSrc = self._neuronSrc 161 | nDst = self._neuronDst 162 | self._neuronSrc = None 163 | self._neuronDst = None 164 | nSrc.RemoveOutputConnection(self) 165 | nDst.RemoveInputConnection(self) 166 | 167 | # -[ Properties ]--------------------------------------- 168 | 169 | @property 170 | def NeuronSrc(self) : 171 | return self._neuronSrc 172 | 173 | @property 174 | def NeuronDst(self) : 175 | return self._neuronDst 176 | 177 | @property 178 | def Weight(self) : 179 | return self._weight 180 | 181 | # ------------------------------------------------------------------------- 182 | # ------------------------------------------------------------------------- 183 | # ------------------------------------------------------------------------- 184 | 185 | 186 | # ------------------------------------------------------------------------- 187 | # --( Class : Neuron )----------------------------------------------------- 188 | # ------------------------------------------------------------------------- 189 | 190 | class Neuron : 191 | 192 | # -[ Constructor ]-------------------------------------- 193 | 194 | def __init__(self, parentLayer) : 195 | parentLayer.AddNeuron(self) 196 | self._parentLayer = parentLayer 197 | self._inputConnections = [ ] 198 | self._outputConnections = [ ] 199 | self._inputBias = None 200 | self._computedInput = 0.0 201 | self._computedOutput = 0.0 202 | self._computedDeltaError = 0.0 203 | self._computedSignalError = 0.0 204 | 205 | # -[ Public functions ]--------------------------------- 206 | 207 | def GetNeuronIndex(self) : 208 | return self._parentLayer.GetNeuronIndex(self) 209 | 210 | def GetInputConnections(self) : 211 | return self._inputConnections 212 | 213 | def GetOutputConnections(self) : 214 | return self._outputConnections 215 | 216 | def AddInputConnection(self, connection) : 217 | self._inputConnections.append(connection) 218 | 219 | def AddOutputConnection(self, connection) : 220 | self._outputConnections.append(connection) 221 | 222 | def RemoveInputConnection(self, connection) : 223 | self._inputConnections.remove(connection) 224 | 225 | def RemoveOutputConnection(self, connection) : 226 | self._outputConnections.remove(connection) 
227 | 228 | def SetBias(self, bias) : 229 | self._inputBias = bias 230 | 231 | def GetBias(self) : 232 | return self._inputBias 233 | 234 | def SetOutputNNValue(self, nnvalue) : 235 | self._computedOutput = nnvalue.AsAnalogSignal 236 | 237 | def _computeInput(self) : 238 | sum = 0.0 239 | for conn in self._inputConnections : 240 | sum += conn.NeuronSrc.ComputedOutput * conn.Weight 241 | if self._inputBias : 242 | sum += self._inputBias.Value * self._inputBias.Weight 243 | self._computedInput = sum 244 | 245 | def ComputeOutput(self) : 246 | self._computeInput() 247 | if self._parentLayer._actFunc : 248 | self._computedOutput = self._parentLayer._actFunc( self._computedInput * \ 249 | self._parentLayer.ParentMicroMLP.Gain ) 250 | 251 | def ComputeError(self, targetNNValue=None) : 252 | if targetNNValue : 253 | self._computedDeltaError = targetNNValue.AsAnalogSignal - self.ComputedOutput 254 | else : 255 | self._computedDeltaError = 0.0 256 | for conn in self._outputConnections : 257 | self._computedDeltaError += conn.NeuronDst.ComputedSignalError * conn.Weight 258 | if self._parentLayer._actFunc : 259 | self._computedSignalError = self._computedDeltaError \ 260 | * self._parentLayer.ParentMicroMLP.Gain \ 261 | * self._parentLayer._actFunc( self._computedInput, 262 | derivative = True ) 263 | 264 | def Remove(self) : 265 | for conn in self._inputConnections : 266 | conn.NeuronSrc.RemoveOutputConnection(conn) 267 | for conn in self._outputConnections : 268 | conn.NeuronDst.RemoveInputConnection(conn) 269 | l = self._parentLayer 270 | self._parentLayer = None 271 | l.RemoveNeuron(self) 272 | 273 | # -[ Properties ]--------------------------------------- 274 | 275 | @property 276 | def ParentLayer(self) : 277 | return self._parentLayer 278 | 279 | @property 280 | def ComputedOutput(self) : 281 | return self._computedOutput 282 | 283 | @property 284 | def ComputedDeltaError(self) : 285 | return self._computedDeltaError 286 | 287 | @property 288 | def ComputedSignalError(self) : 289 | return self._computedSignalError 290 | 291 | # ------------------------------------------------------------------------- 292 | # ------------------------------------------------------------------------- 293 | # ------------------------------------------------------------------------- 294 | 295 | 296 | # ------------------------------------------------------------------------- 297 | # --( Class : Bias )------------------------------------------------------- 298 | # ------------------------------------------------------------------------- 299 | 300 | class Bias : 301 | 302 | # -[ Constructor ]-------------------------------------- 303 | 304 | def __init__(self, neuronDst, value=1.0, weight=None) : 305 | neuronDst.SetBias(self) 306 | self._neuronDst = neuronDst 307 | self._value = value 308 | self._weight = weight if weight else MicroMLP.RandomNetworkWeight() 309 | self._momentumDeltaWeight = 0.0 310 | 311 | # -[ Public functions ]--------------------------------- 312 | 313 | def UpdateWeight(self, eta, alpha) : 314 | deltaWeight = eta \ 315 | * self._value \ 316 | * self._neuronDst.ComputedSignalError 317 | self._weight += deltaWeight + (alpha * self._momentumDeltaWeight) 318 | self._momentumDeltaWeight = deltaWeight 319 | 320 | def Remove(self) : 321 | self._neuronDst.SetBias(None) 322 | 323 | # -[ Properties ]--------------------------------------- 324 | 325 | @property 326 | def NeuronDst(self) : 327 | return self._neuronDst 328 | 329 | @property 330 | def Value(self) : 331 | return self._value 332 | 333 | @property 334 | def
Weight(self) : 335 | return self._weight 336 | 337 | # ------------------------------------------------------------------------- 338 | # ------------------------------------------------------------------------- 339 | # ------------------------------------------------------------------------- 340 | 341 | 342 | # ------------------------------------------------------------------------- 343 | # --( Class : Layer )------------------------------------------------------ 344 | # ------------------------------------------------------------------------- 345 | 346 | class Layer : 347 | 348 | # -[ Constructor ]-------------------------------------- 349 | 350 | def __init__(self, parentMicroMLP, activationFuncName=None, neuronsCount=0) : 351 | self._parentMicroMLP = parentMicroMLP 352 | self._actFuncName = activationFuncName 353 | self._actFunc = MicroMLP.GetActivationFunction(activationFuncName) 354 | self._neurons = [ ] 355 | self._parentMicroMLP.AddLayer(self) 356 | for i in range(neuronsCount) : 357 | MicroMLP.Neuron(self) 358 | 359 | # -[ Public functions ]--------------------------------- 360 | 361 | def GetLayerIndex(self) : 362 | return self._parentMicroMLP.GetLayerIndex(self) 363 | 364 | def GetNeuron(self, neuronIndex) : 365 | if neuronIndex >= 0 and neuronIndex < len(self._neurons) : 366 | return self._neurons[neuronIndex] 367 | return None 368 | 369 | def GetNeuronIndex(self, neuron) : 370 | return self._neurons.index(neuron) 371 | 372 | def AddNeuron(self, neuron) : 373 | self._neurons.append(neuron) 374 | 375 | def RemoveNeuron(self, neuron) : 376 | self._neurons.remove(neuron) 377 | 378 | def GetMeanSquareError(self) : 379 | if len(self._neurons) == 0 : 380 | return 0 381 | mse = 0.0 382 | for n in self._neurons : 383 | mse += n.ComputedDeltaError ** 2 384 | return mse / len(self._neurons) 385 | 386 | def GetMeanAbsoluteError(self) : 387 | if len(self._neurons) == 0 : 388 | return 0 389 | mae = 0.0 390 | for n in self._neurons : 391 | mae += abs(n.ComputedDeltaError) 392 | return mae / len(self._neurons) 393 | 394 | def GetMeanSquareErrorAsPercent(self) : 395 | return round( self.GetMeanSquareError() * 100 * 1000 ) / 1000 396 | 397 | def GetMeanAbsoluteErrorAsPercent(self) : 398 | return round( self.GetMeanAbsoluteError() * 100 * 1000 ) / 1000 399 | 400 | def Remove(self) : 401 | while len(self._neurons) > 0 : 402 | self._neurons[0].Remove() 403 | mlp = self._parentMicroMLP 404 | self._parentMicroMLP = None 405 | mlp.RemoveLayer(self) 406 | 407 | # -[ Properties ]--------------------------------------- 408 | 409 | @property 410 | def ParentMicroMLP(self) : 411 | return self._parentMicroMLP 412 | 413 | @property 414 | def ActivationFuncName(self) : 415 | return self._actFuncName 416 | 417 | @property 418 | def Neurons(self) : 419 | return self._neurons 420 | 421 | @property 422 | def NeuronsCount(self) : 423 | return len(self._neurons) 424 | 425 | # ------------------------------------------------------------------------- 426 | # ------------------------------------------------------------------------- 427 | # ------------------------------------------------------------------------- 428 | 429 | 430 | # ------------------------------------------------------------------------- 431 | # --( Class : InputLayer )------------------------------------------------- 432 | # ------------------------------------------------------------------------- 433 | 434 | class InputLayer(Layer) : 435 | 436 | # -[ Constructor ]-------------------------------------- 437 | 438 | def __init__(self, parentMicroMLP, 
neuronsCount=0) : 439 | super().__init__(parentMicroMLP, None, neuronsCount) 440 | 441 | # -[ Public functions ]--------------------------------- 442 | 443 | def SetInputVectorNNValues(self, inputVectorNNValues) : 444 | if len(inputVectorNNValues) == self.NeuronsCount : 445 | for i in range(self.NeuronsCount) : 446 | self._neurons[i].SetOutputNNValue(inputVectorNNValues[i]) 447 | return True 448 | return False 449 | 450 | # ------------------------------------------------------------------------- 451 | # ------------------------------------------------------------------------- 452 | # ------------------------------------------------------------------------- 453 | 454 | 455 | # ------------------------------------------------------------------------- 456 | # --( Class : OutputLayer )------------------------------------------------ 457 | # ------------------------------------------------------------------------- 458 | 459 | class OutputLayer(Layer) : 460 | 461 | # -[ Constructor ]-------------------------------------- 462 | 463 | def __init__(self, parentMicroMLP, activationFuncName, neuronsCount=0) : 464 | super().__init__(parentMicroMLP, activationFuncName, neuronsCount) 465 | 466 | # -[ Public functions ]--------------------------------- 467 | 468 | def GetOutputVectorNNValues(self) : 469 | nnvalues = [ ] 470 | for n in self._neurons : 471 | nnvalues.append(MicroMLP.NNValue.FromAnalogSignal(n.ComputedOutput)) 472 | return nnvalues 473 | 474 | def ComputeTargetLayerError(self, targetVectorNNValues) : 475 | if len(targetVectorNNValues) == self.NeuronsCount : 476 | for i in range(self.NeuronsCount) : 477 | self._neurons[i].ComputeError(targetVectorNNValues[i]) 478 | return True 479 | return False 480 | 481 | # ------------------------------------------------------------------------- 482 | # ------------------------------------------------------------------------- 483 | # ------------------------------------------------------------------------- 484 | 485 | # -[ Constructor ]-------------------------------------- 486 | 487 | def __init__(self) : 488 | self._layers = [ ] 489 | self._examples = [ ] 490 | 491 | # -[ Static functions ]------------------------------------- 492 | 493 | @staticmethod 494 | def Create(neuronsByLayers, activationFuncName, layersAutoConnectFunction=None, useBiasValue=1.0) : 495 | if not neuronsByLayers or len(neuronsByLayers) < 2 : 496 | raise Exception('MicroMLP.Create : Incorrect "neuronsByLayers" parameter.') 497 | for x in neuronsByLayers : 498 | if x < 1 : 499 | raise Exception('MicroMLP.Create : Incorrect count in "neuronsByLayers".') 500 | if not MicroMLP.GetActivationFunction(activationFuncName) : 501 | raise Exception('MicroMLP : Unknown activationFuncName "%s".'
% activationFuncName) 502 | mlp = MicroMLP() 503 | for i in range(len(neuronsByLayers)) : 504 | if i == 0 : 505 | layer = MicroMLP.InputLayer(mlp, neuronsByLayers[i]) 506 | else : 507 | if i == len(neuronsByLayers)-1 : 508 | layer = MicroMLP.OutputLayer(mlp, activationFuncName, neuronsByLayers[i]) 509 | else : 510 | layer = MicroMLP.Layer(mlp, activationFuncName, neuronsByLayers[i]) 511 | if layersAutoConnectFunction : 512 | layersAutoConnectFunction(mlp.GetLayer(i-1), layer) 513 | if useBiasValue : 514 | for n in layer.Neurons : 515 | MicroMLP.Bias(n, useBiasValue) 516 | return mlp 517 | 518 | @staticmethod 519 | def RandomFloat() : 520 | if 'rng' in globals() : 521 | return rng() / (2 ** 24) 522 | return random() 523 | 524 | @staticmethod 525 | def RandomNetworkWeight() : 526 | return (MicroMLP.RandomFloat()-0.5) * 0.7 527 | 528 | @staticmethod 529 | def HeavisideActivation(x, derivative=False) : 530 | if derivative : 531 | return 1.0 532 | return 1.0 if x >= 0 else 0.0 533 | 534 | @staticmethod 535 | def SigmoidActivation(x, derivative=False) : 536 | f = 1.0 / ( 1.0 + exp(-x) ) 537 | if derivative : 538 | return f * (1.0-f) 539 | return f 540 | 541 | @staticmethod 542 | def TanHActivation(x, derivative=False) : 543 | f = ( 2.0 / (1.0 + exp(-2.0 * x)) ) - 1.0 544 | if derivative : 545 | return 1.0 - (f ** 2) 546 | return f 547 | 548 | @staticmethod 549 | def SoftPlusActivation(x, derivative=False) : 550 | if derivative : 551 | return 1 / (1 + exp(-x)) 552 | return log(1 + exp(x)) 553 | 554 | @staticmethod 555 | def ReLUActivation(x, derivative=False) : 556 | if derivative : 557 | return 1.0 if x >= 0 else 0.0 558 | return max(0.0, x) 559 | 560 | @staticmethod 561 | def GaussianActivation(x, derivative=False) : 562 | f = exp(-x ** 2) 563 | if derivative : 564 | return -2 * x * f 565 | return f 566 | 567 | @staticmethod 568 | def LayersFullConnect(layerSrc, layerDst) : 569 | if layerSrc and layerDst and layerSrc != layerDst : 570 | for nSrc in layerSrc.Neurons : 571 | for nDst in layerDst.Neurons : 572 | MicroMLP.Connection(nSrc, nDst) 573 | 574 | @staticmethod 575 | def GetActivationFunction(actFuncName) : 576 | if actFuncName : 577 | funcs = { 578 | MicroMLP.ACTFUNC_HEAVISIDE : MicroMLP.HeavisideActivation, 579 | MicroMLP.ACTFUNC_SIGMOID : MicroMLP.SigmoidActivation, 580 | MicroMLP.ACTFUNC_TANH : MicroMLP.TanHActivation, 581 | MicroMLP.ACTFUNC_SOFTPLUS : MicroMLP.SoftPlusActivation, 582 | MicroMLP.ACTFUNC_RELU : MicroMLP.ReLUActivation, 583 | MicroMLP.ACTFUNC_GAUSSIAN : MicroMLP.GaussianActivation 584 | } 585 | if actFuncName in funcs : 586 | return funcs[actFuncName] 587 | return None 588 | 589 | @staticmethod 590 | def LoadFromFile(filename) : 591 | with open(filename, 'r') as jsonFile : 592 | o = load(jsonFile) 593 | mlp = MicroMLP() 594 | mlp.Eta = o['Eta'] 595 | mlp.Alpha = o['Alpha'] 596 | mlp.Gain = o['Gain'] 597 | oLayers = o['Layers'] 598 | for i in range(len(oLayers)) : 599 | oLayer = oLayers[i] 600 | activationFuncName = oLayer['Func'] 601 | oNeurons = oLayer['Neurons'] 602 | if i == 0 : 603 | layer = MicroMLP.InputLayer(mlp, len(oNeurons)) 604 | else : 605 | if i == len(oLayers)-1 : 606 | layer = MicroMLP.OutputLayer(mlp, activationFuncName, len(oNeurons)) 607 | else : 608 | layer = MicroMLP.Layer(mlp, activationFuncName, len(oNeurons)) 609 | for neuron in layer.Neurons : 610 | oNeuron = oNeurons[neuron.GetNeuronIndex()] 611 | oBias = oNeuron['Bias'] 612 | if oBias : 613 | MicroMLP.Bias(neuron, oBias['Val'], oBias['Wght']) 614 | for oConn in oNeuron['Conn'] : 615 | nSrc = 
mlp.GetLayer(oConn['LSrc']).GetNeuron(oConn['NSrc']) 616 | MicroMLP.Connection(nSrc, neuron, oConn['Wght']) 617 | return mlp 618 | 619 | # -[ Public functions ]--------------------------------- 620 | 621 | def GetLayer(self, layerIndex) : 622 | if layerIndex >= 0 and layerIndex < len(self._layers) : 623 | return self._layers[layerIndex] 624 | return None 625 | 626 | def GetLayerIndex(self, layer) : 627 | return self._layers.index(layer) 628 | 629 | def AddLayer(self, layer) : 630 | self._layers.append(layer) 631 | 632 | def RemoveLayer(self, layer) : 633 | self._layers.remove(layer) 634 | 635 | def ClearAll(self) : 636 | while len(self._layers) > 0 : 637 | self._layers[0].Remove() 638 | 639 | def GetInputLayer(self) : 640 | if self.LayersCount > 0 : 641 | l = self._layers[0] 642 | if type(l) is MicroMLP.InputLayer : 643 | return l 644 | return None 645 | 646 | def GetOutputLayer(self) : 647 | if self.LayersCount > 0 : 648 | l = self._layers[self.LayersCount-1] 649 | if type(l) is MicroMLP.OutputLayer : 650 | return l 651 | return None 652 | 653 | def Learn(self, inputVectorNNValues, targetVectorNNValues) : 654 | if targetVectorNNValues : 655 | return self._simulate(inputVectorNNValues, targetVectorNNValues, True) 656 | return False 657 | 658 | def Test(self, inputVectorNNValues, targetVectorNNValues) : 659 | if targetVectorNNValues : 660 | return self._simulate(inputVectorNNValues, targetVectorNNValues) 661 | return False 662 | 663 | def Predict(self, inputVectorNNValues) : 664 | if self._simulate(inputVectorNNValues) : 665 | return self.GetOutputLayer().GetOutputVectorNNValues() 666 | return None 667 | 668 | def QLearningLearnForChosenAction( self, 669 | stateVectorNNValues, 670 | rewardNNValue, 671 | pastStateVectorNNValues, 672 | chosenActionIndex, 673 | terminalState = True, 674 | discountFactorNNValue = None ) : 675 | if chosenActionIndex >= 0 and \ 676 | chosenActionIndex < self.GetOutputLayer().NeuronsCount : 677 | if not terminalState : 678 | if not discountFactorNNValue or \ 679 | not self._simulate(stateVectorNNValues) : 680 | return False 681 | bestActVal = 0 682 | for nnVal in self.GetOutputLayer().GetOutputVectorNNValues() : 683 | if nnVal.AsAnalogSignal > bestActVal : 684 | bestActVal = nnVal.AsAnalogSignal 685 | if self._simulate(pastStateVectorNNValues) : 686 | targetVectorNNValues = self.GetOutputLayer().GetOutputVectorNNValues() 687 | targetActVal = rewardNNValue.AsAnalogSignal 688 | if not terminalState : 689 | targetActVal += discountFactorNNValue.AsAnalogSignal * bestActVal 690 | targetVectorNNValues[chosenActionIndex].AsAnalogSignal = targetActVal 691 | return self._simulate(pastStateVectorNNValues, targetVectorNNValues, True) 692 | return False 693 | 694 | def QLearningPredictBestActionIndex(self, stateVectorNNValues) : 695 | bestActIdx = None 696 | if self._simulate(stateVectorNNValues) : 697 | maxVal = 0 698 | idx = 0 699 | for nnVal in self.GetOutputLayer().GetOutputVectorNNValues() : 700 | if nnVal.AsAnalogSignal > maxVal : 701 | maxVal = nnVal.AsAnalogSignal 702 | bestActIdx = idx 703 | idx += 1 704 | return bestActIdx 705 | 706 | def SaveToFile(self, filename) : 707 | o = { 708 | 'Eta' : self.Eta, 709 | 'Alpha' : self.Alpha, 710 | 'Gain' : self.Gain, 711 | 'Layers' : [ ] 712 | } 713 | for layer in self.Layers : 714 | oLayer = { 715 | 'Func' : layer.ActivationFuncName, 716 | 'Neurons' : [ ] 717 | } 718 | for neuron in layer.Neurons : 719 | bias = neuron.GetBias() 720 | if bias : 721 | oBias = { 722 | 'Val' : bias.Value, 723 | 'Wght' : bias.Weight 724 | } 725 | 
else : 726 | oBias = None 727 | oNeuron = { 728 | 'Bias' : oBias, 729 | 'Conn' : [ ] 730 | } 731 | for conn in neuron.GetInputConnections() : 732 | oNeuron['Conn'].append( { 733 | 'LSrc' : conn.NeuronSrc.ParentLayer.GetLayerIndex(), 734 | 'NSrc' : conn.NeuronSrc.GetNeuronIndex(), 735 | 'Wght' : conn.Weight 736 | } ) 737 | oLayer['Neurons'].append(oNeuron) 738 | o['Layers'].append(oLayer) 739 | try : 740 | jsonStr = dumps(o) 741 | jsonFile = open(filename, 'wt') 742 | jsonFile.write(jsonStr) 743 | jsonFile.close() 744 | except : 745 | return False 746 | return True 747 | 748 | def AddExample(self, inputVectorNNValues, targetVectorNNValues) : 749 | if self.IsNetworkComplete and \ 750 | inputVectorNNValues and \ 751 | targetVectorNNValues and \ 752 | len(inputVectorNNValues) == self.GetInputLayer().NeuronsCount and \ 753 | len(targetVectorNNValues) == self.GetOutputLayer().NeuronsCount : 754 | self._examples.append( { 755 | 'Input' : inputVectorNNValues, 756 | 'Target' : targetVectorNNValues 757 | } ) 758 | return True 759 | return False 760 | 761 | def ClearExamples(self) : 762 | self._examples.clear() 763 | 764 | def LearnExamples(self, maxSeconds=30, maxCount=None, stopWhenLearned=True, printMAEAverage=True) : 765 | if self.ExamplesCount > 0 and maxSeconds > 0 : 766 | count = 0 767 | endTime = time() + maxSeconds 768 | while time() < endTime and \ 769 | ( maxCount is None or count < maxCount ) : 770 | idx = int( MicroMLP.RandomFloat() * self.ExamplesCount ) 771 | if not self.Learn( self._examples[idx]['Input'], 772 | self._examples[idx]['Target'] ) : 773 | return 0 774 | count += 1 775 | if (stopWhenLearned or printMAEAverage) and count % 10 == 0 : 776 | maeAvg = 0.0 777 | for ex in self._examples : 778 | self.Test(ex['Input'], ex['Target']) 779 | maeAvg += self.MAE 780 | maeAvg /= self.ExamplesCount 781 | if printMAEAverage : 782 | print( "[ STEP : %s / ERROR : %s%% ]" 783 | % ( count, round(maeAvg*100*1000)/1000 ) ) 784 | if stopWhenLearned and maeAvg <= self.CorrectLearnedMAE : 785 | break 786 | return count 787 | return 0 788 | 789 | # -[ Properties ]--------------------------------------- 790 | 791 | @property 792 | def Layers(self) : 793 | return self._layers 794 | 795 | @property 796 | def LayersCount(self) : 797 | return len(self._layers) 798 | 799 | @property 800 | def IsNetworkComplete(self) : 801 | return self.GetInputLayer() is not None and self.GetOutputLayer() is not None 802 | 803 | @property 804 | def MSE(self) : 805 | if self.IsNetworkComplete : 806 | return self.GetOutputLayer().GetMeanSquareError() 807 | return 0.0 808 | 809 | @property 810 | def MAE(self) : 811 | if self.IsNetworkComplete : 812 | return self.GetOutputLayer().GetMeanAbsoluteError() 813 | return 0.0 814 | 815 | @property 816 | def MSEPercent(self) : 817 | if self.IsNetworkComplete : 818 | return self.GetOutputLayer().GetMeanSquareErrorAsPercent() 819 | return 0.0 820 | 821 | @property 822 | def MAEPercent(self) : 823 | if self.IsNetworkComplete : 824 | return self.GetOutputLayer().GetMeanAbsoluteErrorAsPercent() 825 | return 0.0 826 | 827 | @property 828 | def ExamplesCount(self) : 829 | return len(self._examples) 830 | 831 | # -[ Private functions ]------------------------------------ 832 | 833 | def _propagateSignal(self) : 834 | if self.IsNetworkComplete : 835 | idx = 1 836 | while idx < self.LayersCount : 837 | for n in self.GetLayer(idx).Neurons : 838 | n.ComputeOutput() 839 | idx += 1 840 | return True 841 | return False 842 | 843 | def _backPropagateError(self) : 844 | if self.IsNetworkComplete : 
845 | idx = self.LayersCount-1 846 | while idx >= 0 : 847 | for n in self.GetLayer(idx).Neurons : 848 | if idx < self.LayersCount-1 : 849 | if idx > 0 : 850 | n.ComputeError() 851 | for conn in n.GetOutputConnections() : 852 | conn.UpdateWeight(self.Eta, self.Alpha) 853 | bias = n.GetBias() 854 | if bias : 855 | bias.UpdateWeight(self.Eta, self.Alpha) 856 | idx -= 1 857 | return True 858 | return False 859 | 860 | def _simulate(self, inputVectorNNValues, targetVectorNNValues=None, training=False) : 861 | if self.IsNetworkComplete and self.GetInputLayer().SetInputVectorNNValues(inputVectorNNValues) : 862 | self._propagateSignal() 863 | if not targetVectorNNValues : 864 | return not training 865 | if self.GetOutputLayer().ComputeTargetLayerError(targetVectorNNValues) : 866 | if not training : 867 | return True 868 | return self._backPropagateError() 869 | return False 870 | 871 | # ------------------------------------------------------------------------- 872 | # ------------------------------------------------------------------------- 873 | # ------------------------------------------------------------------------- 874 | --------------------------------------------------------------------------------