├── .gitignore ├── .gitmodules ├── .npmignore ├── .travis.yml ├── CHANGELOG.md ├── CMakeLists.txt ├── LICENSE ├── README.md ├── codecov.yml ├── dev ├── cpp │ ├── ConvLayer.cpp │ ├── FCLayer.cpp │ ├── Filter.cpp │ ├── NetMath.cpp │ ├── NetUtil.cpp │ ├── Network.cpp │ ├── Neuron.cpp │ ├── PoolLayer.cpp │ ├── emscripten.cpp │ ├── jsNet.h │ └── printv.h ├── js-WebAssembly │ ├── ConvLayer.js │ ├── FCLayer.js │ ├── Filter.js │ ├── InputLayer.js │ ├── NetMath.js │ ├── NetUtil.js │ ├── NetWASM.js │ ├── Network.js │ ├── Neuron.js │ ├── OutputLayer.js │ └── PoolLayer.js ├── js │ ├── ConvLayer.js │ ├── FCLayer.js │ ├── Filter.js │ ├── InputLayer.js │ ├── NetMath.js │ ├── NetUtil.js │ ├── Network.js │ ├── Neuron.js │ ├── OutputLayer.js │ └── PoolLayer.js └── jsNet.js ├── dist ├── NetWASM.js ├── NetWASM.js.map ├── NetWASM.wasm ├── NetWASM.wasm.map ├── NetWASM.wast ├── jsNet.js ├── jsNet.js.map ├── jsNetJS.concat.js ├── jsNetJS.concat.js.map ├── jsNetJS.min.js ├── jsNetJS.min.js.map ├── jsNetWebAssembly.concat.js ├── jsNetWebAssembly.concat.js.map ├── jsNetWebAssembly.min.js └── jsNetWebAssembly.min.js.map ├── examples ├── confusion.png ├── index.html ├── mnist │ ├── NetChart.js │ ├── fc-784f-100f-10f.png │ ├── mnist.html │ └── readmeimg.png ├── multiInstance │ └── multiple.html ├── nodejsDemo.js └── webpack loading │ ├── dist │ ├── index.html │ └── main.js │ ├── package-lock.json │ ├── package.json │ ├── server.js │ ├── src │ └── index.js │ └── webpack.config.js ├── gruntfile.js ├── package-lock.json ├── package.json ├── server.js └── test ├── cpp-mocks.cpp ├── cpp-test.cpp ├── emscriptenTests.cpp ├── emscriptenTests.js ├── emscriptenTests.wasm ├── js-test.js └── wa-test.js /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | .nyc_output 4 | coverage 5 | Plan.todo 6 | *.stackdump 7 | /test/googletest/ 8 | /build 9 | /examples/mnist/mnist.js -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "test/googletest"] 2 | path = test/googletest 3 | url = https://github.com/google/googletest 4 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .nyc_output 3 | coverage 4 | Plan.todo 5 | *.stackdump 6 | /build 7 | /test/ 8 | /dev/ 9 | .travis.yml 10 | .gitmodules 11 | CHANGELOG.md 12 | CMakeLists.txt 13 | gruntfile.js -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: 3 | - cpp 4 | 5 | script: 6 | - nvm install v8.7.0 7 | - npm install -g codecov 8 | - npm install -g istanbul 9 | - mkdir build 10 | - cd build 11 | - cmake .. 
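  # make builds the googletest-based cpp-tests binary configured by CMakeLists.txt;
  # the npm steps then install dependencies, run the JS test suite with coverage,
  # and upload the results to codecov.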
12 | - make 13 | - npm install 14 | - ./cpp-tests 15 | - npm test 16 | - npm run coverage 17 | - codecov -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Upcoming 2 | --- 3 | #### Global 4 | - Added back ability to use either 'expected' or 'output' keys in data sets 5 | 6 | # 3.4.0 - Bug fixes and improvements 7 | --- 8 | #### Global 9 | - Removed implicit softmax from last layer, to allow multi-variate regression (#42) 10 | 11 | #### WebAssembly 12 | - Added fix for Webpack loading of NetWASM.js 13 | - Added net.delete() for clean-up 14 | 15 | #### OutputLayer 16 | - Added OutputLayer class 17 | 18 | #### Examples 19 | - Added example project for loading jsNet through Webpack 20 | - Added example for using multiple WASM network instances 21 | 22 | #### Bug fixes 23 | - WASM misassigned learning rate defaults 24 | - WASM momentum not training 25 | 26 | # 3.3.0 - Misc Improvements 27 | --- 28 | #### Network 29 | - Added confusion matrix 30 | - Made it possible to pass volume (3D array) input data 31 | - Added callback interval config to .train() 32 | - Added collectErrors config to .train() and .test() 33 | 34 | #### InputLayer 35 | - Added InputLayer class 36 | 37 | #### Examples 38 | - Added MNIST dev environment example 39 | 40 | # 3.2.0 - IMG data, validation, early stopping 41 | --- 42 | #### Network 43 | - Added weight+bias importing and exporting via images, using IMGArrays 44 | - Added validation config to .train(), with interval config 45 | - Added early stopping to validation, with threshold stopping condition 46 | - Added early stopping patience condition 47 | - Added early stopping divergence condition 48 | - Breaking change: "error" key in training callbacks has been changed to "trainingError" 49 | - Breaking change: Removed ability to use either data keys 'expected' and 'output'. Now just 'expected'.
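A minimal, hypothetical sketch of how the validation and early stopping options above might be passed to `.train()` (`trainingData` and `validationData` are arrays of sample objects; the nested option names are illustrative rather than authoritative, and the README documents the exact API):

```js
// Illustrative sketch only: the validation/earlyStopping option names are invented here.
const net = new Network({layers: [new FCLayer(784), new FCLayer(100), new FCLayer(10)]})

net.train(trainingData, {
    miniBatchSize: 32,
    collectErrors: true,          // added in 3.3.0
    validation: {
        data: validationData,     // held-out {input, expected} items
        interval: 1000,           // validate every 1000 training iterations
        earlyStopping: {
            type: "patience",     // or "threshold" / "divergence"
            patience: 5
        }
    }
})
```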
50 | 51 | #### NetUtil 52 | - Added splitData function 53 | - Added normalize function 54 | 55 | #### NetMath 56 | - Added root mean squared error cost function 57 | - Added momentum weight update function 58 | - Breaking change: Renamed "vanilla update fn" to "vanilla sgd" 59 | 60 | # 3.1.0 - Optimizations 61 | --- 62 | #### ConvLayer 63 | - Optimized errors structure 64 | - Optimized bias structure 65 | - Optimized activations structure 66 | - Optimized weights structure 67 | - Optimized deltaWeights structure 68 | - Optimized deltaBiase structure 69 | 70 | #### NetUtil 71 | - Optimized convolve 72 | 73 | #### FCLayer 74 | - Optimized weights structure 75 | - Optimized bias structure 76 | - Optimized deltaWeights structure 77 | - Optimized sums structure 78 | - Optimized errors structure and net errors propagation 79 | - Optimized activations structure 80 | - Optimized forward() 81 | - Optimized backward() 82 | - Optimized deltaBias structure 83 | 84 | #### Global 85 | - Changed framework loading to allow choosing between versions at runtime 86 | - Added basic server and browser + nodejs demos for how to load jsNet 87 | - Bug fixes 88 | - Changed the way classes were bundled, to fix some bundler compatibility issues (see #33) 89 | 90 | # 3.0.0 - WebAssembly 91 | --- 92 | #### WebAssembly 93 | - Complete, rewritten, WebAssembly version of jsNet 94 | 95 | #### Global 96 | - Many bug fixes 97 | - Removed default configuration values: l1, l2 98 | - Added layer specific activation function config for FC layers, and ability to turn it off 99 | - Reworked regularization algorithm 100 | - Reworked layer specific activation function assignments 101 | - Reworked net error propagation, using softmax 102 | - net.forward() function now returns softmax activations by default 103 | 104 | #### JavaScript 105 | - Removed babel transpilation (it's 2018) 106 | 107 | # 2.1.0 - Optimizations 108 | --- 109 | #### NetUtil 110 | - Optimized addZeroPadding() - ~68% faster 111 | - Optimized uniform() - ~588% faster 112 | - Optimized gaussian() - ~450% faster 113 | 114 | #### FCLayer 115 | - Optimized resetDeltaWeights() and applyDeltaWeights() - ~18% faster (overall) 116 | 117 | #### NetMath 118 | - Optimized softmax() - ~924% faster 119 | 120 | #### ConvLayer 121 | - Restricted filters' dropout maps only to when dropout is configured - less memory usage 122 | 123 | #### Bug Fixes 124 | - Fixed bug caused by minification and disabled name mangling 125 | 126 | # 2.0.0 - Convolutional Networks 127 | --- 128 | #### Network 129 | - New name: jsNet 130 | - Restructured to allow multiple layer types 131 | - Added conv config for configuring filterSize, zeroPadding, stride ConvLayer default values 132 | - Added pool config for configuring size and stride PoolLayer default values 133 | - Added (input) channels config, used by ConvLayers 134 | - Re-wrote the JSON import/export. 
Check README for details on backward compatibility 135 | - Removed ability to create a network by just giving layer types in the list 136 | - Can check the version number via Network.version 137 | - Renamed adaptiveLR to updateFn 138 | 139 | #### ConvLayer 140 | - Added ConvLayer.js 🎉 with activation, filterCount, filterSize, zeroPadding and stride configs 141 | 142 | #### Filter 143 | - Added Filter.js 144 | 145 | #### PoolLayer 146 | - Added PoolLayer, with stride and activation configs 147 | 148 | #### NetUtil 149 | - Added NetUtil.js 150 | - Added addZeroPadding 151 | - Added arrayToMap 152 | - Added arrayToVolume 153 | - Added 4 other helper functions 154 | 155 | #### NetMath 156 | - Renamed noadaptivelr to vanillaupdatefn 157 | 158 | #### FCLayer 159 | - Renamed Layer.js to FCLayer.js. Layer still exists as an alias to what is now FCLayer 160 | 161 | #### Bug Fixes 162 | - Fixed training callback giving iteration index, not count (-1) 163 | 164 | # 1.5.0 - Training, Misc 165 | --- 166 | #### Network 167 | - Made string configs (activation, adaptiveLR, cost, distribution) case/underscore/space insensitive. 168 | - Allow custom activation functions to be configured 169 | - Allow custom cost functions to be configured 170 | - Allow custom weight distribution functions to be configured 171 | - Added time elapsed to training/testing logs and elapsed milliseconds to training callback 172 | - Added log option to training/testing to disable logging for each one 173 | - Added mini batch SGD training, and miniBatchSize config for .train() as its config 174 | - Breaking change: classes are no longer required straight into global context. See readme. 175 | - Added shuffle .train() option 176 | - Added callback .test() option 177 | - Breaking change: Updated default values 178 | 179 | #### NetMath 180 | - Breaking change (if you were using the NetMath functions directly): Made config functions' names lower case 181 | 182 | #### Bug Fixes 183 | - Fixed iteration error logged when testing being the averaged total at that point 184 | 185 | # 1.4.0 - Weights Initialization 186 | --- 187 | #### Network 188 | - Reworked current weights initialization to be configurable 189 | - Set old weights init to uniform distribution, with additional limit config 190 | - Added mean as weightsConfig option, for gaussian distribution 191 | - Added stdDeviation as weightsConfig option, for gaussian distribution 192 | 193 | #### NetMath 194 | - Added standardDeviation to NetMath 195 | - Added gaussian weights distribution 196 | - Added xavierNormal weights distribution 197 | - Added lecunUniform weights distribution 198 | - Added lecunNormal weights distribution 199 | - Added xavierUniform weights distribution 200 | 201 | # v1.3.0 - Regularization 202 | --- 203 | #### Network 204 | - Added dropout, with dropout configuration 205 | - Added L2 regularization, via l2 strength config 206 | - Added L1 regularization, via l1 strength config 207 | - Added max norm regularization, via the maxNorm threshold value config 208 | 209 | #### Bug Fixes 210 | - Fixed error value logged accumulating across epochs, instead of resetting to 0 211 | - Fixed epoch counter logged resetting instead of accumulating across training sessions 212 | 213 | # v1.2.0 - Activation functions (Part 1) 214 | --- 215 | #### Network 216 | - Added lreluSlope, for lrelu activation 217 | - Added eluAlpha, for elu activation 218 | 219 | #### NetMath 220 | - Added tanh activation function 221 | - Added relu activation function 222 | - Added lrelu activation 
function 223 | - Added rrelu activation function 224 | - Added lecuntanh activation function 225 | - Added sech to NetMath 226 | - Added elu activation function 227 | 228 | # v1.1.0 - Update Functions 229 | --- 230 | #### Network 231 | - Added rho as a network configuration 232 | - Added rmsDecay as a network configuration 233 | - Added adaptiveLR as a network configuration 234 | 235 | #### NetMath 236 | - Added adam as adaptiveLR configuration 237 | - Added RMSProp as adaptiveLR configuration 238 | - Added adagrad as adaptiveLR configuration 239 | - Added gain as adaptiveLR configuration 240 | - Added Mean Squared Error cost function 241 | 242 | # v1.0.0 243 | ---- 244 | Initial release -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8 FATAL_ERROR) 2 | 3 | project(jsNet) 4 | 5 | set(CMAKE_CXX_STANDARD 14) 6 | enable_language(C) 7 | enable_language(CXX) 8 | 9 | if(CMAKE_CXX_COMPILER_ID MATCHES GNU) 10 | set(CMAKE_CXX_FLAGS "-Wall -Wno-unknown-pragmas -Wno-sign-compare -Woverloaded-virtual -Wwrite-strings -Wno-unused") 11 | set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g3") 12 | set(CMAKE_CXX_FLAGS_RELEASE "-O3") 13 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") 14 | endif() 15 | 16 | include_directories( 17 | ${PROJECT_SOURCE_DIR}/dev/cpp 18 | ) 19 | 20 | add_library( 21 | jsNet 22 | dev/cpp/Network.cpp 23 | ) 24 | 25 | set(GOOGLETEST_ROOT test/googletest/googletest CACHE STRING "Google Test source root") 26 | set(GOOGLEMOCK_ROOT test/googletest/googlemock CACHE STRING "Google Mock source root") 27 | 28 | include_directories( 29 | ${PROJECT_SOURCE_DIR}/${GOOGLETEST_ROOT} 30 | ${PROJECT_SOURCE_DIR}/${GOOGLETEST_ROOT}/include 31 | ${PROJECT_SOURCE_DIR}/${GOOGLEMOCK_ROOT} 32 | ${PROJECT_SOURCE_DIR}/${GOOGLEMOCK_ROOT}/include 33 | ) 34 | 35 | set(GOOGLETEST_SOURCES 36 | ${PROJECT_SOURCE_DIR}/${GOOGLETEST_ROOT}/src/gtest-all.cc 37 | ${PROJECT_SOURCE_DIR}/${GOOGLETEST_ROOT}/src/gtest_main.cc 38 | ${PROJECT_SOURCE_DIR}/${GOOGLEMOCK_ROOT}/src/gmock-all.cc 39 | ${PROJECT_SOURCE_DIR}/${GOOGLEMOCK_ROOT}/src/gmock_main.cc 40 | ) 41 | 42 | foreach(_source ${GOOGLETEST_SOURCES}) 43 | set_source_files_properties(${_source} PROPERTIES GENERATED 1) 44 | endforeach() 45 | 46 | add_library(googletest ${GOOGLETEST_SOURCES}) 47 | 48 | add_executable( 49 | cpp-tests 50 | test/cpp-test.cpp 51 | ) 52 | 53 | add_dependencies(cpp-tests googletest) 54 | 55 | target_link_libraries( 56 | cpp-tests 57 | googletest 58 | jsNet 59 | pthread 60 | ) 61 | 62 | include(CTest) 63 | enable_testing() 64 | 65 | add_test(unit ${PROJECT_BINARY_DIR}/cpp-tests) 66 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Dan Ruta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: 95% 6 | threshold: 5% 7 | patch: 8 | default: 9 | target: 95% 10 | target: 5% -------------------------------------------------------------------------------- /dev/cpp/ConvLayer.cpp: -------------------------------------------------------------------------------- 1 | 2 | ConvLayer::ConvLayer (int netI, int s) : Layer(netI, s) { 3 | netInstance = netI; 4 | size = s; 5 | type = "Conv"; 6 | hasActivation = false; 7 | } 8 | 9 | ConvLayer::~ConvLayer (void) { 10 | for (int f=0; f(filters.size(), 1); 32 | deltaBiases = std::vector(filters.size(), 0); 33 | 34 | for (int f=0; fweightInitFn(netInstance, layerIndex, filterSize)); 45 | } 46 | } 47 | 48 | if (net->dropout != 1) { 49 | filters[f]->dropoutMap = NetUtil::createVolume(1, outMapSize, outMapSize, 0)[0]; 50 | } 51 | 52 | filters[f]->init(netInstance, channels, filterSize); 53 | 54 | filterDeltaWeights.push_back(NetUtil::createVolume(channels, filterSize, filterSize, 0)); 55 | } 56 | 57 | errors = NetUtil::createVolume(filters.size(), outMapSize, outMapSize, 0); 58 | activations = NetUtil::createVolume(filters.size(), outMapSize, outMapSize, 0); 59 | } 60 | 61 | void ConvLayer::forward (void) { 62 | 63 | Network* net = Network::getInstance(netInstance); 64 | 65 | std::vector > > actvs; 66 | 67 | if (prevLayer->type=="FC") { 68 | actvs = NetUtil::arrayToVolume(prevLayer->actvns, channels); 69 | } else { 70 | actvs = prevLayer->activations; 71 | } 72 | 73 | for (int f=0; fsumMap = NetUtil::convolve(actvs, zeroPadding, filterWeights[f], channels, stride, biases[f]); 76 | 77 | for (int sumY=0; sumYsumMap.size(); sumY++) { 78 | for (int sumX=0; sumXsumMap.size(); sumX++) { 79 | 80 | if (net->dropout != 1) { 81 | filters[f]->dropoutMap[sumY][sumX] = (double) rand() / (RAND_MAX) > net->dropout; 82 | } 83 | 84 | if (net->dropout != 1 && net->isTraining && filters[f]->dropoutMap[sumY][sumX]) { 85 | activations[f][sumY][sumX] = 0; 86 | 87 | } else if (hasActivation) { 88 | 89 | activations[f][sumY][sumX] = activationC(filters[f]->sumMap[sumY][sumX], false, filters[f]) / net->dropout; 90 | 91 | } else { 92 | activations[f][sumY][sumX] = filters[f]->sumMap[sumY][sumX]; 93 | } 94 | } 95 | } 96 | } 97 | } 98 | 99 | void ConvLayer::backward (bool lastLayer) { 100 | 101 | if (nextLayer->type == "FC") { 102 | 103 | // For each filter, build the errorMap from the weighted neuron errors in the next FCLayer corresponding to each value in the activation map 104 | for (int f=0; fneurons.size(); n++) { 112 | errors[f][emY][emX] += nextLayer->errs[n] * nextLayer->weights[n][weightI]; 113 | } 114 | } 115 | } 116 | } 117 | 118 | } else if (nextLayer->type == "Conv") { 119 | 120 | for (int f=0; fzeroPadding*2, nextLayer, f); 122 | } 123 | 124 | } else { 125 | 126 | for (int f=0; ferrors[f][r][v]; 130 | } 131 | } 
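                // A following Pool layer keeps its error volume at this layer's output
                // resolution (see PoolLayer::init), so its values can be accumulated into
                // this layer's error maps index-for-index, as above.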
132 | } 133 | } 134 | 135 | // Apply derivative to each error value 136 | for (int f=0; fdropoutMap.size() && filters[f]->dropoutMap[row][col]) { 141 | errors[f][row][col] = 0; 142 | } else if (hasActivation) { 143 | errors[f][row][col] *= activationC(filters[f]->sumMap[row][col], true, filters[f]); 144 | } 145 | } 146 | } 147 | } 148 | 149 | NetUtil::buildConvDWeights(this); 150 | } 151 | 152 | void ConvLayer::resetDeltaWeights (void) { 153 | 154 | deltaBiases = std::vector(size, 0); 155 | 156 | for (int f=0; fdropoutMap.size()) { 173 | for (int r=0; rdropoutMap.size(); r++) { 174 | for (int c=0; cdropoutMap[0].size(); c++) { 175 | filters[f]->dropoutMap[r][c] = false; 176 | } 177 | } 178 | } 179 | } 180 | } 181 | 182 | void ConvLayer::applyDeltaWeights (void) { 183 | 184 | Network* net = Network::getInstance(netInstance); 185 | 186 | for (int f=0; fl2) net->l2Error += 0.5 * net->l2 * pow(filterWeights[f][c][r][v], 2); 191 | if (net->l1) net->l1Error += net->l1 * fabs(filterWeights[f][c][r][v]); 192 | } 193 | } 194 | } 195 | } 196 | 197 | // Function pointers are far too slow, for this 198 | // Using code repetitive switch statements makes a substantial perf difference 199 | // Doesn't mean I like it :( 200 | switch (net->updateFnIndex) { 201 | case 0: // vanilla 202 | for (int f=0; fl2 * filterWeights[f][c][r][v] 209 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 210 | 211 | filterWeights[f][c][r][v] = NetMath::vanillasgd(netInstance, filterWeights[f][c][r][v], regularized); 212 | 213 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 214 | } 215 | } 216 | } 217 | biases[f] = NetMath::vanillasgd(netInstance, biases[f], deltaBiases[f]); 218 | } 219 | break; 220 | case 1: // gain 221 | for (int f=0; fl2 * filterWeights[f][c][r][v] 228 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 229 | 230 | filterWeights[f][c][r][v] = NetMath::gain(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 231 | 232 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 233 | } 234 | } 235 | } 236 | biases[f] = NetMath::gain(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 237 | } 238 | break; 239 | case 2: // adagrad 240 | for (int f=0; fl2 * filterWeights[f][c][r][v] 247 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 248 | 249 | filterWeights[f][c][r][v] = NetMath::adagrad(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 250 | 251 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 252 | } 253 | } 254 | } 255 | biases[f] = NetMath::adagrad(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 256 | } 257 | break; 258 | case 3: // rmsprop 259 | for (int f=0; fl2 * filterWeights[f][c][r][v] 266 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 267 | 268 | filterWeights[f][c][r][v] = NetMath::rmsprop(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 269 | 270 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 271 | } 272 | } 273 | } 274 | biases[f] = NetMath::rmsprop(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 275 | } 276 | break; 277 | case 4: // adam 278 | for (int f=0; fl2 * filterWeights[f][c][r][v] 285 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 
1 : -1)) / net->miniBatchSize; 286 | 287 | filterWeights[f][c][r][v] = NetMath::adam(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 288 | 289 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 290 | } 291 | } 292 | } 293 | biases[f] = NetMath::adam(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 294 | } 295 | break; 296 | case 5: // adadelta 297 | for (int f=0; fl2 * filterWeights[f][c][r][v] 304 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 305 | 306 | filterWeights[f][c][r][v] = NetMath::adadelta(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 307 | 308 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 309 | } 310 | } 311 | } 312 | biases[f] = NetMath::adadelta(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 313 | } 314 | break; 315 | case 6: // momentum 316 | for (int f=0; fl2 * filterWeights[f][c][r][v] 323 | + net->l1 * (filterWeights[f][c][r][v] > 0 ? 1 : -1)) / net->miniBatchSize; 324 | 325 | filterWeights[f][c][r][v] = NetMath::momentum(netInstance, filterWeights[f][c][r][v], regularized, filters[f], c, r, v); 326 | 327 | if (net->maxNorm) net->maxNormTotal += filterWeights[f][c][r][v] * filterWeights[f][c][r][v]; 328 | } 329 | } 330 | } 331 | biases[f] = NetMath::momentum(netInstance, biases[f], deltaBiases[f], filters[f], -1, -1, -1); 332 | } 333 | break; 334 | } 335 | 336 | if (net->maxNorm) { 337 | net->maxNormTotal = sqrt(net->maxNormTotal); 338 | NetMath::maxNorm(netInstance); 339 | } 340 | } 341 | 342 | void ConvLayer::backUpValidation (void) { 343 | validationBiases = biases; 344 | validationFilterWeights = filterWeights; 345 | } 346 | 347 | void ConvLayer::restoreValidation (void) { 348 | biases = validationBiases; 349 | filterWeights = validationFilterWeights; 350 | } 351 | -------------------------------------------------------------------------------- /dev/cpp/FCLayer.cpp: -------------------------------------------------------------------------------- 1 | 2 | FCLayer::FCLayer (int netI, int s) : Layer(netI, s) { 3 | netInstance = netI; 4 | size = s; 5 | type = "FC"; 6 | hasActivation = false; 7 | } 8 | 9 | FCLayer::~FCLayer (void) { 10 | for (int n=0; n(size, 1); 27 | deltaBiases = std::vector(size, 0); 28 | } 29 | 30 | for (int n=0; ntype == "FC") { 39 | weightsCount = prevLayer->size; 40 | } else if (prevLayer->type == "Conv") { 41 | weightsCount = prevLayer->filters.size() * prevLayer->outMapSize * prevLayer->outMapSize; 42 | } else { 43 | weightsCount = prevLayer->activations.size() * prevLayer->outMapSize * prevLayer->outMapSize; 44 | } 45 | 46 | weights.push_back(Network::getInstance(netInstance)->weightInitFn(netInstance, layerIndex, weightsCount)); 47 | deltaWeights.push_back(std::vector(weightsCount, 0)); 48 | } 49 | 50 | neuron->init(netInstance, weightsCount); 51 | neurons.push_back(neuron); 52 | 53 | sums.push_back(0); 54 | errs.push_back(0); 55 | actvns.push_back(0); 56 | } 57 | } 58 | 59 | void FCLayer::forward (void) { 60 | 61 | Network* net = Network::getInstance(netInstance); 62 | 63 | for (int n=0; ndropped = (double) rand() / (RAND_MAX) > net->dropout; 66 | 67 | if (net->isTraining && neurons[n]->dropped) { 68 | actvns[n] = 0; 69 | 70 | } else { 71 | sums[n] = biases[n]; 72 | 73 | if (prevLayer->type == "FC") { 74 | for (int pn=0; pnneurons.size(); pn++) { 75 | sums[n] += prevLayer->actvns[pn] * weights[n][pn]; 76 | } 77 | } else if 
(prevLayer->type == "Conv") { 78 | 79 | for (int f=0; fsize; f++) { 80 | for (int y=0; youtMapSize; y++) { 81 | for (int x=0; xoutMapSize; x++) { 82 | sums[n] += prevLayer->activations[f][y][x] 83 | * weights[n][f*prevLayer->outMapSize*prevLayer->outMapSize + y*prevLayer->outMapSize + x]; 84 | } 85 | } 86 | } 87 | 88 | } else { 89 | for (int c=0; cchannels; c++) { 90 | for (int r=0; routMapSize; r++) { 91 | for (int v=0; voutMapSize; v++) { 92 | sums[n] += prevLayer->activations[c][r][v] * weights[n][c * prevLayer->outMapSize * prevLayer->outMapSize + r * prevLayer->outMapSize + v ]; 93 | } 94 | } 95 | } 96 | } 97 | 98 | if (hasActivation) { 99 | actvns[n] = activation(sums[n], false, neurons[n]) / net->dropout; 100 | } else { 101 | actvns[n] = sums[n] / net->dropout; 102 | } 103 | } 104 | } 105 | 106 | if (softmax) { 107 | actvns = NetMath::softmax(actvns); 108 | } 109 | } 110 | 111 | void FCLayer::backward (bool lastLayer) { 112 | 113 | Network* net = Network::getInstance(netInstance); 114 | 115 | for (int n=0; ndropped) { 118 | errs[n] = 0; 119 | deltaBiases[n] = 0; 120 | 121 | } else { 122 | 123 | if (!lastLayer) { 124 | if (hasActivation) { 125 | neurons[n]->derivative = activation(sums[n], true, neurons[n]); 126 | } else { 127 | neurons[n]->derivative = 1; 128 | } 129 | 130 | double weightedErrors = 0.0; 131 | 132 | for (int nn=0; nnneurons.size(); nn++) { 133 | weightedErrors += nextLayer->errs[nn] * nextLayer->weights[nn][n]; 134 | } 135 | 136 | errs[n] = neurons[n]->derivative * weightedErrors; 137 | } 138 | 139 | if (prevLayer->type == "FC") { 140 | for (int wi=0; wiactvns[wi]; 142 | } 143 | } else { 144 | 145 | int counter = 0; 146 | int span = prevLayer->activations[0].size(); 147 | 148 | for (int c=0; cactivations.size(); c++) { 149 | for (int row=0; rowactivations[c][row][col]; 152 | } 153 | } 154 | } 155 | } 156 | 157 | deltaBiases[n] += errs[n]; 158 | } 159 | } 160 | } 161 | 162 | void FCLayer::resetDeltaWeights (void) { 163 | 164 | deltaBiases = std::vector(neurons.size(), 0); 165 | 166 | for (int n=0; n(weights[n].size(), 0); 168 | } 169 | } 170 | 171 | 172 | void FCLayer::applyDeltaWeights (void) { 173 | 174 | Network* net = Network::getInstance(netInstance); 175 | 176 | for (int n=0; nl2) net->l2Error += 0.5 * net->l2 * pow(weights[n][dw], 2); 179 | if (net->l1) net->l1Error += net->l1 * fabs(weights[n][dw]); 180 | } 181 | } 182 | 183 | // Function pointers are far too slow for this 184 | // Using code repetitive switch statements makes a substantial perf difference 185 | // Doesn't mean I like it :( 186 | switch (net->updateFnIndex) { 187 | case 0: // vanilla 188 | for (int n=0; nl2 * weights[n][dw] 193 | + net->l1 * (weights[n][dw] > 0 ? 1 : -1)) / net->miniBatchSize; 194 | 195 | weights[n][dw] = NetMath::vanillasgd(netInstance, weights[n][dw], regularized); 196 | 197 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 198 | } 199 | biases[n] = NetMath::vanillasgd(netInstance, biases[n], deltaBiases[n]); 200 | } 201 | break; 202 | case 1: // gain 203 | for (int n=0; nl2 * weights[n][dw] 208 | + net->l1 * (weights[n][dw] > 0 ? 
1 : -1)) / net->miniBatchSize; 209 | 210 | weights[n][dw] = NetMath::gain(netInstance, weights[n][dw], deltaWeights[n][dw], neurons[n], dw); 211 | 212 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 213 | } 214 | biases[n] = NetMath::gain(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 215 | } 216 | break; 217 | case 2: // adagrad 218 | for (int n=0; nl2 * weights[n][dw] 223 | + net->l1 * (weights[n][dw] > 0 ? 1 : -1)) / net->miniBatchSize; 224 | 225 | weights[n][dw] = NetMath::adagrad(netInstance, weights[n][dw], regularized, neurons[n], dw); 226 | 227 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 228 | } 229 | biases[n] = NetMath::adagrad(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 230 | } 231 | break; 232 | case 3: // rmsprop 233 | for (int n=0; nl2 * weights[n][dw] 238 | + net->l1 * (weights[n][dw] > 0 ? 1 : -1)) / net->miniBatchSize; 239 | 240 | weights[n][dw] = NetMath::rmsprop(netInstance, weights[n][dw], regularized, neurons[n], dw); 241 | 242 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 243 | } 244 | biases[n] = NetMath::rmsprop(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 245 | } 246 | break; 247 | case 4: // adam 248 | for (int n=0; nl2 * weights[n][dw] 253 | + net->l1 * (weights[n][dw] > 0 ? 1 : -1)) / net->miniBatchSize; 254 | 255 | weights[n][dw] = NetMath::adam(netInstance, weights[n][dw], regularized, neurons[n], dw); 256 | 257 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 258 | } 259 | biases[n] = NetMath::adam(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 260 | } 261 | break; 262 | case 5: // adadelta 263 | for (int n=0; nl2 * weights[n][dw] 268 | + net->l1 * (weights[n][dw] > 0 ? 1 : -1)) / net->miniBatchSize; 269 | 270 | weights[n][dw] = NetMath::adadelta(netInstance, weights[n][dw], regularized, neurons[n], dw); 271 | 272 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 273 | } 274 | biases[n] = NetMath::adadelta(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 275 | } 276 | break; 277 | case 6: // momentum 278 | for (int n=0; nl2 * weights[n][dw] 283 | + net->l1 * (weights[n][dw] > 0 ? 
1 : -1)) / net->miniBatchSize; 284 | 285 | weights[n][dw] = NetMath::momentum(netInstance, weights[n][dw], regularized, neurons[n], dw); 286 | 287 | if (net->maxNorm) net->maxNormTotal += weights[n][dw] * weights[n][dw]; 288 | } 289 | biases[n] = NetMath::momentum(netInstance, biases[n], deltaBiases[n], neurons[n], -1); 290 | } 291 | break; 292 | } 293 | 294 | if (net->maxNorm) { 295 | net->maxNormTotal = sqrt(net->maxNormTotal); 296 | NetMath::maxNorm(netInstance); 297 | } 298 | } 299 | 300 | void FCLayer::backUpValidation (void) { 301 | 302 | validationBiases = {}; 303 | validationWeights = {}; 304 | 305 | for (int n=0; n neuron; 309 | 310 | for (int w=0; wupdateFnIndex) { 7 | case 1: // gain 8 | biasGain = 1; 9 | weightGain = NetUtil::createVolume(channels, filterSize, filterSize, 1); 10 | break; 11 | case 2: // adagrad 12 | case 3: // rmsprop 13 | case 5: // adadelta 14 | case 6: // momentum 15 | biasCache = 0; 16 | weightsCache = NetUtil::createVolume(channels, filterSize, filterSize, 0); 17 | 18 | if (net->updateFnIndex == 5) { 19 | adadeltaBiasCache = 0; 20 | adadeltaCache = NetUtil::createVolume(channels, filterSize, filterSize, 0); 21 | } 22 | break; 23 | case 4: // adam 24 | m = 0; 25 | v = 0; 26 | break; 27 | } 28 | 29 | if (net->activation == &NetMath::lrelu) { 30 | lreluSlope = net->lreluSlope; 31 | } else if (net->activation == &NetMath::rrelu) { 32 | rreluSlope = ((double) rand() / (RAND_MAX))/5 - 0.1; 33 | } else if (net->activation == &NetMath::elu) { 34 | eluAlpha = net->eluAlpha; 35 | } 36 | } -------------------------------------------------------------------------------- /dev/cpp/NetMath.cpp: -------------------------------------------------------------------------------- 1 | // Activation functions 2 | template 3 | double NetMath::sigmoid(double value, bool prime, T* neuron) { 4 | double val = 1 / (1+exp(-value)); 5 | return prime ? val*(1-val) 6 | : val; 7 | } 8 | 9 | template 10 | double NetMath::tanh(double value, bool prime, T* neuron) { 11 | double ex = exp(2*value); 12 | double val = prime ? 4 / pow(exp(value)+exp(-value), 2) : (ex-1)/(ex+1); 13 | return val==0 ? 1e-18 : val; 14 | } 15 | 16 | template 17 | double NetMath::lecuntanh(double value, bool prime, T* neuron) { 18 | return prime ? 1.15333 * pow(NetMath::sech((2.0/3.0) * value), 2) 19 | : 1.7159 * NetMath::tanh((2.0/3.0) * value, false, neuron); 20 | } 21 | 22 | template 23 | double NetMath::relu(double value, bool prime, T* neuron) { 24 | return prime ? (value > 0 ? 1 : 0) 25 | : (value>=0 ? value : 0); 26 | } 27 | 28 | template 29 | double NetMath::lrelu(double value, bool prime, T* neuron) { 30 | return prime ? value > 0 ? 1 : neuron->lreluSlope 31 | : fmax(neuron->lreluSlope * fabs(value), value); 32 | } 33 | 34 | template 35 | double NetMath::rrelu(double value, bool prime, T* neuron) { 36 | return prime ? value > 0 ? 1 : neuron->rreluSlope 37 | : fmax(neuron->rreluSlope, value); 38 | } 39 | 40 | template 41 | double NetMath::elu(double value, bool prime, T* neuron) { 42 | return prime ? value >= 0 ? 1 : elu(value, false, neuron) + neuron->eluAlpha 43 | : value >= 0 ? 
value : neuron->eluAlpha * (exp(value) - 1); 44 | } 45 | 46 | // Cost Functions 47 | double NetMath::meansquarederror (std::vector calculated, std::vector desired) { 48 | double error = 0.0; 49 | 50 | for (int v=0; v calculated, std::vector desired) { 58 | return sqrt(NetMath::meansquarederror(calculated, desired)); 59 | } 60 | 61 | double NetMath::crossentropy (std::vector target, std::vector output) { 62 | double error = 0.0; 63 | 64 | for (int v=0; vlearningRate * deltaValue; 74 | } 75 | 76 | double NetMath::gain(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 77 | 78 | Network* net = Network::getInstance(netInstance); 79 | double newVal = value + net->learningRate * deltaValue * (weightIndex < 0 ? neuron->biasGain : neuron->weightGain[weightIndex]); 80 | 81 | if ((newVal<=0 && value>0) || (newVal>=0 && value<0)) { 82 | if (weightIndex>-1) { 83 | neuron->weightGain[weightIndex] = fmax(neuron->weightGain[weightIndex]*0.95, 0.5); 84 | } else { 85 | neuron->biasGain = fmax(neuron->biasGain*0.95, 0.5); 86 | } 87 | } else { 88 | if (weightIndex>-1) { 89 | neuron->weightGain[weightIndex] = fmin(neuron->weightGain[weightIndex]+0.05, 5); 90 | } else { 91 | neuron->biasGain = fmin(neuron->biasGain+0.05, 5); 92 | } 93 | } 94 | 95 | return newVal; 96 | } 97 | 98 | double NetMath::gain(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 99 | 100 | Network* net = Network::getInstance(netInstance); 101 | double newVal = value + net->learningRate * deltaValue * (c < 0 ? filter->biasGain : filter->weightGain[c][r][v]); 102 | 103 | if ((newVal<=0 && value>0) || (newVal>=0 && value<0)) { 104 | if (c>-1) { 105 | filter->weightGain[c][r][v] = fmax(filter->weightGain[c][r][v]*0.95, 0.5); 106 | } else { 107 | filter->biasGain = fmax(filter->biasGain*0.95, 0.5); 108 | } 109 | } else { 110 | if (c>-1) { 111 | filter->weightGain[c][r][v] = fmin(filter->weightGain[c][r][v]+0.05, 5); 112 | } else { 113 | filter->biasGain = fmin(filter->biasGain+0.05, 5); 114 | } 115 | } 116 | 117 | return newVal; 118 | } 119 | 120 | double NetMath::adagrad(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 121 | 122 | if (weightIndex>-1) { 123 | neuron->weightsCache[weightIndex] += pow(deltaValue, 2); 124 | } else { 125 | neuron->biasCache += pow(deltaValue, 2); 126 | } 127 | 128 | Network* net = Network::getInstance(netInstance); 129 | return value + net->learningRate * deltaValue / (1e-6 + sqrt(weightIndex>-1 ? neuron->weightsCache[weightIndex] 130 | : neuron->biasCache)); 131 | } 132 | 133 | double NetMath::adagrad(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 134 | 135 | Network* net = Network::getInstance(netInstance); 136 | 137 | if (c>-1) { 138 | filter->weightsCache[c][r][v] += pow(deltaValue, 2); 139 | } else { 140 | filter->biasCache += pow(deltaValue, 2); 141 | } 142 | 143 | return value + net->learningRate * deltaValue / (1e-6 + sqrt(c>-1 ? 
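        // Adagrad: the cache above accumulates the squared gradient per weight, so the
        // step learningRate * deltaValue is damped by 1 / sqrt(cache), shrinking updates
        // for frequently-adjusted weights.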
filter->weightsCache[c][r][v] 144 | : filter->biasCache)); 145 | } 146 | 147 | double NetMath::rmsprop(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 148 | 149 | Network* net = Network::getInstance(netInstance); 150 | 151 | if (weightIndex>-1) { 152 | neuron->weightsCache[weightIndex] = net->rmsDecay * neuron->weightsCache[weightIndex] + (1 - net->rmsDecay) * pow(deltaValue, 2); 153 | } else { 154 | neuron->biasCache = net->rmsDecay * neuron->biasCache + (1 - net->rmsDecay) * pow(deltaValue, 2); 155 | } 156 | 157 | return value + net->learningRate * deltaValue / (1e-6 + sqrt(weightIndex>-1 ? neuron->weightsCache[weightIndex] 158 | : neuron->biasCache)); 159 | } 160 | 161 | double NetMath::rmsprop(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 162 | 163 | Network* net = Network::getInstance(netInstance); 164 | 165 | if (c>-1) { 166 | filter->weightsCache[c][r][v] = net->rmsDecay * filter->weightsCache[c][r][v] + (1 - net->rmsDecay) * pow(deltaValue, 2); 167 | } else { 168 | filter->biasCache = net->rmsDecay * filter->biasCache + (1 - net->rmsDecay) * pow(deltaValue, 2); 169 | } 170 | 171 | return value + net->learningRate * deltaValue / (1e-6 + sqrt(c>-1 ? filter->weightsCache[c][r][v] 172 | : filter->biasCache)); 173 | } 174 | 175 | double NetMath::adam(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 176 | 177 | Network* net = Network::getInstance(netInstance); 178 | 179 | neuron->m = 0.9 * neuron->m + (1-0.9) * deltaValue; 180 | double mt = neuron->m / (1 - pow(0.9, net->iterations + 1)); 181 | 182 | neuron->v = 0.999 * neuron->v + (1-0.999) * pow(deltaValue, 2); 183 | double vt = neuron->v / (1 - pow(0.999, net->iterations + 1)); 184 | 185 | return value + net->learningRate * mt / (sqrt(vt) + 1e-6); 186 | } 187 | 188 | double NetMath::adam(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 189 | 190 | Network* net = Network::getInstance(netInstance); 191 | 192 | filter->m = 0.9 * filter->m + (1-0.9) * deltaValue; 193 | double mt = filter->m / (1 - pow(0.9, net->iterations + 1)); 194 | 195 | filter->v = 0.999 * filter->v + (1-0.999) * pow(deltaValue, 2); 196 | double vt = filter->v / (1 - pow(0.999, net->iterations + 1)); 197 | 198 | return value + net->learningRate * mt / (sqrt(vt) + 1e-6); 199 | } 200 | 201 | double NetMath::adadelta(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 202 | 203 | double rho = Network::getInstance(netInstance)->rho; 204 | 205 | if (weightIndex>-1) { 206 | neuron->weightsCache[weightIndex] = rho * neuron->weightsCache[weightIndex] + (1-rho) * pow(deltaValue, 2); 207 | double newVal = value + sqrt((neuron->adadeltaCache[weightIndex] + 1e-6) / (neuron->weightsCache[weightIndex] + 1e-6)) * deltaValue; 208 | neuron->adadeltaCache[weightIndex] = rho * neuron->adadeltaCache[weightIndex] + (1-rho) * pow(deltaValue, 2); 209 | return newVal; 210 | } else { 211 | neuron->biasCache = rho * neuron->biasCache + (1-rho) * pow(deltaValue, 2); 212 | double newVal = value + sqrt((neuron->adadeltaBiasCache + 1e-6) / (neuron->biasCache + 1e-6)) * deltaValue; 213 | neuron->adadeltaBiasCache = rho * neuron->adadeltaBiasCache + (1-rho) * pow(deltaValue, 2); 214 | return newVal; 215 | } 216 | } 217 | 218 | double NetMath::adadelta(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 219 | 220 | double rho = Network::getInstance(netInstance)->rho; 221 | 222 | 
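    // Adadelta: both caches are exponential moving averages of the squared delta,
    // decayed by rho, and the step is the raw delta scaled by
    // sqrt((adadeltaCache + 1e-6) / (weightsCache + 1e-6)), so no learning rate is used.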
if (c>-1) { 223 | filter->weightsCache[c][r][v] = rho * filter->weightsCache[c][r][v] + (1-rho) * pow(deltaValue, 2); 224 | double newVal = value + sqrt((filter->adadeltaCache[c][r][v] + 1e-6) / (filter->weightsCache[c][r][v] + 1e-6)) * deltaValue; 225 | filter->adadeltaCache[c][r][v] = rho * filter->adadeltaCache[c][r][v] + (1-rho) * pow(deltaValue, 2); 226 | return newVal; 227 | } else { 228 | filter->biasCache = rho * filter->biasCache + (1-rho) * pow(deltaValue, 2); 229 | double newVal = value + sqrt((filter->adadeltaBiasCache + 1e-6) / (filter->biasCache + 1e-6)) * deltaValue; 230 | filter->adadeltaBiasCache = rho * filter->adadeltaBiasCache + (1-rho) * pow(deltaValue, 2); 231 | return newVal; 232 | } 233 | } 234 | 235 | double NetMath::momentum(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex) { 236 | 237 | Network* net = Network::getInstance(netInstance); 238 | 239 | double v; 240 | 241 | if (weightIndex>-1) { 242 | v = net->momentum * neuron->weightsCache[weightIndex] - net->learningRate * deltaValue; 243 | neuron->weightsCache[weightIndex] = v; 244 | } else { 245 | v = net->momentum * neuron->biasCache - net->learningRate * deltaValue; 246 | neuron->biasCache = v; 247 | } 248 | 249 | return value - v; 250 | } 251 | 252 | double NetMath::momentum(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v) { 253 | 254 | Network* net = Network::getInstance(netInstance); 255 | 256 | double val; 257 | 258 | if (c>-1) { 259 | val = net->momentum * filter->weightsCache[c][r][v] - net->learningRate * deltaValue; 260 | filter->weightsCache[c][r][v] = val; 261 | } else { 262 | val = net->momentum * filter->biasCache - net->learningRate * deltaValue; 263 | filter->biasCache = val; 264 | } 265 | 266 | return value - val; 267 | } 268 | 269 | // Weights init 270 | std::vector NetMath::uniform (int netInstance, int layerIndex, int size) { 271 | std::vector values; 272 | 273 | float limit = Network::getInstance(netInstance)->weightsConfig["limit"]; 274 | 275 | for (int v=0; v NetMath::gaussian (int netInstance, int layerIndex, int size) { 283 | 284 | Network* net = Network::getInstance(netInstance); 285 | std::vector values; 286 | 287 | // Polar Box Muller 288 | for (int i=0; i= 1); 296 | 297 | values.push_back(net->weightsConfig["mean"] + ( x1 * sqrt(-2 * log(r) / r) ) * net->weightsConfig["stdDeviation"] ); 298 | } 299 | 300 | return values; 301 | } 302 | 303 | std::vector NetMath::lecununiform (int netInstance, int layerIndex, int size) { 304 | Network* net = Network::getInstance(netInstance); 305 | net->weightsConfig["limit"] = sqrt((double)3/net->layers[layerIndex]->fanIn); 306 | return NetMath::uniform(netInstance, layerIndex, size); 307 | } 308 | 309 | std::vector NetMath::lecunnormal (int netInstance, int layerIndex, int size) { 310 | Network* net = Network::getInstance(netInstance); 311 | net->weightsConfig["mean"] = 0; 312 | net->weightsConfig["stdDeviation"] = sqrt((double)1/net->layers[layerIndex]->fanIn); 313 | return NetMath::gaussian(netInstance, layerIndex, size); 314 | } 315 | 316 | std::vector NetMath::xavieruniform (int netInstance, int layerIndex, int size) { 317 | Network* net = Network::getInstance(netInstance); 318 | 319 | if (net->layers[layerIndex]->fanOut) { 320 | net->weightsConfig["limit"] = sqrt((double)6/(net->layers[layerIndex]->fanIn + net->layers[layerIndex]->fanOut)); 321 | 322 | return NetMath::uniform(netInstance, layerIndex, size); 323 | } else { 324 | return NetMath::lecununiform(netInstance, 
layerIndex, size); 325 | } 326 | } 327 | 328 | std::vector NetMath::xaviernormal (int netInstance, int layerIndex, int size) { 329 | Network* net = Network::getInstance(netInstance); 330 | 331 | if (net->layers[layerIndex]->fanOut) { 332 | 333 | net->weightsConfig["mean"] = 0; 334 | net->weightsConfig["stdDeviation"] = sqrt((double)2/(net->layers[layerIndex]->fanIn + net->layers[layerIndex]->fanOut)); 335 | 336 | return NetMath::gaussian(netInstance, layerIndex, size); 337 | } else { 338 | return NetMath::lecunnormal(netInstance, layerIndex, size); 339 | } 340 | } 341 | 342 | // Other 343 | std::vector NetMath::softmax (std::vector values) { 344 | 345 | double maxValue = -1/0.0; // -infinity 346 | 347 | for (int i=1; i maxValue) { 349 | maxValue = values[i]; 350 | } 351 | } 352 | 353 | // Exponentials 354 | std::vector exponentials; 355 | double exponentialsSum = 0; 356 | 357 | for (int i=0; i activations = NetUtil::getActivations(layer->prevLayer, channel, layer->inMapValuesCount); 374 | 375 | for (int r=0; routMapSize; r++) { 376 | for (int col=0; coloutMapSize; col++) { 377 | 378 | int rowStart = r * layer->stride; 379 | int colStart = col * layer->stride; 380 | 381 | // The first value 382 | double activation = activations[rowStart*layer->prevLayerOutWidth + colStart]; 383 | 384 | for (int filterRow=0; filterRowsize; filterRow++) { 385 | for (int filterCol=0; filterColsize; filterCol++) { 386 | 387 | double value = activations[ ((rowStart+filterRow) * layer->prevLayerOutWidth) + (colStart+filterCol) ]; 388 | 389 | if (value > activation) { 390 | activation = value; 391 | layer->indeces[channel][r][col] = {filterRow, filterCol}; 392 | } 393 | } 394 | } 395 | 396 | layer->activations[channel][r][col] = activation; 397 | } 398 | } 399 | } 400 | 401 | void NetMath::maxNorm(int netInstance) { 402 | Network* net = Network::getInstance(netInstance); 403 | 404 | if (net->maxNormTotal > net->maxNorm) { 405 | 406 | double multiplier = net->maxNorm / (1e-18 + net->maxNormTotal); 407 | 408 | for (int l=1; llayers.size(); l++) { 409 | for (int n=0; nlayers[l]->neurons.size(); n++) { 410 | for (int w=0; wlayers[l]->weights[n].size(); w++) { 411 | net->layers[l]->weights[n][w] *= multiplier; 412 | } 413 | } 414 | } 415 | } 416 | 417 | net->maxNormTotal = 0; 418 | } 419 | 420 | double NetMath::sech(double value) { 421 | return (2 * exp(-value)) / (1+exp(-2*value)); 422 | } 423 | -------------------------------------------------------------------------------- /dev/cpp/NetUtil.cpp: -------------------------------------------------------------------------------- 1 | 2 | void NetUtil::shuffle (std::vector, std::vector > > &values) { 3 | for (int i=values.size(); i; i--) { 4 | int j = floor(rand() / RAND_MAX * i); 5 | std::tuple, std::vector > x = values[i-1]; 6 | values[i-1] = values[j]; 7 | values[j] = x; 8 | } 9 | } 10 | 11 | std::vector > NetUtil::addZeroPadding (std::vector > map, int zP) { 12 | 13 | // Left and right columns 14 | for (int row=0; row row; 24 | 25 | for (int i=0; i row; 35 | 36 | for (int i=0; i > NetUtil::arrayToMap (std::vector array, int size) { 47 | 48 | std::vector > map; 49 | 50 | for (int i=0; i row; 52 | 53 | for (int j=0; j > > NetUtil::arrayToVolume (std::vector array, int channels) { 64 | 65 | std::vector > > vol; 66 | int size = sqrt(array.size() / channels); 67 | int mapValues = size * size; 68 | 69 | for (int d=0; d > map; 71 | 72 | for (int i=0; i row; 74 | 75 | for (int j=0; j > NetUtil::convolve(std::vector > > input, int zP, 88 | std::vector > > weights, int channels, 
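    // The output map width computed below follows the usual convolution arithmetic:
    // outSize = (inputWidth - filterWidth + 2 * zeroPadding) / stride + 1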
int stride, double bias) { 89 | 90 | std::vector > output; 91 | 92 | int outSize = (input[0].size() - weights[0].size() + 2*zP) / stride + 1; 93 | 94 | // Fill with 0 values 95 | for (int r=0; r row; 97 | for (int v=0; v=0 && inputY=0 && inputX 140 | std::vector > > NetUtil::createVolume (int depth, int rows, int columns, T value) { 141 | 142 | std::vector > > volume; 143 | 144 | for (int d=0; d > map; 146 | 147 | for (int r=0; r row; 149 | 150 | for (int c=0; c > NetUtil::buildConvErrorMap (int paddedLength, Layer* nextLayer, int filterI) { 164 | 165 | // Cache / convenience 166 | int zeroPadding = nextLayer->zeroPadding; 167 | int fsSpread = floor(nextLayer->filterSize / 2); 168 | 169 | std::vector > errorMap; 170 | 171 | // Zero pad and clear the error map, to allow easy convoling 172 | for (int row=0; row paddedRow; 174 | 175 | for (int val=0; valfilters.size(); nlFilterI++) { 184 | 185 | std::vector > weights = nextLayer->filterWeights[nlFilterI][filterI]; 186 | std::vector > errMap = nextLayer->errors[nlFilterI]; 187 | 188 | // Unconvolve their error map using the weights 189 | for (int inY=fsSpread; inYstride) { 190 | for (int inX=fsSpread; inXstride) { 191 | 192 | for (int wY=0; wYfilterSize; wY++) { 193 | for (int wX=0; wXfilterSize; wX++) { 194 | errorMap[inY+(wY-fsSpread)][inX+(wX-fsSpread)] += weights[wY][wX] 195 | * errMap[(inY-fsSpread)/nextLayer->stride][(inX-fsSpread)/nextLayer->stride]; 196 | } 197 | } 198 | } 199 | } 200 | } 201 | 202 | // Take out the zero padding. Rows: 203 | errorMap.erase(errorMap.begin(), errorMap.begin()+zeroPadding); 204 | errorMap.erase(errorMap.end()-zeroPadding, errorMap.end()); 205 | 206 | // Columns: 207 | for (int eY=0; eYfilterWeights[0][0].size(); 218 | int fsSpread = floor(weightsCount / 2); 219 | int channelsCount = layer->filterWeights[0].size(); 220 | 221 | // For each filter 222 | for (int f=0; ffilters.size(); f++) { 223 | 224 | // Each channel will take the error map and the corresponding inputMap from the input... 225 | for (int c=0; c inputValues = NetUtil::getActivations(layer->prevLayer, c, layer->inMapValuesCount); 228 | std::vector > inputMap = NetUtil::addZeroPadding(NetUtil::arrayToMap(inputValues, sqrt(layer->inMapValuesCount)), layer->zeroPadding); 229 | 230 | // ...slide the filter with correct stride across the zero-padded inputMap... 231 | for (int inY=fsSpread; inYstride) { 232 | for (int inX=fsSpread; inXstride) { 233 | 234 | double error = layer->errors[f][(inY-fsSpread)/layer->stride][(inX-fsSpread)/layer->stride]; 235 | 236 | // ...and at each location... 
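                    // ...accumulate each filter weight's delta: the zero-padded input
                    // activation under that weight multiplied by the error at this output
                    // position, summed over every position the filter visits.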
237 | for (int wY=0; wYfilterDeltaWeights[f][c][wY][wX] += inputMap[inY-fsSpread+wY][inX-fsSpread+wX] * error; 241 | } 242 | } 243 | } 244 | } 245 | } 246 | 247 | // Increment the deltaBias by the sum of all errors in the filter 248 | for (int eY=0; eYerrors[f].size(); eY++) { 249 | for (int eX=0; eXerrors[f].size(); eX++) { 250 | layer->deltaBiases[f] += layer->errors[f][eY][eX]; 251 | } 252 | } 253 | } 254 | } 255 | 256 | std::vector NetUtil::getActivations (Layer* layer, int mapStartI, int mapSize) { 257 | 258 | std::vector activations; 259 | 260 | if (layer->type == "FC") { 261 | 262 | for (int n=mapStartI*mapSize; n<(mapStartI+1)*mapSize; n++) { 263 | activations.push_back(layer->actvns[n]); 264 | } 265 | 266 | } else if (layer->type == "Conv") { 267 | 268 | for (int r=0; ractivations[mapStartI].size(); r++) { 269 | for (int c=0; cactivations[mapStartI][r].size(); c++) { 270 | activations.push_back(layer->activations[mapStartI][r][c]); 271 | } 272 | } 273 | 274 | } else { 275 | 276 | for (int r=0; ractivations[mapStartI].size(); r++) { 277 | for (int v=0; vactivations[mapStartI].size(); v++) { 278 | activations.push_back(layer->activations[mapStartI][r][v]); 279 | } 280 | } 281 | 282 | } 283 | 284 | return activations; 285 | } -------------------------------------------------------------------------------- /dev/cpp/Network.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "jsNet.h" 3 | #include "FCLayer.cpp" 4 | #include "ConvLayer.cpp" 5 | #include "PoolLayer.cpp" 6 | #include "Neuron.cpp" 7 | #include "Filter.cpp" 8 | #include "NetMath.cpp" 9 | #include "NetUtil.cpp" 10 | 11 | Network::~Network () { 12 | for (int l=0; literations = 0; 20 | net->rreluSlope = ((double) rand() / (RAND_MAX)) * 0.001; 21 | netInstances.push_back(net); 22 | net->instanceIndex = netInstances.size()-1; 23 | return net->instanceIndex; 24 | } 25 | 26 | void Network::deleteNetwork(void) { 27 | std::vector clearNetworkInstances; 28 | netInstances.swap(clearNetworkInstances); 29 | } 30 | 31 | void Network::deleteNetwork(int index) { 32 | delete netInstances[index]; 33 | netInstances[index] = 0; 34 | } 35 | 36 | Network* Network::getInstance(int i) { 37 | return netInstances[i]; 38 | } 39 | 40 | void Network::joinLayers(void) { 41 | for (int l=0; lfanIn = -1; 44 | layers[l]->fanOut = -1; 45 | 46 | // Join layer 47 | if (l>0) { 48 | layers[l-1]->assignNext(layers[l]); 49 | 50 | if (lfanOut = layers[l+1]->size; 52 | } 53 | 54 | layers[l]->assignPrev(layers[l-1]); 55 | layers[l]->fanIn = layers[l-1]->size; 56 | } else { 57 | layers[0]->fanOut = layers[1]->size; 58 | } 59 | 60 | layers[l]->init(l); 61 | } 62 | 63 | // Confusion matrices 64 | int outSize = layers[layers.size()-1]->size; 65 | for (int r=0; r(outSize, 0)); 67 | testConfusionMatrix.push_back(std::vector(outSize, 0)); 68 | validationConfusionMatrix.push_back(std::vector(outSize, 0)); 69 | } 70 | } 71 | 72 | std::vector Network::forward (std::vector input) { 73 | 74 | layers[0]->actvns = input; 75 | 76 | for (int l=1; lforward(); 78 | } 79 | 80 | return layers[layers.size()-1]->actvns; 81 | } 82 | 83 | void Network::backward () { 84 | 85 | layers[layers.size()-1]->backward(true); 86 | 87 | for (int l=layers.size()-2; l>0; l--) { 88 | layers[l]->backward(false); 89 | } 90 | } 91 | 92 | void Network::train (int its, int startI) { 93 | 94 | double totalErrors = 0.0; 95 | double iterationError = 0.0; 96 | 97 | isTraining = true; 98 | validationError = 0; 99 | 100 | for (int iterationIndex=startI; 
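        // Each iteration: forward pass, set the output layer's errors from the one-hot
        // target (1 - output for the target class, -output otherwise), optionally run
        // validation / early stopping, backpropagate, and apply the accumulated delta
        // weights once every miniBatchSize iterations.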
iterationIndex<(startI+its); iterationIndex++) { 101 | 102 | iterations++; 103 | std::vector output = forward(std::get<0>(trainingData[iterationIndex])); 104 | 105 | int classIndex = -1; 106 | int targetClassIndex = -1; 107 | double classValue = -std::numeric_limits::infinity(); 108 | 109 | for (int n=0; n classValue) { 111 | classValue = output[n]; 112 | classIndex = n; 113 | } 114 | if (std::get<1>(trainingData[iterationIndex])[n]==1) { 115 | targetClassIndex = n; 116 | layers[layers.size()-1]->errs[n] = 1 - output[n]; 117 | } else { 118 | layers[layers.size()-1]->errs[n] = 0 - output[n]; 119 | } 120 | } 121 | 122 | if (targetClassIndex != -1) { 123 | trainingConfusionMatrix[targetClassIndex][classIndex]++; 124 | } 125 | 126 | if (validationInterval!=0 && iterationIndex!=0 && iterationIndex%validationInterval==0) { 127 | validationError = validate(); 128 | 129 | if (collectErrors) { 130 | collectedValidationErrors.push_back(validationError); 131 | } 132 | 133 | if (earlyStoppingType && checkEarlyStopping()) { 134 | if (trainingLogging) { 135 | printf("Stopping early\n"); 136 | } 137 | stoppedEarly = true; 138 | break; 139 | } 140 | } 141 | 142 | backward(); 143 | 144 | iterationError = costFunction(std::get<1>(trainingData[iterationIndex]), output); 145 | totalErrors += iterationError; 146 | 147 | if (collectErrors) { 148 | collectedTrainingErrors.push_back(iterationError); 149 | } 150 | 151 | if ((iterationIndex+1) % miniBatchSize == 0) { 152 | applyDeltaWeights(); 153 | resetDeltaWeights(); 154 | } else if (iterationIndex >= trainingData.size()) { 155 | applyDeltaWeights(); 156 | } 157 | } 158 | 159 | isTraining = false; 160 | error = totalErrors / its; 161 | } 162 | 163 | double Network::validate (void) { 164 | 165 | double totalValidationErrors = 0; 166 | 167 | for (int i=0; i output = forward(std::get<0>(validationData[i])); 169 | 170 | int classIndex = -1; 171 | int targetClassIndex = -1; 172 | double classValue = -std::numeric_limits::infinity(); 173 | 174 | for (int n=0; n classValue) { 176 | classValue = output[n]; 177 | classIndex = n; 178 | } 179 | if (std::get<1>(validationData[i])[n]==1) { 180 | targetClassIndex = n; 181 | } 182 | } 183 | 184 | if (targetClassIndex != -1) { 185 | validationConfusionMatrix[targetClassIndex][classIndex]++; 186 | } 187 | 188 | totalValidationErrors += costFunction(std::get<1>(validationData[i]), output); 189 | 190 | validations++; 191 | } 192 | lastValidationError = totalValidationErrors / validationData.size(); 193 | return lastValidationError; 194 | } 195 | 196 | bool Network::checkEarlyStopping (void) { 197 | 198 | bool stop = false; 199 | 200 | switch (earlyStoppingType) { 201 | // threshold 202 | case 1: 203 | stop = lastValidationError <= earlyStoppingThreshold; 204 | 205 | // Do the last backward pass 206 | if (stop) { 207 | backward(); 208 | applyDeltaWeights(); 209 | } 210 | 211 | return stop; 212 | // patience 213 | case 2: 214 | 215 | if (lastValidationError < earlyStoppingBestError) { 216 | earlyStoppingPatienceCounter = 0; 217 | earlyStoppingBestError = lastValidationError; 218 | 219 | for (int l=1; lbackUpValidation(); 221 | } 222 | } else { 223 | earlyStoppingPatienceCounter++; 224 | stop = earlyStoppingPatienceCounter >= earlyStoppingPatience; 225 | } 226 | 227 | return stop; 228 | 229 | // divergence 230 | case 3: 231 | 232 | if (lastValidationError < earlyStoppingBestError) { 233 | 234 | earlyStoppingBestError = lastValidationError; 235 | 236 | for (int l=1; lbackUpValidation(); 238 | } 239 | 240 | } else { 241 | stop = 
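                    // Divergence: stop once the validation error has grown more than
                    // earlyStoppingPercent percent above the best validation error seen so far.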
(lastValidationError / earlyStoppingBestError) >= (1+earlyStoppingPercent/100); 242 | } 243 | 244 | return stop; 245 | 246 | } 247 | return stop; 248 | } 249 | 250 | double Network::test (int its, int startI) { 251 | 252 | double totalErrors = 0.0; 253 | 254 | for (int i=startI; i<(startI+its); i++) { 255 | std::vector output = forward(std::get<0>(testData[i])); 256 | 257 | int classIndex = -1; 258 | int targetClassIndex = -1; 259 | double classValue = -std::numeric_limits::infinity(); 260 | 261 | for (int n=0; n classValue) { 263 | classValue = output[n]; 264 | classIndex = n; 265 | } 266 | if (std::get<1>(testData[i])[n]==1) { 267 | targetClassIndex = n; 268 | } 269 | } 270 | 271 | if (targetClassIndex != -1) { 272 | testConfusionMatrix[targetClassIndex][classIndex]++; 273 | } 274 | 275 | double iterationError = costFunction(std::get<1>(testData[i]), output); 276 | 277 | if (collectErrors) { 278 | collectedTestErrors.push_back(iterationError); 279 | } 280 | 281 | totalErrors += iterationError; 282 | } 283 | 284 | return totalErrors / its; 285 | } 286 | 287 | void Network::resetDeltaWeights (void) { 288 | for (int l=1; lresetDeltaWeights(); 290 | } 291 | } 292 | 293 | void Network::applyDeltaWeights (void) { 294 | for (int l=1; lapplyDeltaWeights(); 296 | } 297 | } 298 | 299 | void Network::restoreValidation (void) { 300 | for (int l=1; lrestoreValidation(); 302 | } 303 | } 304 | 305 | std::vector Network::netInstances = {}; 306 | -------------------------------------------------------------------------------- /dev/cpp/Neuron.cpp: -------------------------------------------------------------------------------- 1 | 2 | void Neuron::init (int netInstance, int weightsCount) { 3 | 4 | Network* net = Network::getInstance(netInstance); 5 | 6 | dropped = false; 7 | 8 | switch (net->updateFnIndex) { 9 | case 1: // gain 10 | biasGain = 1; 11 | for (int i=0; iupdateFnIndex == 5) { 25 | adadeltaBiasCache = 0; 26 | for (int i=0; iactivation == &NetMath::lrelu) { 39 | lreluSlope = net->lreluSlope; 40 | } else if (net->activation == &NetMath::rrelu) { 41 | rreluSlope = ((double) rand() / (RAND_MAX)) * 0.001; 42 | } else if (net->activation == &NetMath::elu) { 43 | eluAlpha = net->eluAlpha; 44 | } 45 | } -------------------------------------------------------------------------------- /dev/cpp/PoolLayer.cpp: -------------------------------------------------------------------------------- 1 | 2 | PoolLayer::PoolLayer (int netI, int s) : Layer(netI, s) { 3 | netInstance = netI; 4 | size = s; 5 | type = "Pool"; 6 | hasActivation = false; 7 | } 8 | 9 | PoolLayer::~PoolLayer (void) {} 10 | 11 | void PoolLayer::assignNext (Layer* l) { 12 | nextLayer = l; 13 | } 14 | 15 | void PoolLayer::assignPrev (Layer* l) { 16 | prevLayer = l; 17 | } 18 | 19 | void PoolLayer::init (int layerIndex) { 20 | prevLayerOutWidth = sqrt(inMapValuesCount); 21 | activations = NetUtil::createVolume(channels, outMapSize, outMapSize, 0.0); 22 | errors = NetUtil::createVolume(channels, prevLayerOutWidth, prevLayerOutWidth, 0.0); 23 | std::vector emptyIndex = {0,0}; 24 | indeces = NetUtil::createVolume(channels, outMapSize, outMapSize, emptyIndex); 25 | } 26 | 27 | void PoolLayer::forward (void) { 28 | 29 | for (int channel=0; channeltype=="FC") { 56 | 57 | for (int c=0; cneurons.size(); n++) { 66 | errors[c][rowI][colI] += nextLayer->errs[n] * nextLayer->weights[n][weightI]; 67 | } 68 | } 69 | } 70 | } 71 | 72 | } else if (nextLayer->type=="Conv") { 73 | 74 | for (int c=0; c > errs = 
NetUtil::buildConvErrorMap(outMapSize+nextLayer->zeroPadding*2, nextLayer, c); 78 | 79 | for (int r=0; rerrors[c][r][v]; 99 | } 100 | } 101 | } 102 | } 103 | 104 | // Apply derivatives 105 | if (hasActivation) { 106 | for (int c=0; c 2 | #include 3 | #include 4 | #include 5 | 6 | // For easier debugging 7 | // #include "printv.h" 8 | 9 | class Layer; 10 | class Neuron; 11 | class Filter; 12 | class NetMath; 13 | class NetUtil; 14 | 15 | class Network { 16 | public: 17 | static std::vector netInstances; 18 | int instanceIndex; 19 | int iterations=0; 20 | int validations=0; 21 | int validationInterval; 22 | int miniBatchSize; 23 | int channels; 24 | float learningRate=0; 25 | float momentum=0; 26 | float rmsDecay=0; 27 | float rho=0; 28 | float lreluSlope=0; 29 | float rreluSlope=0; 30 | float eluAlpha=0; 31 | bool isTraining; 32 | float dropout=0; 33 | double l2=0; 34 | double l2Error=0; 35 | double l1=0; 36 | double l1Error=0; 37 | float maxNorm=0; 38 | double maxNormTotal=0; 39 | double trainingLogging; 40 | double error=0; 41 | double validationError=0; 42 | double lastValidationError; 43 | bool stoppedEarly=false; 44 | int earlyStoppingType=0; 45 | double earlyStoppingThreshold=0; 46 | double earlyStoppingBestError=0; 47 | int earlyStoppingPatience=0; 48 | int earlyStoppingPatienceCounter=0; 49 | float earlyStoppingPercent=0; 50 | std::vector layers; 51 | std::vector, std::vector > > trainingData; 52 | std::vector, std::vector > > validationData; 53 | std::vector, std::vector > > testData; 54 | std::map weightsConfig; 55 | double (*activation)(double, bool, Neuron*); 56 | double (*costFunction)(std::vector calculated, std::vector desired); 57 | std::vector (*weightInitFn)(int netInstance, int layerIndex, int size); 58 | 59 | std::vector> trainingConfusionMatrix; 60 | std::vector> testConfusionMatrix; 61 | std::vector> validationConfusionMatrix; 62 | 63 | bool collectErrors=false; 64 | std::vector collectedTrainingErrors; 65 | std::vector collectedValidationErrors; 66 | std::vector collectedTestErrors; 67 | 68 | int updateFnIndex; 69 | 70 | Network () {} 71 | 72 | ~Network (); 73 | 74 | static int newNetwork(void); 75 | 76 | static void deleteNetwork(void); 77 | 78 | static void deleteNetwork(int index); 79 | 80 | static Network* getInstance(int i); 81 | 82 | void joinLayers(); 83 | 84 | std::vector forward (std::vector input); 85 | 86 | void backward (void); 87 | 88 | void train (int iterations, int startIndex); 89 | 90 | double validate (void); 91 | 92 | bool checkEarlyStopping (void); 93 | 94 | double test (int iterations, int startIndex); 95 | 96 | void resetDeltaWeights (void); 97 | 98 | void applyDeltaWeights (void); 99 | 100 | void restoreValidation (void); 101 | 102 | }; 103 | 104 | 105 | class Layer { 106 | public: 107 | int netInstance; 108 | std::string type; 109 | int size; 110 | int fanIn; 111 | int fanOut; 112 | int channels; 113 | int filterSize; 114 | int stride; 115 | int zeroPadding; 116 | int inMapValuesCount; 117 | int inZPMapValuesCount; 118 | int outMapSize; 119 | int prevLayerOutWidth; 120 | bool hasActivation; 121 | bool softmax=false; 122 | std::vector neurons; 123 | std::vector filters; 124 | std::vector > > > indeces; 125 | std::vector > > errors; 126 | std::vector > > activations; 127 | std::vector deltaBiases; 128 | std::vector validationBiases; 129 | 130 | std::vector > weights; // FC 131 | std::vector > validationWeights; // FC 132 | std::vector > > > filterWeights; 133 | std::vector > > > validationFilterWeights; 134 | 135 | std::vector > deltaWeights; // 
FC 136 | std::vector > > > filterDeltaWeights; 137 | 138 | std::vector biases; // FC 139 | std::vector sums; // FC 140 | std::vector errs; // FC 141 | std::vector actvns; // FC 142 | 143 | Layer* nextLayer; 144 | Layer* prevLayer; 145 | double (*activation)(double, bool, Neuron*); 146 | double (*activationC)(double, bool, Filter*); 147 | double (*activationP)(double, bool, Network*); 148 | 149 | Layer (int netI, int s) {}; 150 | 151 | virtual ~Layer(void) {}; 152 | 153 | virtual void assignNext (Layer* l) = 0; 154 | 155 | virtual void assignPrev (Layer* l) = 0; 156 | 157 | virtual void init (int layerIndex) = 0; 158 | 159 | virtual void forward (void) = 0; 160 | 161 | virtual void backward (bool lastLayer) = 0; 162 | 163 | virtual void applyDeltaWeights (void) = 0; 164 | 165 | virtual void resetDeltaWeights (void) = 0; 166 | 167 | virtual void backUpValidation (void) = 0; 168 | 169 | virtual void restoreValidation (void) = 0; 170 | 171 | }; 172 | 173 | class FCLayer : public Layer { 174 | public: 175 | 176 | FCLayer (int netI, int s); 177 | 178 | ~FCLayer (void); 179 | 180 | void assignNext (Layer* l); 181 | 182 | void assignPrev (Layer* l); 183 | 184 | void init (int layerIndex); 185 | 186 | void forward (void); 187 | 188 | void backward (bool lastLayer); 189 | 190 | void applyDeltaWeights (void); 191 | 192 | void resetDeltaWeights (void); 193 | 194 | void backUpValidation (void); 195 | 196 | void restoreValidation (void); 197 | }; 198 | 199 | class ConvLayer : public Layer { 200 | public: 201 | 202 | ConvLayer (int netI, int s); 203 | 204 | ~ConvLayer (void); 205 | 206 | void assignNext (Layer* l); 207 | 208 | void assignPrev (Layer* l); 209 | 210 | void init (int layerIndex); 211 | 212 | void forward (void); 213 | 214 | void backward (bool lastLayer); 215 | 216 | void backward (void) { 217 | backward(false); 218 | }; 219 | 220 | void applyDeltaWeights (void); 221 | 222 | void resetDeltaWeights (void); 223 | 224 | void backUpValidation (void); 225 | 226 | void restoreValidation (void); 227 | 228 | }; 229 | 230 | class PoolLayer : public Layer { 231 | public: 232 | 233 | PoolLayer (int netI, int s); 234 | 235 | ~PoolLayer (void); 236 | 237 | void assignNext (Layer* l); 238 | 239 | void assignPrev (Layer* l); 240 | 241 | void init (int layerIndex); 242 | 243 | void forward (void); 244 | 245 | void backward (bool lastLayer); 246 | 247 | void backward (void) { 248 | backward(false); 249 | }; 250 | 251 | void applyDeltaWeights (void) {}; 252 | 253 | void resetDeltaWeights (void) {}; 254 | 255 | void backUpValidation (void) {}; 256 | 257 | void restoreValidation (void) {}; 258 | }; 259 | 260 | 261 | class Neuron { 262 | public: 263 | std::vector weightGain; 264 | std::vector weightsCache; 265 | std::vector adadeltaCache; 266 | double lreluSlope; 267 | double rreluSlope; 268 | double derivative; 269 | double eluAlpha; 270 | double biasGain; 271 | double adadeltaBiasCache; 272 | double biasCache; 273 | double m; 274 | double v; 275 | bool dropped; 276 | 277 | Neuron(void) {} 278 | 279 | void init (int netInstance, int weightsCount); 280 | }; 281 | 282 | class Filter { 283 | public: 284 | std::vector > > weightGain; 285 | std::vector > > weightsCache; 286 | std::vector > > adadeltaCache; 287 | std::vector > sumMap; 288 | std::vector > dropoutMap; 289 | double lreluSlope; 290 | double rreluSlope; 291 | double derivative; 292 | double activation; 293 | double eluAlpha; 294 | double biasGain; 295 | double adadeltaBiasCache; 296 | double biasCache; 297 | double m; 298 | double v; 299 | bool dropped; 
300 | 301 | Filter (void) {} 302 | 303 | void init (int netInstance, int channels, int filterSize); 304 | }; 305 | 306 | 307 | class NetMath { 308 | public: 309 | template 310 | static double sigmoid(double value, bool prime, T* neuron); 311 | 312 | template 313 | static double tanh(double value, bool prime, T* neuron); 314 | 315 | template 316 | static double lecuntanh(double value, bool prime, T* neuron); 317 | 318 | template 319 | static double relu(double value, bool prime, T* neuron); 320 | 321 | template 322 | static double lrelu(double value, bool prime, T* neuron); 323 | 324 | template 325 | static double rrelu(double value, bool prime, T* neuron); 326 | 327 | template 328 | static double elu(double value, bool prime, T* neuron); 329 | 330 | static double meansquarederror (std::vector calculated, std::vector desired); 331 | 332 | static double rootmeansquarederror (std::vector calculated, std::vector desired); 333 | 334 | static double crossentropy (std::vector target, std::vector output); 335 | 336 | static double vanillasgd (int netInstance, double value, double deltaValue); 337 | 338 | static double gain(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 339 | 340 | static double gain(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 341 | 342 | static double adagrad(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 343 | 344 | static double adagrad(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 345 | 346 | static double rmsprop(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 347 | 348 | static double rmsprop(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 349 | 350 | static double adam(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 351 | 352 | static double adam(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 353 | 354 | static double adadelta(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 355 | 356 | static double adadelta(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 357 | 358 | static double momentum(int netInstance, double value, double deltaValue, Neuron* neuron, int weightIndex); 359 | 360 | static double momentum(int netInstance, double value, double deltaValue, Filter* filter, int c, int r, int v); 361 | 362 | static std::vector uniform (int netInstance, int layerIndex, int size); 363 | 364 | static std::vector gaussian (int netInstance, int layerIndex, int size); 365 | 366 | static std::vector lecununiform (int netInstance, int layerIndex, int size); 367 | 368 | static std::vector lecunnormal (int netInstance, int layerIndex, int size); 369 | 370 | static std::vector xavieruniform (int netInstance, int layerIndex, int size); 371 | 372 | static std::vector xaviernormal (int netInstance, int layerIndex, int size); 373 | 374 | static std::vector softmax (std::vector values); 375 | 376 | static void maxPool (PoolLayer* layer, int channels); 377 | 378 | static void maxNorm(int netInstance); 379 | 380 | static double sech (double value); 381 | }; 382 | 383 | class NetUtil { 384 | public: 385 | 386 | static void shuffle (std::vector, std::vector > > &values); 387 | 388 | static std::vector > addZeroPadding (std::vector > map, int zP); 389 | 390 | static std::vector > convolve(std::vector > > input, int 
zP, 391 | std::vector > > weights, int channels, int stride, double bias); 392 | 393 | static std::vector > arrayToMap (std::vector array, int size); 394 | 395 | static std::vector > > arrayToVolume (std::vector array, int channels); 396 | 397 | template 398 | static std::vector > > createVolume (int depth, int rows, int columns, T value); 399 | 400 | static std::vector > buildConvErrorMap (int paddedLength, Layer* nextLayer, int filterI); 401 | 402 | static void buildConvDWeights (ConvLayer* layer); 403 | 404 | static std::vector getActivations (Layer* layer, int mapStartI, int mapSize); 405 | 406 | }; 407 | -------------------------------------------------------------------------------- /dev/cpp/printv.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | template 4 | void printv(T values) { 5 | EM_ASM(window.printfVector = []); 6 | 7 | for (int i=0; i 17 | void printv(std::vector> values) { 18 | EM_ASM(window.printfVector = []); 19 | 20 | for (int i=0; i 36 | void printv(std::vector>> values) { 37 | EM_ASM(window.printfVector = []); 38 | 39 | for (int i=0; i 60 | void printv(T values[], int size) { 61 | EM_ASM(window.printfVector = []); 62 | 63 | for (int i=0; i `WASM ${this.activationName||this.net.activationName}` 93 | }) 94 | this.activation = NetUtil.activationsIndeces[this.activationName||this.net.activationName] 95 | } 96 | 97 | this.filters = [...new Array(this.size)].map(f => new Filter()) 98 | } 99 | 100 | init () { 101 | this.filters.forEach((filter, fi) => { 102 | 103 | const paramTypes = ["number", "number", "number"] 104 | const params = [this.netInstance, this.layerIndex, fi] 105 | 106 | NetUtil.defineMapProperty(filter, "activationMap", paramTypes, params, this.outMapSize, this.outMapSize, {pre: "filter_"}) 107 | NetUtil.defineMapProperty(filter, "errorMap", paramTypes, params, this.outMapSize, this.outMapSize, {pre: "filter_"}) 108 | NetUtil.defineMapProperty(filter, "sumMap", paramTypes, params, this.outMapSize, this.outMapSize, {pre: "filter_"}) 109 | NetUtil.defineMapProperty(filter, "dropoutMap", paramTypes, params, this.outMapSize, this.outMapSize, { 110 | pre: "filter_", 111 | getCallback: m => m.map(row => row.map(v => v==1)) 112 | }) 113 | 114 | filter.init(this.netInstance, this.layerIndex, fi, { 115 | updateFn: this.net.updateFn, 116 | filterSize: this.filterSize, 117 | channels: this.channels 118 | }) 119 | }) 120 | } 121 | 122 | toJSON () { 123 | return { 124 | weights: this.filters.map(filter => { 125 | return { 126 | bias: filter.bias, 127 | weights: filter.weights 128 | } 129 | }) 130 | } 131 | } 132 | 133 | fromJSON (data, layerIndex) { 134 | this.filters.forEach((filter, fi) => { 135 | 136 | if (data.weights[fi].weights.length != filter.weights.length) { 137 | throw new Error(`Mismatched weights depth. Given: ${data.weights[fi].weights.length} Existing: ${filter.weights.length}. At: layers[${layerIndex}], filters[${fi}]`) 138 | } 139 | 140 | if (data.weights[fi].weights[0].length != filter.weights[0].length) { 141 | throw new Error(`Mismatched weights size. Given: ${data.weights[fi].weights[0].length} Existing: ${filter.weights[0].length}. 
At: layers[${layerIndex}], filters[${fi}]`) 142 | } 143 | 144 | filter.bias = data.weights[fi].bias 145 | filter.weights = data.weights[fi].weights 146 | }) 147 | } 148 | 149 | // Used for importing data 150 | getDataSize () { 151 | 152 | let size = 0 153 | 154 | for (let f=0; f new Neuron()) 8 | this.layerIndex = 0 9 | 10 | if (activation != undefined) { 11 | if (typeof activation == "boolean" && !activation) { 12 | activation = "noactivation" 13 | } 14 | if (typeof activation != "string") { 15 | throw new Error("Custom activation functions are not available in the WebAssembly version") 16 | } 17 | this.activationName = NetUtil.format(activation) 18 | } 19 | } 20 | 21 | assignNext (layer) { 22 | this.nextLayer = layer 23 | } 24 | 25 | assignPrev (layer, layerIndex) { 26 | this.netInstance = this.net.netInstance 27 | this.prevLayer = layer 28 | this.layerIndex = layerIndex 29 | 30 | if (this.activationName || this.net.activationName) { 31 | NetUtil.defineProperty(this, "activation", ["number", "number"], [this.netInstance, layerIndex], { 32 | pre: "fc_", 33 | getCallback: _ => `WASM ${this.activationName||this.net.activationName}` 34 | }) 35 | this.activation = NetUtil.activationsIndeces[this.activationName||this.net.activationName] 36 | } 37 | } 38 | 39 | init () { 40 | this.neurons.forEach((neuron, ni) => { 41 | switch (true) { 42 | 43 | case this.prevLayer instanceof FCLayer: 44 | neuron.size = this.prevLayer.size 45 | break 46 | 47 | case this.prevLayer instanceof ConvLayer: 48 | neuron.size = this.prevLayer.filters.length * this.prevLayer.outMapSize**2 49 | break 50 | 51 | case this.prevLayer instanceof PoolLayer: 52 | neuron.size = this.prevLayer.channels * this.prevLayer.outMapSize**2 53 | break 54 | } 55 | 56 | neuron.init(this.netInstance, this.layerIndex, ni, { 57 | updateFn: this.net.updateFn 58 | }) 59 | }) 60 | } 61 | 62 | toJSON () { 63 | return { 64 | weights: this.neurons.map(neuron => { 65 | return { 66 | bias: neuron.bias, 67 | weights: neuron.weights 68 | } 69 | }) 70 | } 71 | } 72 | 73 | fromJSON (data, layerIndex) { 74 | 75 | this.neurons.forEach((neuron, ni) => { 76 | 77 | if (data.weights[ni].weights.length!=(neuron.weights).length) { 78 | throw new Error(`Mismatched weights count. Given: ${data.weights[ni].weights.length} Existing: ${neuron.weights.length}. At layers[${layerIndex}], neurons[${ni}]`) 79 | } 80 | 81 | neuron.bias = data.weights[ni].bias 82 | neuron.weights = data.weights[ni].weights 83 | }) 84 | } 85 | 86 | // Used for importing data 87 | getDataSize () { 88 | let size = 0 89 | 90 | for (let n=0; n> 1) 40 | break 41 | case "HEAP32": case "HEAPU32": case "HEAPF32": 42 | NetUtil.Module[heapIn].set(typedArray, buf >> 2) 43 | break 44 | case "HEAPF64": 45 | NetUtil.Module[heapIn].set(typedArray, buf >> 3) 46 | break 47 | } 48 | 49 | bufs.push(buf) 50 | parameters.push(buf) 51 | parameters.push(params[p].length) 52 | parameterTypes.push("number") 53 | parameterTypes.push("number") 54 | 55 | } else { 56 | parameters.push(params[p]) 57 | parameterTypes.push(paramTypes[p]==undefined ? 
"number" : paramTypes[p]) 58 | } 59 | } 60 | } 61 | 62 | res = NetUtil.Module.ccall(func, returnTypeParam, parameterTypes, parameters) 63 | } catch (e) { 64 | error = e 65 | } finally { 66 | for (let b=0; b= 3600000) formatted.push(`${date.getHours()}h`) 170 | 171 | formatted.push(`${date.getMinutes()}m`) 172 | formatted.push(`${date.getSeconds()}s`) 173 | } 174 | 175 | value = formatted.join(" ") 176 | break 177 | } 178 | 179 | return value 180 | } 181 | 182 | static shuffle (arr) { 183 | for (let i=arr.length; i; i--) { 184 | const j = Math.floor(Math.random() * i) 185 | const x = arr[i-1] 186 | arr[i-1] = arr[j] 187 | arr[j] = x 188 | } 189 | } 190 | 191 | static splitData (data, {training=0.7, validation=0.15, test=0.15}={}) { 192 | 193 | const split = { 194 | training: [], 195 | validation: [], 196 | test: [] 197 | } 198 | 199 | // Define here splits, for returning at the end 200 | for (let i=0; i 1-training) { 204 | split.training.push(data[i]) 205 | } else { 206 | 207 | if (x maxVal) { 228 | maxVal = data[i] 229 | } 230 | } 231 | 232 | if ((-1*minVal + maxVal) != 0) { 233 | for (let i=0; ix, setCallback=x=>x, pre=""}={}) { 246 | Object.defineProperty(self, prop, { 247 | get: () => getCallback(this.Module.ccall(`get_${pre}${prop}`, "number", valTypes, values)), 248 | set: val => this.Module.ccall(`set_${pre}${prop}`, null, valTypes.concat("number"), values.concat(setCallback(val))) 249 | }) 250 | } 251 | 252 | static defineArrayProperty (self, prop, valTypes, values, returnSize, {pre=""}={}) { 253 | Object.defineProperty(self, prop, { 254 | get: () => NetUtil.ccallArrays(`get_${pre}${prop}`, "array", valTypes, values, {returnArraySize: returnSize, heapOut: "HEAPF64"}), 255 | set: value => NetUtil.ccallArrays(`set_${pre}${prop}`, null, valTypes.concat("array"), values.concat([value]), {heapIn: "HEAPF64"}) 256 | }) 257 | } 258 | 259 | static defineMapProperty (self, prop, valTypes, values, rows, columns, {getCallback=x=>x, setCallback=x=>x, pre=""}={}) { 260 | Object.defineProperty(self, prop, { 261 | get: () => getCallback(NetUtil.ccallVolume(`get_${pre}${prop}`, "volume", valTypes, values, {depth: 1, rows, columns, heapOut: "HEAPF64"})[0]), 262 | set: value => NetUtil.ccallVolume(`set_${pre}${prop}`, null, valTypes.concat("array"), values.concat([setCallback(value)]), {heapIn: "HEAPF64"}) 263 | }) 264 | } 265 | 266 | static defineVolumeProperty (self, prop, valTypes, values, depth, rows, columns, {getCallback=x=>x, setCallback=x=>x, pre=""}={}) { 267 | Object.defineProperty(self, prop, { 268 | get: () => getCallback(NetUtil.ccallVolume(`get_${pre}${prop}`, "volume", valTypes, values, {depth, rows, columns, heapOut: "HEAPF64"})), 269 | set: value => NetUtil.ccallVolume(`set_${pre}${prop}`, null, valTypes.concat("array"), values.concat([setCallback(value)]), {heapIn: "HEAPF64"}) 270 | }) 271 | } 272 | 273 | static makeConfusionMatrix (originalData) { 274 | let total = 0 275 | let totalCorrect = 0 276 | const data = [] 277 | 278 | for (let r=0; r { 361 | num = percent ? num.toFixed(1) + "%" : num.toString() 362 | const leftPad = Math.max(Math.floor((3*2+1 - num.length) / 2), 0) 363 | const rightPad = Math.max(3*2+1 - (num.length + leftPad), 0) 364 | return " ".repeat(leftPad)+num+" ".repeat(rightPad) 365 | } 366 | 367 | let colourText 368 | let colourBackground 369 | 370 | // Bright 371 | process.stdout.write("\n\x1b[1m") 372 | 373 | for (let r=0; r v==1, 16 | setCallback: v => v ? 
1 : 0 17 | }) 18 | NetUtil.defineProperty(this, "activation", paramTypes, params, {pre: "neuron_"}) 19 | NetUtil.defineProperty(this, "error", paramTypes, params, {pre: "neuron_"}) 20 | NetUtil.defineProperty(this, "derivative", paramTypes, params, {pre: "neuron_"}) 21 | 22 | NetUtil.defineProperty(this, "bias", paramTypes, params, {pre: "neuron_"}) 23 | 24 | if (layerIndex) { 25 | NetUtil.defineArrayProperty(this, "weights", paramTypes, params, this.size, {pre: "neuron_"}) 26 | } 27 | 28 | NetUtil.defineProperty(this, "deltaBias", paramTypes, params, {pre: "neuron_"}) 29 | NetUtil.defineArrayProperty(this, "deltaWeights", paramTypes, params, this.size, {pre: "neuron_"}) 30 | 31 | switch (updateFn) { 32 | case "gain": 33 | NetUtil.defineProperty(this, "biasGain", paramTypes, params, {pre: "neuron_"}) 34 | NetUtil.defineArrayProperty(this, "weightGain", paramTypes, params, this.size, {pre: "neuron_"}) 35 | break 36 | case "adagrad": 37 | case "rmsprop": 38 | case "adadelta": 39 | NetUtil.defineProperty(this, "biasCache", paramTypes, params, {pre: "neuron_"}) 40 | NetUtil.defineArrayProperty(this, "weightsCache", paramTypes, params, this.size, {pre: "neuron_"}) 41 | 42 | if (updateFn=="adadelta") { 43 | NetUtil.defineProperty(this, "adadeltaBiasCache", paramTypes, params, {pre: "neuron_"}) 44 | NetUtil.defineArrayProperty(this, "adadeltaCache", paramTypes, params, this.size, {pre: "neuron_"}) 45 | } 46 | break 47 | 48 | case "adam": 49 | NetUtil.defineProperty(this, "m", paramTypes, params, {pre: "neuron_"}) 50 | NetUtil.defineProperty(this, "v", paramTypes, params, {pre: "neuron_"}) 51 | break 52 | } 53 | } 54 | } 55 | 56 | /* istanbul ignore next */ 57 | typeof window!="undefined" && (window.Neuron = Neuron) 58 | exports.Neuron = Neuron -------------------------------------------------------------------------------- /dev/js-WebAssembly/OutputLayer.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class OutputLayer extends FCLayer { 4 | 5 | constructor (size, {activation, softmax}={}) { 6 | 7 | super(size, {activation}) 8 | 9 | if (softmax) { 10 | this.softmax = true 11 | } 12 | } 13 | } 14 | 15 | /* istanbul ignore next */ 16 | typeof window!="undefined" && (window.OutputLayer = OutputLayer) 17 | exports.OutputLayer = OutputLayer 18 | -------------------------------------------------------------------------------- /dev/js-WebAssembly/PoolLayer.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class PoolLayer { 4 | 5 | constructor (size, {stride, activation}={}) { 6 | 7 | if (size) this.size = size 8 | if (stride) this.stride = stride 9 | 10 | this.activation = false 11 | this.activationName = activation 12 | 13 | if (activation != undefined) { 14 | if (typeof activation == "boolean" && !activation) { 15 | activation = "noactivation" 16 | } 17 | if (typeof activation != "string") { 18 | throw new Error("Custom activation functions are not available in the WebAssembly version") 19 | } 20 | this.activationName = NetUtil.format(activation) 21 | } 22 | } 23 | 24 | assignNext (layer) { 25 | this.nextLayer = layer 26 | } 27 | 28 | assignPrev (layer, layerIndex) { 29 | 30 | this.netInstance = this.net.netInstance 31 | this.prevLayer = layer 32 | this.layerIndex = layerIndex 33 | 34 | let channels 35 | let prevLayerOutWidth = layer.outMapSize 36 | const size = this.size || this.net.pool.size || 2 37 | const stride = this.stride || this.net.pool.stride || this.size 38 | 39 | 
NetUtil.defineProperty(this, "channels", ["number", "number"], [this.netInstance, layerIndex], {pre: "pool_"}) 40 | NetUtil.defineProperty(this, "stride", ["number", "number"], [this.netInstance, layerIndex], {pre: "pool_"}) 41 | this.size = size 42 | this.stride = stride 43 | 44 | switch (true) { 45 | 46 | case layer instanceof FCLayer: 47 | channels = this.net.channels 48 | prevLayerOutWidth = Math.max(Math.floor(Math.sqrt(layer.size/channels)), 1) 49 | break 50 | 51 | case layer instanceof ConvLayer: 52 | channels = layer.size 53 | break 54 | 55 | case layer instanceof PoolLayer: 56 | channels = layer.channels 57 | break 58 | } 59 | 60 | this.channels = channels 61 | 62 | NetUtil.defineProperty(this, "prevLayerOutWidth", ["number", "number"], [this.netInstance, layerIndex], {pre: "pool_"}) 63 | NetUtil.defineProperty(this, "inMapValuesCount", ["number", "number"], [this.netInstance, layerIndex], {pre: "pool_"}) 64 | NetUtil.defineProperty(this, "outMapSize", ["number", "number"], [this.netInstance, layerIndex], {pre: "pool_"}) 65 | NetUtil.defineVolumeProperty(this, "errors", ["number", "number"], [this.netInstance, layerIndex], channels, prevLayerOutWidth, prevLayerOutWidth, {pre: "pool_"}) 66 | 67 | const outMapSize = (prevLayerOutWidth - size) / stride + 1 68 | this.outMapSize = outMapSize 69 | this.inMapValuesCount = prevLayerOutWidth ** 2 70 | 71 | NetUtil.defineVolumeProperty(this, "activations", ["number", "number"], [this.netInstance, layerIndex], channels, outMapSize, outMapSize, {pre: "pool_"}) 72 | NetUtil.defineVolumeProperty(this, "indeces", ["number", "number"], [this.netInstance, layerIndex], channels, outMapSize, outMapSize, { 73 | pre: "pool_", 74 | getCallback: vol => vol.map(map => map.map(row => row.map(val => [parseInt(val/2), val%2]))), 75 | setCallback: vol => vol.map(map => map.map(row => row.map(([x,y]) => 2*x+y))) 76 | }) 77 | 78 | if (outMapSize%1 != 0) { 79 | throw new Error(`Misconfigured hyperparameters. Activation volume dimensions would be ${outMapSize} in pool layer at index ${layerIndex}`) 80 | } 81 | 82 | if (this.activationName) { 83 | NetUtil.defineProperty(this, "activation", ["number", "number"], [this.netInstance, layerIndex], { 84 | pre: "pool_", 85 | getCallback: _ => `WASM ${this.activationName}` 86 | }) 87 | this.activation = NetUtil.activationsIndeces[this.activationName] 88 | } 89 | } 90 | 91 | init () {} 92 | 93 | toJSON () {return {}} 94 | 95 | fromJSON() {} 96 | 97 | getDataSize () {return 0} 98 | 99 | toIMG () {return []} 100 | 101 | fromIMG () {} 102 | 103 | } 104 | 105 | /* istanbul ignore next */ 106 | typeof window!="undefined" && (window.PoolLayer = PoolLayer) 107 | exports.PoolLayer = PoolLayer -------------------------------------------------------------------------------- /dev/js/ConvLayer.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class ConvLayer { 4 | 5 | constructor (size, {filterSize, zeroPadding, stride, activation}={}) { 6 | 7 | if (filterSize) this.filterSize = filterSize 8 | if (stride) this.stride = stride 9 | if (size) this.size = size 10 | 11 | this.zeroPadding = zeroPadding 12 | this.activationName = activation 13 | 14 | if (activation!=undefined) { 15 | 16 | if (typeof activation=="boolean" && !activation) { 17 | this.activation = false 18 | } else { 19 | this.activation = typeof activation=="function" ? 
activation : NetMath[NetUtil.format(activation)].bind(this) 20 | } 21 | } 22 | 23 | this.state = "not-initialised" 24 | } 25 | 26 | assignNext (layer) { 27 | this.nextLayer = layer 28 | } 29 | 30 | assignPrev (layer, layerIndex) { 31 | 32 | this.prevLayer = layer 33 | 34 | this.layerIndex = layerIndex 35 | this.size = this.size || 4 36 | this.filterSize = this.filterSize || this.net.conv.filterSize || 3 37 | this.stride = this.stride || this.net.conv.stride || 1 38 | 39 | switch (true) { 40 | case layer instanceof FCLayer: 41 | this.channels = this.net.channels ||1 42 | break 43 | 44 | case layer instanceof ConvLayer: 45 | this.channels = layer.size 46 | break 47 | 48 | case layer instanceof PoolLayer: 49 | this.channels = layer.activations.length 50 | break 51 | } 52 | 53 | if (this.zeroPadding==undefined) { 54 | this.zeroPadding = this.net.conv.zeroPadding==undefined ? Math.floor(this.filterSize/2) : this.net.conv.zeroPadding 55 | } 56 | 57 | // Caching calculations 58 | const prevLayerOutWidth = layer instanceof FCLayer ? Math.max(Math.floor(Math.sqrt(layer.size/this.channels)), 1) 59 | : layer.outMapSize 60 | 61 | this.inMapValuesCount = Math.pow(prevLayerOutWidth, 2) 62 | this.inZPMapValuesCount = Math.pow(prevLayerOutWidth + this.zeroPadding*2, 2) 63 | this.outMapSize = (prevLayerOutWidth - this.filterSize + 2*this.zeroPadding) / this.stride + 1 64 | 65 | if (this.outMapSize%1!=0) { 66 | throw new Error(`Misconfigured hyperparameters. Activation volume dimensions would be ${this.outMapSize} in conv layer at index ${layerIndex}`) 67 | } 68 | 69 | this.filters = [...new Array(this.size)].map(f => new Filter()) 70 | } 71 | 72 | init () { 73 | this.filters.forEach(filter => { 74 | 75 | filter.weights = [...new Array(this.channels)].map(channelWeights => { 76 | return [...new Array(this.filterSize)].map(weightsRow => this.net.weightsInitFn(this.filterSize, this.weightsConfig)) 77 | }) 78 | 79 | filter.activationMap = [...new Array(this.outMapSize)].map(row => [...new Array(this.outMapSize)].map(v => 0)) 80 | filter.errorMap = [...new Array(this.outMapSize)].map(row => [...new Array(this.outMapSize)].map(v => 0)) 81 | filter.bias = 1 82 | 83 | if (this.net.dropout != 1) { 84 | filter.dropoutMap = filter.activationMap.map(row => row.map(v => false)) 85 | } 86 | 87 | filter.init({ 88 | updateFn: this.net.updateFn, 89 | activation: this.activationName || this.net.activationConfig, 90 | eluAlpha: this.net.eluAlpha 91 | }) 92 | }) 93 | } 94 | 95 | forward () { 96 | 97 | const activations = NetUtil.getActivations(this.prevLayer) 98 | 99 | for (let filterI=0; filterI this.net.dropout)) { 115 | filter.activationMap[sumY][sumX] = 0 116 | } else if (this.activation) { 117 | filter.activationMap[sumY][sumX] = this.activation(filter.sumMap[sumY][sumX], false, filter) / (this.net.dropout||1) 118 | } else { 119 | filter.activationMap[sumY][sumX] = filter.sumMap[sumY][sumX] 120 | } 121 | } 122 | } 123 | } 124 | } 125 | 126 | backward () { 127 | 128 | // First, get the filters' error maps 129 | if (this.nextLayer instanceof FCLayer) { 130 | 131 | // For each filter, build the errorMap from the weighted neuron errors in the next FCLayer corresponding to each value in the activation map 132 | for (let filterI=0; filterI 0 ? 
1 : -1)) / this.net.miniBatchSize 236 | 237 | filter.weights[channel][row][col] = this.net.weightUpdateFn.bind(this.net, filter.weights[channel][row][col], 238 | regularized, filter, [channel, row, col])() 239 | 240 | if (this.net.maxNorm!=undefined) this.net.maxNormTotal += filter.weights[channel][row][col]**2 241 | } 242 | } 243 | } 244 | 245 | filter.bias = this.net.weightUpdateFn.bind(this.net, filter.bias, filter.deltaBias, filter)() 246 | } 247 | } 248 | 249 | backUpValidation () { 250 | for (let f=0; f { 283 | return { 284 | bias: filter.bias, 285 | weights: filter.weights 286 | } 287 | }) 288 | } 289 | } 290 | 291 | fromJSON (data, layerIndex) { 292 | this.filters.forEach((filter, fi) => { 293 | 294 | if (data.weights[fi].weights.length != filter.weights.length) { 295 | throw new Error(`Mismatched weights depth. Given: ${data.weights[fi].weights.length} Existing: ${filter.weights.length}. At: layers[${layerIndex}], filters[${fi}]`) 296 | } 297 | 298 | if (data.weights[fi].weights[0].length != filter.weights[0].length) { 299 | throw new Error(`Mismatched weights size. Given: ${data.weights[fi].weights[0].length} Existing: ${filter.weights[0].length}. At: layers[${layerIndex}], filters[${fi}]`) 300 | } 301 | 302 | filter.bias = data.weights[fi].bias 303 | filter.weights = data.weights[fi].weights 304 | }) 305 | } 306 | 307 | // Used for importing data 308 | getDataSize () { 309 | 310 | let size = 0 311 | 312 | for (let f=0; f new Neuron()) 8 | this.state = "not-initialised" 9 | 10 | if (activation!=undefined) { 11 | if (typeof activation=="boolean" && !activation) { 12 | this.activation = false 13 | } else { 14 | this.activation = typeof activation=="function" ? activation : NetMath[NetUtil.format(activation)].bind(this) 15 | } 16 | } 17 | } 18 | 19 | assignNext (layer) { 20 | this.nextLayer = layer 21 | } 22 | 23 | assignPrev (layer, layerIndex) { 24 | this.prevLayer = layer 25 | this.layerIndex = layerIndex 26 | } 27 | 28 | init () { 29 | this.neurons.forEach(neuron => { 30 | 31 | let weightsCount 32 | 33 | switch (true) { 34 | case this.prevLayer instanceof FCLayer: 35 | weightsCount = this.prevLayer.size 36 | break 37 | 38 | case this.prevLayer instanceof ConvLayer: 39 | weightsCount = this.prevLayer.filters.length * this.prevLayer.outMapSize**2 40 | break 41 | 42 | case this.prevLayer instanceof PoolLayer: 43 | weightsCount = this.prevLayer.activations.length * this.prevLayer.outMapSize**2 44 | break 45 | } 46 | 47 | neuron.weights = this.net.weightsInitFn(weightsCount, this.weightsConfig) 48 | neuron.bias = 1 49 | 50 | neuron.init({ 51 | updateFn: this.net.updateFn, 52 | activationConfig: this.net.activationConfig, 53 | eluAlpha: this.net.eluAlpha 54 | }) 55 | }) 56 | } 57 | 58 | forward () { 59 | this.neurons.forEach((neuron, ni) => { 60 | if (this.state=="training" && (neuron.dropped = Math.random() > this.net.dropout)) { 61 | neuron.activation = 0 62 | } else { 63 | neuron.sum = neuron.bias 64 | 65 | const activations = NetUtil.getActivations(this.prevLayer) 66 | 67 | for (let ai=0; ai { 78 | 79 | if (neuron.dropped) { 80 | neuron.error = 0 81 | neuron.deltaBias += 0 82 | } else { 83 | if (typeof errors !== "undefined") { 84 | neuron.error = errors[ni] 85 | } else { 86 | neuron.derivative = this.activation ? 
this.activation(neuron.sum, true, neuron) : 1 87 | neuron.error = neuron.derivative * this.nextLayer.neurons.map(n => n.error * (n.weights[ni]||0)) 88 | .reduce((p,c) => p+c, 0) 89 | } 90 | 91 | const activations = NetUtil.getActivations(this.prevLayer) 92 | 93 | for (let wi=0; wi 0 ? 1 : -1)) / this.net.miniBatchSize 126 | 127 | neuron.weights[dwi] = this.net.weightUpdateFn.bind(this.net, neuron.weights[dwi], regularized, neuron, dwi)() 128 | 129 | if (this.net.maxNorm!=undefined) this.net.maxNormTotal += neuron.weights[dwi]**2 130 | } 131 | 132 | neuron.bias = this.net.weightUpdateFn.bind(this.net, neuron.bias, neuron.deltaBias, neuron)() 133 | } 134 | } 135 | 136 | backUpValidation () { 137 | for (let n=0; n { 155 | return { 156 | bias: neuron.bias, 157 | weights: neuron.weights 158 | } 159 | }) 160 | } 161 | } 162 | 163 | fromJSON (data, layerIndex) { 164 | this.neurons.forEach((neuron, ni) => { 165 | 166 | if (data.weights[ni].weights.length!=neuron.weights.length) { 167 | throw new Error(`Mismatched weights count. Given: ${data.weights[ni].weights.length} Existing: ${neuron.weights.length}. At layers[${layerIndex}], neurons[${ni}]`) 168 | } 169 | 170 | neuron.bias = data.weights[ni].bias 171 | neuron.weights = data.weights[ni].weights 172 | }) 173 | } 174 | 175 | // Used for importing data 176 | getDataSize () { 177 | 178 | let size = 0 179 | 180 | for (let n=0; n channel.map(wRow => wRow.map(w => 0))) 10 | this.deltaBias = 0 11 | 12 | switch (updateFn) { 13 | 14 | case "gain": 15 | this.biasGain = 1 16 | this.weightGains = this.weights.map(channel => channel.map(wRow => wRow.map(w => 1))) 17 | this.getWeightGain = ([channel, row, column]) => this.weightGains[channel][row][column] 18 | this.setWeightGain = ([channel, row, column], v) => this.weightGains[channel][row][column] = v 19 | break 20 | 21 | case "adagrad": 22 | case "rmsprop": 23 | case "adadelta": 24 | case "momentum": 25 | this.biasCache = 0 26 | this.weightsCache = this.weights.map(channel => channel.map(wRow => wRow.map(w => 0))) 27 | this.getWeightsCache = ([channel, row, column]) => this.weightsCache[channel][row][column] 28 | this.setWeightsCache = ([channel, row, column], v) => this.weightsCache[channel][row][column] = v 29 | 30 | if (updateFn=="adadelta") { 31 | this.adadeltaBiasCache = 0 32 | this.adadeltaCache = this.weights.map(channel => channel.map(wRow => wRow.map(w => 0))) 33 | this.getAdadeltaCache = ([channel, row, column]) => this.adadeltaCache[channel][row][column] 34 | this.setAdadeltaCache = ([channel, row, column], v) => this.adadeltaCache[channel][row][column] = v 35 | } 36 | break 37 | 38 | case "adam": 39 | this.m = 0 40 | this.v = 0 41 | break 42 | } 43 | 44 | if (activation=="rrelu") { 45 | this.rreluSlope = Math.random() * 0.001 46 | 47 | } else if (activation=="elu") { 48 | this.eluAlpha = eluAlpha 49 | } 50 | } 51 | 52 | getWeight ([channel, row, column]) { 53 | return this.weights[channel][row][column] 54 | } 55 | 56 | setWeight ([channel, row, column], v) { 57 | this.weights[channel][row][column] = v 58 | } 59 | 60 | getDeltaWeight ([channel, row, column]) { 61 | return this.deltaWeights[channel][row][column] 62 | } 63 | 64 | setDeltaWeight ([channel, row, column], v) { 65 | this.deltaWeights[channel][row][column] = v 66 | } 67 | } 68 | 69 | /* istanbul ignore next */ 70 | typeof window!="undefined" && (window.Filter = Filter) 71 | exports.Filter = Filter -------------------------------------------------------------------------------- /dev/js/InputLayer.js: 
-------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class InputLayer extends FCLayer { 4 | constructor (size, {span=1}={}) { 5 | super(size * span*span) 6 | } 7 | } 8 | 9 | /* istanbul ignore next */ 10 | typeof window!="undefined" && (window.InputLayer = InputLayer) 11 | exports.InputLayer = InputLayer 12 | -------------------------------------------------------------------------------- /dev/js/NetMath.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class NetMath { 4 | 5 | // Activation functions 6 | static sigmoid (value, prime) { 7 | const val = 1/(1+Math.exp(-value)) 8 | return prime ? val*(1-val) 9 | : val 10 | } 11 | 12 | static tanh (value, prime) { 13 | const exp = Math.exp(2*value) 14 | return prime ? 4/Math.pow(Math.exp(value)+Math.exp(-value), 2) || 1e-18 15 | : (exp-1)/(exp+1) || 1e-18 16 | } 17 | 18 | static relu (value, prime) { 19 | return prime ? value > 0 ? 1 : 0 20 | : Math.max(value, 0) 21 | } 22 | 23 | static lrelu (value, prime) { 24 | return prime ? value > 0 ? 1 : (this.lreluSlope || -0.0005) 25 | : Math.max((this.lreluSlope || -0.0005)*Math.abs(value), value) 26 | } 27 | 28 | static rrelu (value, prime, neuron) { 29 | return prime ? value > 0 ? 1 : neuron.rreluSlope 30 | : Math.max(neuron.rreluSlope, value) 31 | } 32 | 33 | static lecuntanh (value, prime) { 34 | return prime ? 1.15333 * Math.pow(NetMath.sech((2/3) * value), 2) 35 | : 1.7159 * NetMath.tanh((2/3) * value) 36 | } 37 | 38 | static elu (value, prime, neuron) { 39 | return prime ? value >=0 ? 1 : NetMath.elu(value, false, neuron) + neuron.eluAlpha 40 | : value >=0 ? value : neuron.eluAlpha * (Math.exp(value) - 1) 41 | } 42 | 43 | // Cost functions 44 | static crossentropy (target, output) { 45 | return output.map((value, vi) => target[vi] * Math.log(value+1e-15) + ((1-target[vi]) * Math.log((1+1e-15)-value))) 46 | .reduce((p,c) => p-c, 0) 47 | } 48 | 49 | static meansquarederror (calculated, desired) { 50 | return calculated.map((output, index) => Math.pow(output - desired[index], 2)) 51 | .reduce((prev, curr) => prev+curr, 0) / calculated.length 52 | } 53 | 54 | static rootmeansquarederror (calculated, desired) { 55 | return Math.sqrt(NetMath.meansquarederror(calculated, desired)) 56 | } 57 | 58 | // Weight updating functions 59 | static vanillasgd (value, deltaValue) { 60 | return value + this.learningRate * deltaValue 61 | } 62 | 63 | static gain (value, deltaValue, neuron, weightI) { 64 | 65 | const newVal = value + this.learningRate * deltaValue * (weightI==null ? neuron.biasGain : neuron.getWeightGain(weightI)) 66 | 67 | if (newVal<=0 && value>0 || newVal>=0 && value<0){ 68 | if (weightI!=null) { 69 | neuron.setWeightGain(weightI, Math.max(neuron.getWeightGain(weightI)*0.95, 0.5)) 70 | } else { 71 | neuron.biasGain = Math.max(neuron.biasGain*0.95, 0.5) 72 | } 73 | } else { 74 | if (weightI!=null) { 75 | neuron.setWeightGain(weightI, Math.min(neuron.getWeightGain(weightI)+0.05, 5)) 76 | } else { 77 | neuron.biasGain = Math.min(neuron.biasGain+0.05, 5) 78 | } 79 | } 80 | 81 | return newVal 82 | } 83 | 84 | static adagrad (value, deltaValue, neuron, weightI) { 85 | 86 | if (weightI!=null) { 87 | neuron.setWeightsCache(weightI, neuron.getWeightsCache(weightI) + Math.pow(deltaValue, 2)) 88 | } else { 89 | neuron.biasCache += Math.pow(deltaValue, 2) 90 | } 91 | 92 | return value + this.learningRate * deltaValue / (1e-6 + Math.sqrt(weightI!=null ? 
neuron.getWeightsCache(weightI) 93 | : neuron.biasCache)) 94 | } 95 | 96 | static rmsprop (value, deltaValue, neuron, weightI) { 97 | 98 | if (weightI!=null) { 99 | neuron.setWeightsCache(weightI, this.rmsDecay * neuron.getWeightsCache(weightI) + (1 - this.rmsDecay) * Math.pow(deltaValue, 2)) 100 | } else { 101 | neuron.biasCache = this.rmsDecay * neuron.biasCache + (1 - this.rmsDecay) * Math.pow(deltaValue, 2) 102 | } 103 | 104 | return value + this.learningRate * deltaValue / (1e-6 + Math.sqrt(weightI!=null ? neuron.getWeightsCache(weightI) 105 | : neuron.biasCache)) 106 | } 107 | 108 | static adam (value, deltaValue, neuron) { 109 | 110 | neuron.m = 0.9*neuron.m + (1-0.9) * deltaValue 111 | const mt = neuron.m / (1-Math.pow(0.9, this.iterations + 1)) 112 | 113 | neuron.v = 0.999*neuron.v + (1-0.999) * Math.pow(deltaValue, 2) 114 | const vt = neuron.v / (1-Math.pow(0.999, this.iterations + 1)) 115 | 116 | return value + this.learningRate * mt / (Math.sqrt(vt) + 1e-8) 117 | } 118 | 119 | static adadelta (value, deltaValue, neuron, weightI) { 120 | 121 | if (weightI!=null) { 122 | neuron.setWeightsCache(weightI, this.rho * neuron.getWeightsCache(weightI) + (1-this.rho) * Math.pow(deltaValue, 2)) 123 | const newVal = value + Math.sqrt((neuron.getAdadeltaCache(weightI) + 1e-6)/(neuron.getWeightsCache(weightI) + 1e-6)) * deltaValue 124 | neuron.setAdadeltaCache(weightI, this.rho * neuron.getAdadeltaCache(weightI) + (1-this.rho) * Math.pow(deltaValue, 2)) 125 | return newVal 126 | 127 | } else { 128 | neuron.biasCache = this.rho * neuron.biasCache + (1-this.rho) * Math.pow(deltaValue, 2) 129 | const newVal = value + Math.sqrt((neuron.adadeltaBiasCache + 1e-6)/(neuron.biasCache + 1e-6)) * deltaValue 130 | neuron.adadeltaBiasCache = this.rho * neuron.adadeltaBiasCache + (1-this.rho) * Math.pow(deltaValue, 2) 131 | return newVal 132 | } 133 | } 134 | 135 | static momentum (value, deltaValue, neuron, weightI) { 136 | 137 | let v 138 | 139 | if (weightI!=null) { 140 | v = this.momentum * (neuron.getWeightsCache(weightI)) - this.learningRate * deltaValue 141 | neuron.setWeightsCache(weightI, v) 142 | } else { 143 | v = this.momentum * (neuron.biasCache) - this.learningRate * deltaValue 144 | neuron.biasCache = v 145 | } 146 | 147 | return value - v 148 | } 149 | 150 | // Weights init 151 | static uniform (size, {limit}) { 152 | const values = [] 153 | 154 | for (let i=0; i= 1 || !r) 173 | 174 | values.push(mean + (x1 * (Math.sqrt(-2 * Math.log(r) / r))) * stdDeviation) 175 | } 176 | 177 | return values 178 | } 179 | 180 | static xaviernormal (size, {fanIn, fanOut}) { 181 | return fanOut || fanOut==0 ? NetMath.gaussian(size, {mean: 0, stdDeviation: Math.sqrt(2/(fanIn+fanOut))}) 182 | : NetMath.lecunnormal(size, {fanIn}) 183 | } 184 | 185 | static xavieruniform (size, {fanIn, fanOut}) { 186 | return fanOut || fanOut==0 ? 
NetMath.uniform(size, {limit: Math.sqrt(6/(fanIn+fanOut))}) 187 | : NetMath.lecununiform(size, {fanIn}) 188 | } 189 | 190 | static lecunnormal (size, {fanIn}) { 191 | return NetMath.gaussian(size, {mean: 0, stdDeviation: Math.sqrt(1/fanIn)}) 192 | } 193 | 194 | static lecununiform (size, {fanIn}) { 195 | return NetMath.uniform(size, {limit: Math.sqrt(3/fanIn)}) 196 | } 197 | 198 | // Pool 199 | static maxPool (layer, channel) { 200 | 201 | const activations = NetUtil.getActivations(layer.prevLayer, channel, layer.inMapValuesCount) 202 | 203 | for (let row=0; row activation) { 218 | activation = value 219 | layer.indeces[channel][row][col] = [filterRow, filterCol] 220 | } 221 | } 222 | } 223 | 224 | layer.activations[channel][row][col] = activation 225 | } 226 | } 227 | } 228 | 229 | // Other 230 | static softmax (v) { 231 | 232 | const values = v.slice(0) 233 | let maxValue = values[0] 234 | 235 | for (let i=1; i maxValue) { 237 | maxValue = values[i] 238 | } 239 | } 240 | 241 | // Exponentials 242 | const exponentials = new Array(values.length) 243 | let exponentialsSum = 0.0 244 | 245 | for (let i=0; i p+c) / arr.length 265 | const diffs = arr.map(v => v - avg).map(v => v**2) 266 | return Math.sqrt(diffs.reduce((p,c) => p+c) / diffs.length) 267 | } 268 | 269 | static maxNorm () { 270 | 271 | if (this.maxNormTotal > this.maxNorm) { 272 | 273 | const multiplier = this.maxNorm / (1e-18 + this.maxNormTotal) 274 | 275 | this.layers.forEach((layer, li) => { 276 | li && layer.neurons.forEach(neuron => { 277 | neuron.weights.forEach((w, wi) => neuron.setWeight(wi, neuron.getWeight(wi) * multiplier)) 278 | }) 279 | }) 280 | } 281 | 282 | this.maxNormTotal = 0 283 | } 284 | } 285 | 286 | /* istanbul ignore next */ 287 | typeof window!="undefined" && (window.NetMath = NetMath) 288 | exports.NetMath = NetMath -------------------------------------------------------------------------------- /dev/js/Neuron.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class Neuron { 4 | 5 | constructor () {} 6 | 7 | init ({updateFn, activation, eluAlpha}={}) { 8 | 9 | const size = this.weights.length 10 | this.deltaWeights = this.weights.map(v => 0) 11 | 12 | switch (updateFn) { 13 | 14 | case "gain": 15 | this.biasGain = 1 16 | this.weightGains = [...new Array(size)].map(v => 1) 17 | this.getWeightGain = i => this.weightGains[i] 18 | this.setWeightGain = (i,v) => this.weightGains[i] = v 19 | break 20 | 21 | case "adagrad": 22 | case "rmsprop": 23 | case "adadelta": 24 | case "momentum": 25 | this.biasCache = 0 26 | this.weightsCache = [...new Array(size)].map(v => 0) 27 | this.getWeightsCache = i => this.weightsCache[i] 28 | this.setWeightsCache = (i,v) => this.weightsCache[i] = v 29 | 30 | if (updateFn=="adadelta") { 31 | this.adadeltaBiasCache = 0 32 | this.adadeltaCache = [...new Array(size)].map(v => 0) 33 | this.getAdadeltaCache = i => this.adadeltaCache[i] 34 | this.setAdadeltaCache = (i,v) => this.adadeltaCache[i] = v 35 | } 36 | break 37 | 38 | case "adam": 39 | this.m = 0 40 | this.v = 0 41 | break 42 | } 43 | 44 | if (activation=="rrelu") { 45 | this.rreluSlope = Math.random() * 0.001 46 | 47 | } else if (activation=="elu") { 48 | this.eluAlpha = eluAlpha 49 | } 50 | } 51 | 52 | getWeight (i) { 53 | return this.weights[i] 54 | } 55 | 56 | setWeight (i, v) { 57 | this.weights[i] = v 58 | } 59 | 60 | getDeltaWeight (i) { 61 | return this.deltaWeights[i] 62 | } 63 | 64 | setDeltaWeight (i, v) { 65 | this.deltaWeights[i] = v 66 | } 67 | } 68 | 69 | 
/* istanbul ignore next */ 70 | typeof window!="undefined" && (window.Neuron = Neuron) 71 | exports.Neuron = Neuron -------------------------------------------------------------------------------- /dev/js/OutputLayer.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class OutputLayer extends FCLayer { 4 | 5 | constructor (size, {activation, softmax}={}) { 6 | 7 | super(size, {activation}) 8 | 9 | if (softmax) { 10 | this.softmax = true 11 | } 12 | } 13 | 14 | forward () { 15 | 16 | super.forward() 17 | 18 | if (this.softmax) { 19 | 20 | const softmax = NetMath.softmax(this.neurons.map(n => n.activation)) 21 | 22 | for (let s=0; s { 57 | return [...new Array(this.outMapSize)].map(row => [...new Array(this.outMapSize)].map(v => 0)) 58 | }) 59 | this.errors = [...new Array(this.channels)].map(channel => { 60 | return [...new Array(prevLayerOutWidth)].map(row => [...new Array(prevLayerOutWidth)].map(v => 0)) 61 | }) 62 | this.indeces = this.activations.map(channel => channel.map(row => row.map(v => [0,0]))) 63 | } 64 | 65 | forward () { 66 | for (let channel=0; channel require("./jsNetJS.min.js") 4 | exports.webassembly = (path="./node_modules/jsnet/dist/NetWASM.wasm") => { 5 | global.jsNetWASMPath = path 6 | const jsNet = require("./jsNetWebAssembly.min.js") 7 | jsNet.Module = require("./NetWASM.js") 8 | return jsNet 9 | } 10 | -------------------------------------------------------------------------------- /dist/NetWASM.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DanRuta/jsNet/b17bf7eb7dc3464ecf6f6a9d813180fb89b7d09f/dist/NetWASM.wasm -------------------------------------------------------------------------------- /dist/jsNet.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | exports.js = () => require("./jsNetJS.min.js") 4 | exports.webassembly = (path="./node_modules/jsnet/dist/NetWASM.wasm") => { 5 | global.jsNetWASMPath = path 6 | const jsNet = require("./jsNetWebAssembly.min.js") 7 | jsNet.Module = require("./NetWASM.js") 8 | return jsNet 9 | } 10 | 11 | //# sourceMappingURL=jsNet.js.map -------------------------------------------------------------------------------- /dist/jsNet.js.map: -------------------------------------------------------------------------------- 1 | {"version":3,"sources":["../dev/jsNet.js"],"names":[],"mappings":"AAAA,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;AACb,CAAC;AACD,OAAO,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,OAAO,IAAI,OAAO,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC;AAC/C,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,IAAI,IAAI,YAAY,CAAC,KAAK,CAAC,IAAI,CAAC,OAAO,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC3E,IAAI,MAAM,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI,CAAC;AAChC,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,IAAI,gBAAgB,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC;AACvD,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,IAAI,OAAO,CAAC,EAAE,EAAE,CAAC;AAC3C,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,CAAC,CAAC","file":"jsNet.js","sourcesContent":["\"use strict\"\r\n\r\nexports.js = () => require(\"./jsNetJS.min.js\")\r\nexports.webassembly = (path=\"./node_modules/jsnet/dist/NetWASM.wasm\") => {\r\n global.jsNetWASMPath = path\r\n const jsNet = require(\"./jsNetWebAssembly.min.js\")\r\n jsNet.Module = require(\"./NetWASM.js\")\r\n return jsNet\r\n}\r\n"]} -------------------------------------------------------------------------------- /examples/confusion.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DanRuta/jsNet/b17bf7eb7dc3464ecf6f6a9d813180fb89b7d09f/examples/confusion.png -------------------------------------------------------------------------------- /examples/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | jsNet examples index 5 | 6 | 7 | MNIST example
8 | Example Webpack loading
9 | Example WASM multiple network instances
10 | 11 | -------------------------------------------------------------------------------- /examples/mnist/NetChart.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | class NetChart { 4 | constructor ({container, size = {x: 500, y: 500}, cutOff=0, interval=5, averageOver=10}) { 5 | const canvas = document.createElement("canvas") 6 | canvas.width = size.x 7 | canvas.height = size.y 8 | canvas.style.maxHeight = size.y 9 | 10 | this.chart = new Chart(canvas.getContext("2d"), { 11 | type: "line", 12 | data: { 13 | datasets: [{ 14 | label: "Training Error", 15 | borderColor: "rgba(0, 0, 0, 0.1)", 16 | data: [], 17 | pointRadius: 0 18 | }, { 19 | label: "Validation Error", 20 | fill: false, 21 | data: [], 22 | borderColor: "rgba(150, 26, 31, 0.25)", 23 | backgroundColor: "rgba(150, 26, 31, 0.25)", 24 | pointRadius: 0 25 | }] 26 | }, 27 | options: { 28 | scales: { 29 | xAxes: [{ 30 | type: "linear", 31 | position: "bottom" 32 | }], 33 | yAxes: [{ 34 | ticks: { 35 | beginAtZero: true 36 | } 37 | }] 38 | }, 39 | tooltips: { 40 | enabled: false 41 | }, 42 | // maintainAspectRatio: false 43 | responsive: false 44 | } 45 | }) 46 | this.chartX = 0 47 | this.chartYCount = 0 48 | this.chartY = 0 49 | this.chartY2 = 0 50 | this.chartY2Count = 0 51 | this.averageOver = averageOver 52 | this.interval = interval 53 | this.cutOff = cutOff 54 | container.appendChild(canvas) 55 | } 56 | 57 | addTrainingError (err) { 58 | 59 | this.chartY += err 60 | this.chartYCount++ 61 | 62 | if (this.chartYCount==this.averageOver) { 63 | 64 | this.chart.data.datasets[0].data.push({ 65 | x: this.chartX * this.interval, 66 | y: this.chartY/this.averageOver 67 | }) 68 | 69 | if (this.cutOff && this.chart.data.datasets[0].data.length>this.cutOff/this.averageOver) { 70 | this.chart.data.datasets[0].data.shift() 71 | } 72 | 73 | this.chartYCount = 0 74 | this.chartY = 0 75 | this.chartX += this.averageOver 76 | this.chart.update() 77 | } 78 | } 79 | 80 | addValidationError (err) { 81 | 82 | this.chart.data.datasets[1].data.push({ 83 | x: this.chartX * this.interval, 84 | y: err 85 | }) 86 | 87 | this.chartY2Count = 0 88 | this.chartY2 = 0 89 | } 90 | 91 | clear () { 92 | this.chart.data.datasets[0].data = [] 93 | this.chart.data.datasets[1].data = [] 94 | this.chartX = 0 95 | this.chartYCount = 0 96 | this.chartY = 0 97 | this.chartY2Count = 0 98 | this.chartY2 = 0 99 | this.chart.update() 100 | } 101 | 102 | loadAllData ({training, validation, validationRate}) { 103 | 104 | this.clear() 105 | 106 | let chartY = 0 107 | let chartYCount = 0 108 | 109 | for (let i=0; i validationRate && i%validationRate == 0) { 114 | this.addValidationError(validation.shift()) 115 | } 116 | } 117 | 118 | this.chart.update() 119 | } 120 | } -------------------------------------------------------------------------------- /examples/mnist/fc-784f-100f-10f.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DanRuta/jsNet/b17bf7eb7dc3464ecf6f6a9d813180fb89b7d09f/examples/mnist/fc-784f-100f-10f.png -------------------------------------------------------------------------------- /examples/mnist/mnist.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | jsNet - MNIST 5 | 6 | 7 | 8 | 9 | 46 | 331 | 332 | Epochs 333 | Mini Batch 334 | 335 | 336 | Keep the console open for output. Right click clears the digit drawing. 337 | 338 |
--------------------------------------------------------------------------------
/examples/mnist/readmeimg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanRuta/jsNet/b17bf7eb7dc3464ecf6f6a9d813180fb89b7d09f/examples/mnist/readmeimg.png
--------------------------------------------------------------------------------
/examples/multiInstance/multiple.html:
--------------------------------------------------------------------------------
[Only fragments of this page survived extraction: the markup and inline scripts (most of the file's 98 lines) were consumed. Recoverable page text - title: "jsNet - Multiple WASM instances example"; instructions: "Click the Create new net instance button to add new net instances. Click their Train buttons to train them on the XOR data set, and click Run to run the values in the below input fields through the network."]
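The multi-instance page's scripts were likewise lost, but the API it exercises is the same one used by examples/nodejsDemo.js below. The following is a rough sketch of what its buttons do, with hypothetical helper names (createNetInstance, trainInstance, runInstance); Network, FCLayer, the shared WASM Module and the xor data set are assumed to be in scope, as in nodejsDemo.js.

// Sketch only - helper names are hypothetical; the API calls mirror examples/nodejsDemo.js
const nets = []

function createNetInstance () {
    // Each instance gets its own Network but shares the one loaded WASM Module
    const net = new Network({Module, layers: [new FCLayer(2), new FCLayer(3), new FCLayer(1)]})
    nets.push(net)
    return nets.length - 1                     // index used by the Train/Run buttons
}

function trainInstance (index) {
    return nets[index].train(xor, {epochs: 1000, log: false})
}

function runInstance (index, input) {
    return nets[index].forward(input)          // e.g. runInstance(0, [0, 1])
}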
95 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /examples/nodejsDemo.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | const xor = [ 4 | {input: [0, 0], expected: [0]}, 5 | {input: [0, 1], expected: [1]}, 6 | {input: [1, 0], expected: [1]}, 7 | {input: [1, 1], expected: [0]} 8 | ] 9 | 10 | // Load the JavaScript only version 11 | const demoJS = () => { 12 | // npm 13 | // const {Network, FCLayer} = require("jsNet").js() 14 | 15 | // manual 16 | const {Network, FCLayer} = require("./dist/jsNet.js").js() 17 | 18 | // manual - js only 19 | // const {Network, FCLayer} = require("./dist/jsNetJS.min.js") 20 | 21 | console.log("JavaScript version loaded. Starting training...") 22 | const start = Date.now() 23 | 24 | const net = new Network({layers: [new FCLayer(2), new FCLayer(3), new FCLayer(1)]}) 25 | 26 | net.train(xor, {epochs: 1000, log: false}).then(() => { 27 | console.log("Forward 0,0: ", net.forward(xor[0].input)) 28 | console.log("Forward 0,1: ", net.forward(xor[1].input)) 29 | console.log("Forward 1,0: ", net.forward(xor[2].input)) 30 | console.log("Forward 1,1: ", net.forward(xor[3].input)) 31 | console.log(`\n\nElapsed: ${Date.now()-start}ms`) 32 | }) 33 | } 34 | 35 | // Load the WebAsssembly version 36 | const demoWebAssembly = () => { 37 | 38 | // npm 39 | // const {Module, Network, FCLayer} = require("jsNet").webassembly() 40 | 41 | // manual 42 | const {Module, Network, FCLayer} = require("./dist/jsNet.js").webassembly("./dist/NetWASM.wasm") 43 | 44 | // manual - webassembly only 45 | // global.jsNetWASMPath = "./dist/NetWASM.wasm" 46 | // const {Network, FCLayer} = require("./dist/jsNetWebAssembly.min.js") 47 | // const Module = require("./dist/NetWASM.js") 48 | 49 | global.onWASMLoaded = () => { 50 | console.log("WebAsssembly version loaded. Starting training...") 51 | const start = Date.now() 52 | 53 | const net = new Network({ 54 | Module: Module, 55 | layers: [new FCLayer(2), new FCLayer(3), new FCLayer(1)] 56 | }) 57 | 58 | net.train(xor, {epochs: 1000, log: false}).then(() => { 59 | console.log("Forward 0,0: ", net.forward(xor[0].input)) 60 | console.log("Forward 0,1: ", net.forward(xor[1].input)) 61 | console.log("Forward 1,0: ", net.forward(xor[2].input)) 62 | console.log("Forward 1,1: ", net.forward(xor[3].input)) 63 | console.log(`\n\nElapsed: ${Date.now()-start}ms`) 64 | demoJS() 65 | }) 66 | } 67 | } 68 | 69 | demoWebAssembly() 70 | -------------------------------------------------------------------------------- /examples/webpack loading/dist/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | jsNet - Webpack 5 | 6 | 7 | 8 | (Open the console. 
Serve via server.js) 9 | 10 | -------------------------------------------------------------------------------- /examples/webpack loading/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "version": "", 4 | "dependencies": { 5 | "jsnet": "^3.3.1" 6 | }, 7 | "devDependencies": { 8 | "webpack": "^4.2.0", 9 | "webpack-cli": "^2.0.12" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/webpack loading/server.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | const http = require("http") 4 | const fs = require("fs") 5 | const url = require("url") 6 | const PORT = 1337 7 | 8 | http.createServer((request, response) => { 9 | 10 | let path = url.parse(request.url).pathname 11 | let data 12 | 13 | path = (path=="/"?"/dist/index.html":path) 14 | 15 | console.log(path) 16 | 17 | switch (true) { 18 | case path.endsWith("/NetWASM.wasm"): 19 | try { 20 | console.log("Returning the wasm file", __dirname+"/node_modules/jsnet/dist/NetWASM.wasm") 21 | data = fs.readFileSync(__dirname+"/node_modules/jsnet/dist/NetWASM.wasm") 22 | } catch (e) {} 23 | break 24 | default: 25 | try { 26 | data = fs.readFileSync(__dirname+path) 27 | } catch (e) {} 28 | } 29 | 30 | response.end(data) 31 | 32 | }).listen(PORT, () => console.log(`Server Listening on port: ${PORT}`)) 33 | 34 | -------------------------------------------------------------------------------- /examples/webpack loading/src/index.js: -------------------------------------------------------------------------------- 1 | import jsnet from "jsnet" 2 | 3 | const { Network, Layer, FCLayer, ConvLayer, PoolLayer, Filter, Neuron, NetMath, NetUtil } = jsnet.webassembly() 4 | 5 | window.addEventListener("jsNetWASMLoaded", () => { 6 | 7 | console.log("jsnet", jsnet) 8 | console.log("Module", Module) 9 | 10 | window.net = new Network({ 11 | Module, 12 | layers: [2,3,4] 13 | }) 14 | console.log("net", window.net) 15 | }) -------------------------------------------------------------------------------- /examples/webpack loading/webpack.config.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | 3 | module.exports = { 4 | entry: './src/index.js', 5 | node: { 6 | fs: 'empty' 7 | }, 8 | output: { 9 | filename: 'main.js', 10 | path: path.resolve(__dirname, 'dist') 11 | } 12 | }; -------------------------------------------------------------------------------- /gruntfile.js: -------------------------------------------------------------------------------- 1 | module.exports = function(grunt){ 2 | grunt.initConfig({ 3 | concat: { 4 | options: { 5 | sourceMap: true 6 | }, 7 | "jsNet": { 8 | src: ["dev/jsNet.js"], 9 | dest: "dist/jsNet.js" 10 | }, 11 | "js-WebAssembly": { 12 | src: ["dev/js-WebAssembly/*.js", "!dev/js-WebAssembly/NetWASM.js"], 13 | dest: "dist/jsNetWebAssembly.concat.js" 14 | }, 15 | "js-noWebAssembly": { 16 | src: ["dev/js/*.js", "!dev/js/NetAssembly.js"], 17 | dest: "dist/jsNetJS.concat.js" 18 | }, 19 | "NetWASM.js": { 20 | src: ["dist/NetWASM.js", "dev/js-WebAssembly/NetWASM.js"], 21 | dest: "dist/NetWASM.js" 22 | } 23 | }, 24 | 25 | uglify: { 26 | my_target: { 27 | options: { 28 | sourceMap: { 29 | includeSources: true, 30 | }, 31 | mangle: false, 32 | }, 33 | files: { 34 | "dist/jsNetWebAssembly.min.js" : ["dist/jsNetWebAssembly.concat.js"], 35 | "dist/jsNetJS.min.js" : ["dist/jsNetJS.concat.js"] 36 | } 37 | 
} 38 | }, 39 | 40 | exec: { 41 | build: "C:/emsdk/emsdk_env.bat & echo Building... & emcc -o ./dist/NetWASM.js ./dev/cpp/emscripten.cpp -O3 -s ALLOW_MEMORY_GROWTH=1 -s WASM=1 -s NO_EXIT_RUNTIME=1 -std=c++14", 42 | emscriptenTests: "C:/emsdk/emsdk_env.bat & echo Building... & emcc -o ./test/emscriptenTests.js ./test/emscriptenTests.cpp -O3 -s ALLOW_MEMORY_GROWTH=1 -s WASM=1 -s NO_EXIT_RUNTIME=1 -std=c++14" 43 | }, 44 | 45 | watch: { 46 | jsNet: { 47 | files: ["dev/jsNet.js"], 48 | tasks: ["concat:jsNet"] 49 | }, 50 | cpp: { 51 | files: ["dev/cpp/*.cpp", "dev/cpp/*.h"], 52 | tasks: ["exec:build", "concat:NetWASM.js", "concat:js-WebAssembly", "uglify", "replace:emscriptenWASMPath"] 53 | }, 54 | js: { 55 | files: ["dev/js/*.js"], 56 | tasks: ["concat:js-noWebAssembly", "uglify"] 57 | }, 58 | wa: { 59 | files: ["dev/js-WebAssembly/*.js"], 60 | tasks: ["concat:js-WebAssembly", "uglify", "replace:emscriptenWASMPath"] 61 | }, 62 | emscriptenTests: { 63 | files: ["test/emscriptenTests.cpp"], 64 | tasks: ["exec:emscriptenTests", "replace:emscriptenTestsFilePath"] 65 | } 66 | }, 67 | 68 | replace: { 69 | emscriptenTestsFilePath: { 70 | src: ["test/emscriptenTests.js"], 71 | dest: "test/emscriptenTests.js", 72 | replacements: [{ 73 | from: "emscriptenTests.wasm", 74 | to: "test/emscriptenTests.wasm" 75 | }] 76 | }, 77 | emscriptenWASMPath: { 78 | src: ["dist/NetWASM.js"], 79 | dest: ["dist/NetWASM.js"], 80 | replacements: [{ 81 | from: `"NetWASM.wasm"`, 82 | to: "global.jsNetWASMPath" 83 | }] 84 | } 85 | } 86 | }) 87 | 88 | grunt.loadNpmTasks("grunt-contrib-watch") 89 | grunt.loadNpmTasks('grunt-contrib-concat') 90 | grunt.loadNpmTasks('grunt-contrib-uglify-es') 91 | grunt.loadNpmTasks('grunt-text-replace') 92 | grunt.loadNpmTasks("grunt-exec") 93 | 94 | grunt.registerTask("default", ["watch"]) 95 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "jsnet", 3 | "version": "3.4.1", 4 | "description": "Javascript based deep learning framework for basic and convolutional neural networks.", 5 | "scripts": { 6 | "test": "npm run js-tests && npm run wa-tests", 7 | "js-tests": "nyc mocha test/js-test.js", 8 | "wa-tests": "nyc mocha test/wa-test.js", 9 | "cpp-tests": "cd ./build && make && cpp-tests", 10 | "coverage": "nyc report --reporter=text-lcov | coveralls", 11 | "coveralls": "npm run coverage -- --report lcovonly && cat ./coverage/lcov.info | coveralls", 12 | "build": "rm -rf build && mkdir build && cd build && cmake -G\"MSYS Makefiles\" .. 
&& npm install" 13 | }, 14 | "repository": { 15 | "type": "git", 16 | "url": "git+https://github.com/DanRuta/jsNet.git" 17 | }, 18 | "keywords": [ 19 | "convolutional", 20 | "deep", 21 | "learning", 22 | "neural", 23 | "network", 24 | "webassembly", 25 | "javascript" 26 | ], 27 | "main": "dist/jsNet.js", 28 | "author": "Dan Ruta", 29 | "license": "MIT", 30 | "directories": {}, 31 | "bugs": { 32 | "url": "https://github.com/DanRuta/jsNet/issues" 33 | }, 34 | "engines": { 35 | "node": ">= 8.4.0" 36 | }, 37 | "homepage": "https://github.com/DanRuta/jsNet#readme", 38 | "devDependencies": { 39 | "chai": "3.5.0", 40 | "chai-as-promised": "^6.0.0", 41 | "coveralls": "^2.13.1", 42 | "grunt": "^1.0.1", 43 | "grunt-cli": "^1.2.0", 44 | "grunt-contrib-concat": "^1.0.1", 45 | "grunt-contrib-uglify": "git://github.com/gruntjs/grunt-contrib-uglify.git#harmony", 46 | "grunt-contrib-uglify-es": "git://github.com/gruntjs/grunt-contrib-uglify.git#harmony", 47 | "grunt-contrib-watch": "^1.0.0", 48 | "grunt-exec": "^3.0.0", 49 | "grunt-text-replace": "^0.4.0", 50 | "istanbul": "^0.4.5", 51 | "mocha": "^3.4.2", 52 | "mocha-lcov-reporter": "^1.3.0", 53 | "nyc": "^11.0.2", 54 | "sinon": "^2.3.2", 55 | "sinon-chai": "^2.10.0" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /server.js: -------------------------------------------------------------------------------- 1 | "use strict" 2 | 3 | const http = require("http") 4 | const fs = require("fs") 5 | const url = require("url") 6 | const PORT = 1337 7 | 8 | http.createServer((request, response) => { 9 | 10 | let path = url.parse(request.url).pathname 11 | let data 12 | 13 | path = (path=="/"?"/examples/index.html":path).replace(/%20/g, " ") 14 | 15 | console.log(path) 16 | 17 | switch (true) { 18 | case path.endsWith("/NetWASM.wasm"): 19 | try { 20 | console.log("Returning the wasm file", __dirname+"/dist/NetWASM.wasm") 21 | data = fs.readFileSync(__dirname+"/dist/NetWASM.wasm") 22 | } catch (e) {} 23 | break 24 | default: 25 | try { 26 | data = fs.readFileSync(__dirname+path) 27 | } catch (e) {} 28 | } 29 | 30 | response.end(data) 31 | 32 | }).listen(PORT, () => console.log(`Server Listening on port: ${PORT}`)) 33 | 34 | -------------------------------------------------------------------------------- /test/cpp-mocks.cpp: -------------------------------------------------------------------------------- 1 | #include "gmock/gmock.h" 2 | 3 | class MockLayer : public Layer { 4 | public: 5 | 6 | MockLayer (int netI, int s); 7 | 8 | virtual ~MockLayer(); 9 | 10 | MOCK_METHOD1(assignNext, void(Layer* l)); 11 | 12 | MOCK_METHOD1(assignPrev, void(Layer* l)); 13 | 14 | MOCK_METHOD1(init, void(int layerIndex)); 15 | 16 | MOCK_METHOD0(forward, void(void)); 17 | 18 | MOCK_METHOD1(backward, void(bool lastLayer)); 19 | 20 | MOCK_METHOD0(applyDeltaWeights, void(void)); 21 | 22 | MOCK_METHOD0(resetDeltaWeights, void(void)); 23 | 24 | MOCK_METHOD0(backUpValidation, void(void)); 25 | 26 | MOCK_METHOD0(restoreValidation, void(void)); 27 | }; -------------------------------------------------------------------------------- /test/emscriptenTests.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | int main(int argc, char const *argv[]) { 7 | return 0; 8 | } 9 | 10 | extern "C" { 11 | 12 | /* NetAssembly.ccallArrays */ 13 | 14 | EMSCRIPTEN_KEEPALIVE 15 | float* getSetWASMArray (float *buf1, int buf1Size, int aNumber, float *buf2, int buf2Size) { 
    float values[buf1Size];

    /* Roughly original lines 19-38 did not survive extraction: the rest of
       getSetWASMArray and the start of the next exported test function are missing */

    // Declaration head reconstructed; element type assumed float, since a float buffer is filled below
    std::vector<std::vector<std::vector<float> > > test = { {{1,2},{3,4}}, {{5,6},{7,8}} };

    int depth = test.size();
    int rows = test[0].size();
    int cols = test[0][0].size();
    float values[depth * rows * cols];

    /* Roughly original lines 45-60 did not survive extraction */

    // Declaration head reconstructed; element type assumed uint8_t, since a uint8_t buffer is filled below
    std::vector<std::vector<std::vector<uint8_t> > > test = { {{1,2,3},{4,5,6}} };

    uint8_t values[depth * rows * cols];

    /* Roughly original lines 64-79 did not survive extraction */

    int32_t *values = (int32_t*) std::malloc(10 * sizeof(*values));   // sized for the 10 values written below

    for (int i=0; i<10; i++) {
        values[i] = i+1;
    }

    auto arrayPtr = &values[0];
    return arrayPtr;
}

EMSCRIPTEN_KEEPALIVE
int addNums (float *buf, int bufSize) {

    int x = 0;

    for (int i=0; i