├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── LICENSE ├── NeuralNetworks ├── MLP.cpp ├── MLP.h ├── NeuralNetworks.cpp ├── NeuralNetworks.sln ├── NeuralNetworks.vcxproj └── NeuralNetworks.vcxproj.filters ├── PULL_REQUEST_TEMPLATE.md ├── Personal Note ├── 1. Introduction.md ├── 10. Simple Neuron Model.md ├── 11. Activation Functions.md ├── 12. Perceptrons.md ├── 13. Challenge on Perceptrons.md ├── 14. Perceptron Challenge Solution.md ├── 15. Logic Gates.md ├── 16. Challenge on Logic Gates.md ├── 17. Logit Gates Chalenge Solution.md ├── 18. Linear Separability.md ├── 19. Multilayer Perceptron Class.md ├── 2. Prerequisites.md ├── 20. Challenge on Multilayer Perceptron.md ├── 21. Multilayer Perceptron Challenge Solution.md ├── 22. Need for Training.md ├── 23. Training Process.md ├── 24. Error Function.md ├── 25. Gradient Descent.md ├── 26. The Delta Rule.md ├── 27. Backpropagation Algorithm.md ├── 28. Challenge on Backpropagation Algorithm.md ├── 29. Backpropagation Challenge Solution.md ├── 3. What is a Neural Network.md ├── 30. Segment Display Recognition.md ├── 31. Challenge on Segment Display.md ├── 32. SDR Challenge Solution.md ├── 33. Challenge on Training SDR NN.md ├── 34. Train SDR NN Challenge Solution.md ├── 35. Conclusion.md ├── 4. Why C++.md ├── 5. Applications of ML.md ├── 6. Types of Classifiers.md ├── 7. Types of Neural Networks.md ├── 8. Multilayer Perceptrons.md ├── 9. 
Neurons and Brain.md └── README.md ├── README.md └── SDRNN ├── Executables ├── Linux │ ├── SDRNN_7to1 │ ├── SDRNN_7to10 │ └── SDRNN_7to7 ├── MacOSX │ ├── SDRNN_7to1.app.zip │ ├── SDRNN_7to10.app.zip │ └── SDRNN_7to7.app.zip └── Windows │ ├── SDRNN_7to1.exe │ ├── SDRNN_7to10.exe │ └── SDRNN_7to7.exe ├── SDRNN_7to1 ├── Builds │ ├── LinuxMakefile │ │ └── Makefile │ ├── MacOSX │ │ ├── Info-App.plist │ │ ├── RecentFilesMenuTemplate.nib │ │ └── SDRNN_7to1.xcodeproj │ │ │ ├── project.pbxproj │ │ │ └── project.xcworkspace │ │ │ └── xcshareddata │ │ │ └── WorkspaceSettings.xcsettings │ └── VisualStudio2019 │ │ ├── SDRNN_7to1.sln │ │ ├── SDRNN_7to1_App.vcxproj │ │ ├── SDRNN_7to1_App.vcxproj.filters │ │ └── resources.rc ├── JuceLibraryCode │ ├── JuceHeader.h │ ├── ReadMe.txt │ ├── include_juce_core.cpp │ ├── include_juce_core.mm │ ├── include_juce_data_structures.cpp │ ├── include_juce_data_structures.mm │ ├── include_juce_events.cpp │ ├── include_juce_events.mm │ ├── include_juce_graphics.cpp │ ├── include_juce_graphics.mm │ ├── include_juce_gui_basics.cpp │ └── include_juce_gui_basics.mm ├── SDRNN_7to1.jucer └── Source │ ├── MLP.cpp │ ├── MLP.h │ ├── Main.cpp │ └── SDRNN_7to1_GUI.h ├── SDRNN_7to10 ├── Builds │ ├── LinuxMakefile │ │ └── Makefile │ ├── MacOSX │ │ ├── Info-App.plist │ │ ├── RecentFilesMenuTemplate.nib │ │ └── SDRNN_7to10.xcodeproj │ │ │ ├── project.pbxproj │ │ │ └── project.xcworkspace │ │ │ └── xcshareddata │ │ │ └── WorkspaceSettings.xcsettings │ └── VisualStudio2019 │ │ ├── SDRNN_7to10.sln │ │ ├── SDRNN_7to10_App.vcxproj │ │ ├── SDRNN_7to10_App.vcxproj.filters │ │ └── resources.rc ├── JuceLibraryCode │ ├── JuceHeader.h │ ├── ReadMe.txt │ ├── include_juce_core.cpp │ ├── include_juce_core.mm │ ├── include_juce_data_structures.cpp │ ├── include_juce_data_structures.mm │ ├── include_juce_events.cpp │ ├── include_juce_events.mm │ ├── include_juce_graphics.cpp │ ├── include_juce_graphics.mm │ ├── include_juce_gui_basics.cpp │ └── include_juce_gui_basics.mm ├── 
SDRNN_7to10.jucer └── Source │ ├── MLP.cpp │ ├── MLP.h │ ├── Main.cpp │ └── SDRNN_7to10_GUI.h └── SDRNN_7to7 ├── Builds ├── LinuxMakefile │ └── Makefile ├── MacOSX │ ├── Info-App.plist │ ├── RecentFilesMenuTemplate.nib │ └── SDRNN_7to7.xcodeproj │ │ ├── project.pbxproj │ │ └── project.xcworkspace │ │ └── xcshareddata │ │ └── WorkspaceSettings.xcsettings └── VisualStudio2019 │ ├── SDRNN_7to7.sln │ ├── SDRNN_7to7_App.vcxproj │ ├── SDRNN_7to7_App.vcxproj.filters │ └── resources.rc ├── JuceLibraryCode ├── JuceHeader.h ├── ReadMe.txt ├── include_juce_core.cpp ├── include_juce_core.mm ├── include_juce_data_structures.cpp ├── include_juce_data_structures.mm ├── include_juce_events.cpp ├── include_juce_events.mm ├── include_juce_graphics.cpp ├── include_juce_graphics.mm ├── include_juce_gui_basics.cpp └── include_juce_gui_basics.mm ├── SDRNN_7to7.jucer └── Source ├── MLP.cpp ├── MLP.h ├── Main.cpp └── SDRNN_7to7_GUI.h /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Codeowners for these exercise files: 2 | # * (asterisk) deotes "all files and folders" 3 | # Example: * @producer @instructor 4 | 5 | * @aryashah2k 6 | -------------------------------------------------------------------------------- /ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | ## Issue Overview 9 | 10 | 11 | ## Describe your environment 12 | 13 | 14 | ## Steps to Reproduce 15 | 16 | 1. 17 | 2. 18 | 3. 19 | 4. 
20 | 21 | ## Expected Behavior 22 | 23 | 24 | ## Current Behavior 25 | 26 | 27 | ## Possible Solution 28 | 29 | 30 | ## Screenshots / Video 31 | 32 | 33 | ## Related Issues 34 | 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Arya Shah 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /NeuralNetworks/MLP.cpp: -------------------------------------------------------------------------------- 1 | #include "MLP.h" 2 | 3 | double frand(){ 4 | return (2.0*(double)rand() / RAND_MAX) - 1.0; 5 | } 6 | 7 | 8 | // Return a new Perceptron object with the specified number of inputs (+1 for the bias). 
// NOTE(review): this repository dump stripped every angle-bracket span, which
// destroyed all template arguments (vector<double>, vector<int>, ...) and the
// #include header names of MLP.h / MLP.cpp. The block below is the restored,
// compilable form of both files, with the class declarations placed before the
// member definitions so the unit is self-contained.

using namespace std;

// Uniform pseudo-random number in [-1.0, 1.0]; used to initialize weights.
// (Reconstructs the frand() helper from the top of MLP.cpp in this dump.)
double frand(){
    return (2.0*(double)rand() / RAND_MAX) - 1.0;
}

// A single artificial neuron: weighted sum of the inputs plus a bias term,
// squashed through a sigmoid activation.
class Perceptron {
public:
    vector<double> weights;   // one weight per input, plus the bias weight last
    double bias;              // constant bias input (conventionally 1.0)
    Perceptron(int inputs, double bias=1.0);
    double run(vector<double> x);
    void set_weights(vector<double> w_init);
    double sigmoid(double x);
};

// A feed-forward network of Perceptrons, trained with backpropagation.
class MultiLayerPerceptron {
public:
    MultiLayerPerceptron(vector<int> layers, double bias=1.0, double eta = 0.5);
    void set_weights(vector<vector<vector<double> > > w_init);
    void print_weights();
    vector<double> run(vector<double> x);
    double bp(vector<double> x, vector<double> y);

    vector<int> layers;                    // neurons per layer; layers[0] = number of inputs
    double bias;                           // bias value fed to every neuron
    double eta;                            // learning rate for backpropagation
    vector<vector<Perceptron> > network;   // network[i][j] = neuron j of layer i (layer 0 empty)
    vector<vector<double> > values;        // latest output of every neuron, per layer
    vector<vector<double> > d;             // error terms computed by bp(), per layer
};

// Return a new Perceptron object with the specified number of inputs (+1 for the bias).
// Weights start at random values in [-1, 1].
Perceptron::Perceptron(int inputs, double bias){
    this->bias = bias;
    weights.resize(inputs+1);
    generate(weights.begin(),weights.end(),frand);
}

// Run the perceptron. x is a vector with the input values.
double Perceptron::run(vector<double> x){
    x.push_back(bias);  // treat the bias as one extra input, so one dot product suffices
    double sum = inner_product(x.begin(),x.end(),weights.begin(),(double)0.0);
    return sigmoid(sum);
}

// Set the weights. w_init is a vector with the weights (bias weight last).
void Perceptron::set_weights(vector<double> w_init){
    weights = w_init;
}

// Evaluate the sigmoid function for the floating point input x.
double Perceptron::sigmoid(double x){
    return 1.0/(1.0 + exp(-x));
}

// Return a new MultiLayerPerceptron object with the specified parameters.
// layers[i] gives the neuron count of layer i; layer 0 is the input layer.
MultiLayerPerceptron::MultiLayerPerceptron(vector<int> layers, double bias, double eta) {
    this->layers = layers;
    this->bias = bias;
    this->eta = eta;

    for (int i = 0; i < (int)layers.size(); i++){
        values.push_back(vector<double>(layers[i],0.0));
        d.push_back(vector<double>(layers[i],0.0));
        network.push_back(vector<Perceptron>());
        if (i > 0)   // network[0] is the input layer, so it has no neurons
            for (int j = 0; j < layers[i]; j++)
                network[i].push_back(Perceptron(layers[i-1], bias));
    }
}

// Set the weights. w_init is a vector of vectors of vectors with the weights
// for all but the input layer (hence the i+1 offset into network).
void MultiLayerPerceptron::set_weights(vector<vector<vector<double> > > w_init) {
    for (int i = 0; i < (int)w_init.size(); i++)
        for (int j = 0; j < (int)w_init[i].size(); j++)
            network[i+1][j].set_weights(w_init[i][j]);
}

// Print every neuron's weights, one line per neuron, for inspection.
void MultiLayerPerceptron::print_weights() {
    cout << endl;
    for (int i = 1; i < (int)network.size(); i++){
        for (int j = 0; j < layers[i]; j++) {
            cout << "Layer " << i+1 << " Neuron " << j << ": ";
            for (auto &it: network[i][j].weights)
                cout << it << " ";
            cout << endl;
        }
    }
    cout << endl;
}

// Feed a sample x into the MultiLayer Perceptron; returns the output layer values.
vector<double> MultiLayerPerceptron::run(vector<double> x) {
    values[0] = x;
    for (int i = 1; i < (int)network.size(); i++)
        for (int j = 0; j < layers[i]; j++)
            values[i][j] = network[i][j].run(values[i-1]);
    return values.back();
}

// Run a single (x,y) training pair through the backpropagation algorithm.
// Returns the mean squared error of the sample before the weight update.
double MultiLayerPerceptron::bp(vector<double> x, vector<double> y){

    // STEP 1: Feed a sample to the network
    vector<double> outputs = run(x);

    // STEP 2: Calculate the MSE
    vector<double> error;
    double MSE = 0.0;
    for (int i = 0; i < (int)y.size(); i++){
        error.push_back(y[i] - outputs[i]);
        MSE += error[i] * error[i];
    }
    MSE /= layers.back();

    // STEP 3: Calculate the output error terms: d = o * (1 - o) * (y - o)
    for (int i = 0; i < (int)outputs.size(); i++)
        d.back()[i] = outputs[i] * (1 - outputs[i]) * (error[i]);

    // STEP 4: Calculate the error term of each unit on each hidden layer,
    // propagating the weighted error terms backwards from layer i+1.
    for (int i = (int)network.size()-2; i > 0; i--)
        for (int h = 0; h < (int)network[i].size(); h++){
            double fwd_error = 0.0;
            for (int k = 0; k < layers[i+1]; k++)
                fwd_error += network[i+1][k].weights[h] * d[i+1][k];
            d[i][h] = values[i][h] * (1-values[i][h]) * fwd_error;
        }

    // STEPS 5 & 6: Calculate the deltas and update the weights.
    // Index k == layers[i-1] is the bias weight (see Perceptron::run).
    for (int i = 1; i < (int)network.size(); i++)
        for (int j = 0; j < layers[i]; j++)
            for (int k = 0; k < layers[i-1]+1; k++){
                double delta;
                if (k==layers[i-1])
                    delta = eta * d[i][j] * bias;
                else
                    delta = eta * d[i][j] * values[i-1][k];
                network[i][j].weights[k] += delta;
            }
    return MSE;
}

// NeuralNetworks.cpp : This file contains the 'main' function.
Program execution begins and ends there.// 2 | 3 | #include 4 | #include "MLP.h" 5 | 6 | int main() { 7 | srand(time(NULL)); 8 | rand(); 9 | 10 | 11 | cout << "\n\n--------Logic Gate Example----------------\n\n"; 12 | Perceptron *p = new Perceptron(2); 13 | 14 | //{10,10,-15} #AND 15 | //{15,15,-10} #OR 16 | //{-15,-15,10} #NOR 17 | //{-10,-10,15} #NAND 18 | 19 | p->set_weights({15,15,-10}); 20 | 21 | cout << "Gate: "<run({0,0})<run({0,1})<run({1,0})<run({1,1})<bp({1,1,1,1,1,1,0}, {1,0,0,0,0,0,0,0,0,0}); //0 pattern 96 | MSE += sdrnn->bp({0,1,1,0,0,0,0}, {0,1,0,0,0,0,0,0,0,0}); //1 pattern 97 | MSE += sdrnn->bp({1,1,0,1,1,0,1}, {0,0,1,0,0,0,0,0,0,0}); //2 pattern 98 | MSE += sdrnn->bp({1,1,1,1,0,0,1}, {0,0,0,1,0,0,0,0,0,0}); //3 pattern 99 | MSE += sdrnn->bp({0,1,1,0,0,1,1}, {0,0,0,0,1,0,0,0,0,0}); //4 pattern 100 | MSE += sdrnn->bp({1,0,1,1,0,1,1}, {0,0,0,0,0,1,0,0,0,0}); //5 pattern 101 | MSE += sdrnn->bp({1,0,1,1,1,1,1}, {0,0,0,0,0,0,1,0,0,0}); //6 pattern 102 | MSE += sdrnn->bp({1,1,1,0,0,0,0}, {0,0,0,0,0,0,0,1,0,0}); //7 pattern 103 | MSE += sdrnn->bp({1,1,1,1,1,1,1}, {0,0,0,0,0,0,0,0,1,0}); //8 pattern 104 | MSE += sdrnn->bp({1,1,1,1,0,1,1}, {0,0,0,0,0,0,0,0,0,1}); //9 pattern 105 | } 106 | MSE /= 10.0; 107 | cout << "7 to 10 network MSE: " << MSE << endl; 108 | 109 | 110 | // Dataset for the 7 to 7 network 111 | delete(sdrnn); 112 | sdrnn = new MultiLayerPerceptron({7,7,7}); 113 | 114 | for (int i = 0; i < epochs; i++){ 115 | MSE = 0.0; 116 | MSE += sdrnn->bp({1,1,1,1,1,1,0}, {1,1,1,1,1,1,0}); //0 pattern 117 | MSE += sdrnn->bp({0,1,1,0,0,0,0}, {0,1,1,0,0,0,0}); //1 pattern 118 | MSE += sdrnn->bp({1,1,0,1,1,0,1}, {1,1,0,1,1,0,1}); //2 pattern 119 | MSE += sdrnn->bp({1,1,1,1,0,0,1}, {1,1,1,1,0,0,1}); //3 pattern 120 | MSE += sdrnn->bp({0,1,1,0,0,1,1}, {0,1,1,0,0,1,1}); //4 pattern 121 | MSE += sdrnn->bp({1,0,1,1,0,1,1}, {1,0,1,1,0,1,1}); //5 pattern 122 | MSE += sdrnn->bp({1,0,1,1,1,1,1}, {1,0,1,1,1,1,1}); //6 pattern 123 | MSE += sdrnn->bp({1,1,1,0,0,0,0}, 
{1,1,1,0,0,0,0}); //7 pattern 124 | MSE += sdrnn->bp({1,1,1,1,1,1,1}, {1,1,1,1,1,1,1}); //8 pattern 125 | MSE += sdrnn->bp({1,1,1,1,0,1,1}, {1,1,1,1,0,1,1}); //9 pattern 126 | } 127 | MSE /= 10.0; 128 | cout << "7 to 7 network MSE: " << MSE << endl << endl; 129 | 130 | } 131 | -------------------------------------------------------------------------------- /NeuralNetworks/NeuralNetworks.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio Version 16 4 | VisualStudioVersion = 16.0.30621.155 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "NeuralNetworks", "NeuralNetworks.vcxproj", "{BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|x64 = Debug|x64 11 | Debug|x86 = Debug|x86 12 | Release|x64 = Release|x64 13 | Release|x86 = Release|x86 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Debug|x64.ActiveCfg = Debug|x64 17 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Debug|x64.Build.0 = Debug|x64 18 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Debug|x86.ActiveCfg = Debug|Win32 19 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Debug|x86.Build.0 = Debug|Win32 20 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Release|x64.ActiveCfg = Release|x64 21 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Release|x64.Build.0 = Release|x64 22 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Release|x86.ActiveCfg = Release|Win32 23 | {BA3742FA-BA0E-40B9-AB85-037B2EBF3C79}.Release|x86.Build.0 = Release|Win32 24 | EndGlobalSection 25 | GlobalSection(SolutionProperties) = preSolution 26 | HideSolutionNode = FALSE 27 | EndGlobalSection 28 | GlobalSection(ExtensibilityGlobals) = postSolution 29 | SolutionGuid = {3202A2D5-9B50-41A9-95E7-C7D7D6F32084} 30 | EndGlobalSection 31 | EndGlobal 
32 | -------------------------------------------------------------------------------- /NeuralNetworks/NeuralNetworks.vcxproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | Win32 7 | 8 | 9 | Release 10 | Win32 11 | 12 | 13 | Debug 14 | x64 15 | 16 | 17 | Release 18 | x64 19 | 20 | 21 | 22 | 16.0 23 | Win32Proj 24 | {ba3742fa-ba0e-40b9-ab85-037b2ebf3c79} 25 | NeuralNetworks 26 | 10.0 27 | 28 | 29 | 30 | Application 31 | true 32 | v142 33 | Unicode 34 | 35 | 36 | Application 37 | false 38 | v142 39 | true 40 | Unicode 41 | 42 | 43 | Application 44 | true 45 | v142 46 | Unicode 47 | 48 | 49 | Application 50 | false 51 | v142 52 | true 53 | Unicode 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | true 75 | 76 | 77 | false 78 | 79 | 80 | true 81 | 82 | 83 | false 84 | 85 | 86 | 87 | Level3 88 | true 89 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) 90 | true 91 | 92 | 93 | Console 94 | true 95 | 96 | 97 | 98 | 99 | Level3 100 | true 101 | true 102 | true 103 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) 104 | true 105 | 106 | 107 | Console 108 | true 109 | true 110 | true 111 | 112 | 113 | 114 | 115 | Level3 116 | true 117 | _DEBUG;_CONSOLE;%(PreprocessorDefinitions) 118 | true 119 | 120 | 121 | Console 122 | true 123 | 124 | 125 | 126 | 127 | Level3 128 | true 129 | true 130 | true 131 | NDEBUG;_CONSOLE;%(PreprocessorDefinitions) 132 | true 133 | 134 | 135 | Console 136 | true 137 | true 138 | true 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | -------------------------------------------------------------------------------- /NeuralNetworks/NeuralNetworks.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | 
{93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd 11 | 12 | 13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} 14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms 15 | 16 | 17 | 18 | 19 | Source Files 20 | 21 | 22 | Source Files 23 | 24 | 25 | 26 | 27 | Header Files 28 | 29 | 30 | -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Personal Note/1. Introduction.md: -------------------------------------------------------------------------------- 1 | Neural networks are everywhere, and chances are you are using them every day. When you're shopping online and a product you like pops up, when you see a weather forecast and why not when you use your smartphone. We will create a neural network from scratch in C++. Whether you are starting your journey into artificial intelligence and machine learning, or you have some experience with neural networks with popular tools like PyTorch or TensorFlow and you'd like to brush up on the basics. 2 | 3 | Although professional tools allow you to train neural networks from a high level perspective, the objective of this course is to give you a chance to tap into the details of the algorithms behind neural networks. 4 | 5 | This will allow you to fully understand what's going on inside, and you'll make much better use of professional machine learning tools in the future. -------------------------------------------------------------------------------- /Personal Note/10. 
Simple Neuron Model.md: -------------------------------------------------------------------------------- 1 | A simple model of a neuron 2 | 3 | 4 | So based on the roles of the dendrites, nucleus and axons, let's just assign those roles to a set of inputs, a calculation unit and an output respectively to come up with the block diagram you can see at the right. We'll call the inputs x0 through xn minus one for a neuron with n inputs. 5 | 6 | Now, biological neurons seems to react in different sensitivities to different incoming signals. So we'll model that mathematically by assigning a weight to each input channel. Let's call them w0 through wn minus one. Next, the computation performed in order to fire or not is a simple weighted sum as shown inside the circle. And that's exactly what our output will be. 7 | 8 | Here's an example of how a neuron would help us classify. Let's suppose we have a two-input neuron and we feed data with two features into it. These features are shown in the plot at the right, which shows the risk of getting some medical condition, x, based on two factors, A and B as the horizontal and vertical axis. Each point represents a person and its shape represents that person's risk of getting that medial condition. Triangles are at risk while squares are not at risk. 9 | 10 | Let's say these points come from a number of observations in a research study. Now let's suppose we want to predict if a person is at risk of getting that medical condition x based on factors A and B alone. We could feed A through x0 and B through x1 into our neuron and it will give us some useful information. 11 | 12 | Notice that the weights I assigned to the inputs are one and minus one. This means that the boundary that divides positive and negative output results -------------------------------------------------------------------------------- /Personal Note/11. 
Activation Functions.md: -------------------------------------------------------------------------------- 1 | Activation functions 2 | 3 | We are almost there, but our neuron is still missing something. So let me tell you what's wrong with weighted sums. There are two inconveniences I'd like to mention. First, values aren't constrained, so as sum may sometimes result in a very large value or a very small value. 4 | 5 | Second, a weighted sum is a linear function, so the threshold to "fire" is not very well-defined. That is, a change between true and false is not very notable, and most importantly, it's not easily trained. It turns out that other functions that make learning easier are nonlinear. This is the real reason to add an element to our neuron. So what's wrong with having a very large and a very small value? 6 | 7 | Considered this example where we have a two input neuron, and we are feeding 1,000 x0 and two to x1. For now, let's leave the bias weight at zero, so the bias is not shown to keep the diagram simple. If we run the neuron, we'll have a result of 2,006. So notice that although the weights are very similar, two and three, the big difference in the input values has made the neuron very sensitive to x0 and insensitive to x1. 8 | 9 | That's the job of the weights, not of the inputs. And what's wrong with linear functions? Well, consider this neuron with one feature input and one bias input. At the right we have a plot of the output as a function of x0. Now, this is not the usual 2D plot we have seen so far. The line is not the boundary. This plot is showing the output sum z as a function of x0, so the boundary is the horizontal axis. Remember, this neuron will classify the input values -------------------------------------------------------------------------------- /Personal Note/12. 
Perceptrons.md: -------------------------------------------------------------------------------- 1 | Perceptrons: A better model of a neuron 2 | 3 | Now we have our complete model known as a perceptron. As you can see, it has a set of inputs with a global bias term. This input vector will go through a weighted sum, and this value will go into our sigmoid of activation function. Once again, pay attention to the numbering. 4 | 5 | The inputs and their weights are numbered from zero to n minus one, and the bias is treated as input number n. So, how should we interpret the output values of our neuron? Well, the output comes from the sigmoid function. Notice that the output is greater than 0.5 for a positive input. 6 | 7 | That is, for a positive weighted sum. This way, an output value of 0.5 seems like a reasonable threshold for firing. So before we dive into the code, let me point out some implementation notes. All values must be real numbers, not integers. So I'll use double precision floating point numbers, or the double C++ type. The weights and inputs may be implemented as one dimensional vectors. 8 | 9 | In our case, we'll use the generic vector from the C++ standard template library. This way, the weighted sum may be calculated in one operation as the dot product between the two vectors. That's one line of code. Finally, we'll feed the sum to our implementation of the sigmoid function. So here's the code. We are looking at mlp.h, and this is the only time we'll look at this file for the perceptron class, which starts at line 12. The only member data we need for this class -------------------------------------------------------------------------------- /Personal Note/13. Challenge on Perceptrons.md: -------------------------------------------------------------------------------- 1 | Challenge: Finish the perceptron 2 | 3 | Ready for your first challenge? Let's finish up the perceptron class. 
In order to test the class, we'll need to be able to set the weights as specific values. So we need the set weights function you see at line 24. And remember, we'll use a sigmoid as the activation function. 4 | 5 | So that's the second thing you'll have to write starting at line 28. So once again, your task is to write two class functions, the sigmoid activation function and a separate function to write values to the weights. For the argument, receive a vector of doubles. 6 | 7 | You may or may not validate that the length of this vector matches the number of inputs in the neuron, including the bias. This is not crucial because we'll use it for simple tests. This should take you about 15 minutes. -------------------------------------------------------------------------------- /Personal Note/14. Perceptron Challenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Finish the perceptron 2 | 3 | Here's my solution. As you can see, both functions are one-liners. First, to set the weights, I'm simply assigning the argument to the classes weight. That's possible because the assignment operator for the vector class, makes a copy of the source vector. 4 | 5 | As for the Sigmoid function, it's simply written as the definition we saw earlier. I used the standard library's exponential function for this. That's it, we are now ready to test our neuron. But first, let's give some meaning to our test. -------------------------------------------------------------------------------- /Personal Note/15. Logic Gates.md: -------------------------------------------------------------------------------- 1 | Logic gates 2 | 3 | 4 | In their early days, neural networks were tested with simple functions to see if they were capable of performing the calculations they were designed to perform. This led to implementing logic gates with perceptrons. So let's take a look at a two input AND gate. 
Here we have its truth table, which summarizes its behavior. 5 | 6 | Supposing zero means false and one means true, the gate outputs true only in the case where both inputs are true. You may already be familiar with all of this, but let's look at it from a totally different angle, as a classification problem. This plot shows four data points. The coordinates of these data points are the values of inputs A and B. Notice that the data points show their category as a zero or a one. This way, a two input classifier may come up with a boundary that divides the categories. 7 | 8 | Based on this, a perceptron may behave as an AND gate. So let me show you how the classification is possible. The boundary we are seeing in this 2D plot is technically the line where the sigmoid is 0.5. So a 3D plot may help making this explanation better. So let's add a third axis now vertical to express the output of the sigmoid. This will be the category that the perceptron has inferred. 9 | 10 | So if we apply the sigmoid, all the samples will be a part of the surface of the sigmoid, like objects lying on this uneven terrain. So once again, the height of these objects finally determines the category the perceptron assigns to them. The decision plane will be located at the middle of the sigmoid. So here's a valid implementation of a two input AND gate. It's a two input perceptron with the following weights: 10 for both inputs, and -15 for the bias. 11 | 12 | I just came up with these weights by aiming to get a negative sum for our result of zero, and a positive sum for one. After testing it with my perceptron, I get the values shown in this table. Notice that I included a column for the weighted sum z. Those were the values I was aiming for with the weights I used. In the Y column, the three top values are very close to zero, so they may be safely rounded to zero, and the last value is almost one. So here's how I tested the perceptron in the code. 
13 | 14 | This is the first time we'll run some code, so we need a main function. I wrote this in the neuralnetworks.cpp file. Starting at line 12, I created a perceptron with two inputs. And then in line 14, I entered the weights as a list, which is acting as an initializer for the vector argument. Notice the order of the weights, 10, 10, and -15 for the bias. That's it. Now I'm just testing all four cases with the run function and sending it to cout. 15 | 16 | So let's see it working. Great. So our perceptron can indeed operate as an AND gate. Let's move on. -------------------------------------------------------------------------------- /Personal Note/16. Challenge on Logic Gates.md: -------------------------------------------------------------------------------- 1 | Challenge: Logic gates with perceptrons 2 | 3 | Ready for your next challenge? Your task is to make your perceptron behave as a two input OR gate. Here's the truth table for the OR gate. And this is what the classification problem looks like. So, to be clear what you need to do is come up with a combination of weights that will make the perceptron behave as an OR gate. 4 | 5 | It may help to fill out the z column of this table first. Remember, you want a negative sum whenever you want the output to be zero and a positive sum for one. Go ahead and tweak the code to test your new OR gate. 6 | 7 | This shouldn't take you more than 15 minutes, especially because you may use the provided code, which is a modification of the AND gate code you just saw. -------------------------------------------------------------------------------- /Personal Note/17. Logit Gates Chalenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Logic gates with perceptrons 2 | 3 | 4 | The values I came up with are 15 for both inputs and minus 10 for the bias. This way, any value of one in the inputs will make the weighted sum positive. 
Running it, we can see the or behavior. 5 | 6 | That is for zero, zero, we get a value very close to zero and the rest are greater than 0.99. If you like this exercise, you may want to create the Nant and nor Gates as an extra exercise. 7 | 8 | Great. So our perceptron can operate as we ask with the weights we write into it. Now we are ready to teach a behavior to our perceptron. -------------------------------------------------------------------------------- /Personal Note/18. Linear Separability.md: -------------------------------------------------------------------------------- 1 | Linear separability 2 | 3 | Let me tell you what it is. Looking back at the pass or fail example, this situation is suitable for a perceptron because it is linearly separable. Simply put, linear separability is a property of a data set with two categories where a linear function can separate the categories. This is a 2D plot, and there is a straight line that separates the categories. Therefore, this example is linearly separable, and a perceptron can take care of it. 4 | 5 | For more dimensions, a plane or hyperplane would separate the categories, but it's still a linear function. Let's look at a pesky logic gate example. The XOR gate outputs one when only one of its two inputs is one, but not both. Look at the 2D plot. I dare you to find one single straight line that divides the zeros and the ones. Did you find it? Of course not because there is none. The XOR problem is not linearly separable, so it's impossible to solve it with a single perceptron. See what I did there? I chose my words carefully. 6 | 7 | We cannot implement an XOR gate with one perceptron, but we can implement it with three perceptrons. Let me show you how. Recall the OR gate. This gate takes care of the first three XOR cases in the truth table. Look at the 2D plot of the XOR gate. If we used an OR gate, we would get all but one of the points correctly classified. 
The top right point should be zero, but the OR gate would classify it as one. What about a NAND gate? This one gets the last three XOR cases right. If we used a NAND gate to classify the XOR cases, we would misclassify the bottom left point. See where I'm going? If we could somehow combine the OR gate with the NAND gate, we would get an XOR. Well, almost. We still need some simple logic. 8 | 9 | Look at the overlap between the 2D plots. We want to classify as one only the points that get classified as one by both gates. So the AND gate would do the trick. This isn't anything new though. This is a well-known implementation of the XOR gate, a composition of an OR, a NAND, and an AND gate. Here's the schematic of such composition. So this looks like a plan. Let's create a network of three perceptrons. We've already figured out the weights for the AND and OR gates. All that's left is the NAND gate. 10 | 11 | So let me show you our first neural network to create this logic circuit. Here's our NAND gate. You may want to pause the video and look at the weights to verify that it indeed behaves as a NAND gate. Here is the OR gate you already designed. And finally, we plug the outputs into our AND gate. Notice that the bias is always one. And that's a very common practice to tie all the bias inputs to a constant value of one, and only deal with the bias weights. -------------------------------------------------------------------------------- /Personal Note/19. Multilayer Perceptron Class.md: -------------------------------------------------------------------------------- 1 | Writing the multilayer perceptron class 2 | 3 | So it turns out that the network we just described is a multilayer perceptron. Think about it. It's made of perceptrons, they are interconnected in a feed-forward fashion and they are organized in layers. This is the input layer. Remember, no neurons, just the inputs. 
This is the hidden layer with two neurons and this is the output layer with just one neuron. So it's time to start writing code again. 4 | 5 | So here's our first implementation of the multilayer perceptron class. I'll write the constructor and you'll write the rest. But first, let's look at MLP.h to see the class members starting at line 30. First, we have layers, which is a vector of integers that represent the number of neurons per layer. This includes the input layer, which has no neurons but here we mean the number of inputs. The bias is also a member. There's a third member called eta in line 32, which is known as the learning rate. 6 | 7 | We'll talk about it later. Next, we have the actual network. It's a vector of vectors of perceptrons. Then we'll need another vector with the same dimensions as the network to hold the output values of the neurons. I named it values. This will be useful for propagating the results forward through the network. Finally, we have yet another vector of vectors of doubles called d. It will contain the so-called error terms for the neurons. We'll learn about this later. So let's go to MLP.cpp. Look at the constructor starting at line 34. It has three parameters: the layers vector, the bias with the usual default value of one and eta with a default value of 0.5. 8 | 9 | Remember, we'll use this last parameter later. So in lines 35 through 37, the members are initialized with the arguments. The next part consists of two nested loops to create the neurons layer by layer. The outer loop iterates on i for each layer. So for each layer, we need to add a vector of values and a vector of neurons. The new vector of values will be filled with zeros for every neuron in the layer. And the vector of neurons will be empty for now. Now, the inner loop iterates on j for each neuron in the layer but will leave the first layer empty because it has no neurons. 
10 | 11 | So for every neuron, I'll create a perceptron with as many inputs as the neurons in the previous layer. Remember, the bias input doesn't count here. And I also pass the bias value to the perceptron constructor so that it's it for the constructor. -------------------------------------------------------------------------------- /Personal Note/2. Prerequisites.md: -------------------------------------------------------------------------------- 1 | Before we start, there are some things you should know. 2 | 3 | First and foremost, you must feel comfortable with writing code in C++. If you're not sure, I suggest that you give it a try until you reach the first programming exercise. If you still need some training, go ahead and browse our library to choose one of our many courses on C++. 4 | 5 | You'll also need some basic knowledge on the C++ standard template library. If you lack this knowledge, or if you think you need to brush up on it, I recommend that you take my LinkedIn Learning course, titled "The C++ Standard Template Library." Although you won't need an IDE to run your C++ code, I'll use Visual Studio Community, but you may use your favorite development environment. 6 | 7 | We'll write and test all of our code with your C++ compiler and the standard template library. So you don't need any additional libraries. However, my implementation of the final challenge has a graphical user interface that uses JUCE, and multi-platform framework for creating GUIs that run identically on Windows, MacOS, and Linux. 8 | 9 | So if you'd like to follow along and tweak the code for my implementation of the final application challenge, you'll have to install JUCE. If not, don't worry. I've provided the executable files of my application for all three platforms anyway. Lastly, you'll need a basic understanding of college level mathematics. We'll be discussing functions, like summations and exponentials. 
So having experience with that level of mathematics will be helpful. -------------------------------------------------------------------------------- /Personal Note/20. Challenge on Multilayer Perceptron.md: -------------------------------------------------------------------------------- 1 | Challenge: Finish the multilayer perceptron class 2 | 3 | Now, you must write two functions, so we may test our multi-layer perceptron class. First, we have the set weights function starting at line 52. As argument, you may use the organization you want, but I suggest that you make it capable of initializing a network of any size. 4 | 5 | Don't forget the bias weights. In the code, you'll see that I declared the argument w_init as a vector of vectors of vectors of doubles. If you'd like to use some other structure make sure you also change the function prototype in MLP.age, or you may want to overload this function with your own, your choice. 6 | 7 | Now, look at line 57, I've written a print_weights function for you to check if your neural network has received the weights correctly and to see the weights when you have trained it later in the course. Second, we have the run functions starting at line 70, which feeds a sample to the network and returns a vector with the output values. I've written the return line, just to be clear. 8 | 9 | We simply return the last element in the values vector, which is a vector containing exactly the output layer values. So, to recap, you must write a function to write values to the weights and the run function to produce an output. You can, finally, test your new neural network with the XOR gate weights we just saw. This shouldn't take you more than 15 minutes. -------------------------------------------------------------------------------- /Personal Note/21. 
Multilayer Perceptron Challenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Finish the multilayer perceptron class 2 | 3 | For the set_weights function in line 50, I'm implementing w_init as a vector of vectors of vectors, of doubles. That's three dimensions. And this is because I'm specifying the layer, the neuron, and the input associated to each weight. However, w_init will have one less entry in the first dimension because I'm not specifying anything for the input layer as it has no neurons. 4 | 5 | So I implemented two nested loops. The outer loop iterates i through the layers in the network and the inner loop iterates j through the neurons in each layer. Now inside the inner loop, I'm using the set_weights function for each neuron. Notice that since w_init doesn't have anything for the input layer, I'm indexing the network array at i + 1. Now for the run function in line 70, the first thing I do is copy x into the first layer of the values vector. Now it's time to run a two-level nested loop for every layer in ascending order, and every neuron in each layer. The body of the loop is simply running the current neuron by feeding it the values in the previous layer. That's it. Now let's test the whole thing in NeuralNetworks.cpp. 6 | 7 | You'll see that I left the previous examples and I'm adding this hard-coded XOR example at the end, starting at line 27. First, I created a MultiLayerPerceptron with the dimensions of the XOR gate design. Next I'm assigning the required weights. As a sanity check, I'm printing out to check that the weights were successfully assigned. And finally, here we have four printing lines to test our network with all four cases of the two inputs. Let's run it. And here's the result. As you can see, it's indeed behaving as an XOR gate. Give yourself a pat on the back. -------------------------------------------------------------------------------- /Personal Note/22. 
Need for Training.md: -------------------------------------------------------------------------------- 1 | The need for training 2 | 3 | It's time to talk about the need for training. So let's look back and reflect on the following points. Throughout the coding exercises of this course, we haven't seen a useful neural network yet. True, we have seen networks that behave like gates and they may be useful, but there are much better hard-coded alternatives to perceptions when it comes to implementing a (indistinct). 4 | 5 | For example, we could simply use the logic embedded in programming languages to get away with logic. Well, it turns out that the real value of neural networks lie in their ability to learn. Sure, we just got a multi-layer perceptron to behave as a next or gate by writing the exact weights it needed. But what if we could show the neural network a lot of examples of how (murmurs) behaves so that it can learn from those examples? Wouldn't that be something? So I have good news upper ahead we'll see an algorithm to train multi-layer perceptrons known as the backpropagation algorithm. 6 | 7 | So sit tight and pay attention, but wait there's another reason to train a neural network. Remember linear separability? Well, I have bad news. Linear separability is hardly a given considered this example of classifying things as small or large based on their length and width. Let's say that small is represented by triangles and large is represented by dots. Moreover, notice that this is not linearly separable. 8 | 9 | There is no straight line that will divide the two categories, but that doesn't mean that a single perceptron won't do a good job at classifying these samples. Take this line for example, notice that it will misclassify one dot and two triangles, which doesn't seem so bad. And even if we use the multi-layer perceptron we will get a nonlinear boundary like this one which does a better job. 
Misclassifying only one triangle is better, but it's not perfect. And that's the whole point of training. We are looking for a model that will get most of the samples correctly classified because we don't know so much about the problem. 10 | 11 | And we are basing our judgment on the samples we have seen. This brings me to the problem of generalizing. This neural network may work very well for the provided data points but it still has to prove useful for new data it hasn't seen before. So let me tell you about three situations in the spectrum of misclassifying and generalizing. Here we have a different data set for the same problem we just saw. I'm showing you the same plot three times because I want you to compare these three situations for a classifier. 12 | 13 | So look at the leftmost plot and suppose we use a single perceptron with a straight line as boundary. As you can see, this one misclassified two dots and five triangles. This situation is known as underfitting, where the network misclassifies too often. So it's not very accurate. So we don't want this, this is bad. Now look at the middle plot. Suppose we use a multi-layer perceptron that ends up using an arc as a boundary. Notice that the misclassification has dropped to one dot and two triangles. 14 | 15 | Now these numbers aren't as important as the visible shape of the trend between the categories. When we train a neural network we are aiming for a boundary that works just right. That is, it rarely misclassifies. And it generalizes well: if we feed new unseen samples to this network, chances are, it will get it right most of the time. Now look at the rightmost plot, and suppose we exhaustively train a very complex neural network so that it always gets it right with a perfectly accurate boundary. 16 | 17 | Notice that it seems wrong to get every outlier correctly classified. It seems wrong because it is wrong.
This situation is known as overfitting and you may have guessed that an overfitting neural network is bad at generalizing. If we feed new on-scene data to this classifier it will probably fail often for data points near the boundary. In the real world, outliers are inevitable and we don't need to sacrifice the accuracy of our classifier just to classify known data. -------------------------------------------------------------------------------- /Personal Note/23. Training Process.md: -------------------------------------------------------------------------------- 1 | The training process 2 | 3 | When training your neural network, there's a usual top level procedure I'll briefly describe to you. But first let me tell you what a dataset is. First of all, a dataset is a collection of samples that contain features and labels. We usually represent features with X and labels with Y. The input signals for the network are known as features because what you usually feed a narrow network are features of a data point, which can be represented as numbers. 4 | 5 | For example, length, height, price, salary number of rooms in a house, blood sugar level and so on. The labels on the other hand are the known category attached to each sample. This is how we teach the network, we show the samples to it. And finally, the network is able to learn with each feature labeled pair. So here's the usual training process. You typically want to use three datasets, a training set, a validation set and at testing set. 6 | 7 | The training set is used to train the network so that it learns all is supposed to learn. This is important so I'll say it again with different words. This is the only dataset that will be used with the training algorithm. The other two are used for two rounds of assessment. So we run the training set with the training algorithm. Another important detail is that we have to run the training set lots of times. 
Each time we run the training set with the learning algorithm is known as a Training Epoch. Well, the training process usually involves hundreds or thousands of training epochs. 8 | 9 | We stop after some number of epochs or until some error metric drops under a desired value. We'll talk about this error metric in a bit. After that the neural network will have learned something from the samples. The validation set is used to assess how well our neural network has learned as compared to other competitors. There's nothing we intend to do to improve the learning. Actually what's usually done here is that we prepare several classifiers, say to support vector machines and three neural networks. We may tweak our network you know, we could use one with one hidden layer and another with three hidden layers. We may also vary the number of neurons per layer and we may use different activation functions for example. 10 | 11 | So we train all of the competing classifiers and then we feed the validation set to all classifiers. This time we only run the dataset, we don't train the classifiers. The validation set will allow us to rank our classifiers and choose the one that shows the best performance for us. Let's say that's our neural network A. Lastly, the testing set is used for evaluating the finally chosen model just to make sure it's being able to classify data it hasn't seen before. Okay, so what happens when we run one single training sample? 12 | 13 | This is important for us to write our training code. So first we feed an input sample X to the network then we compare the output to the correct value Y. With this output and the expected correct value we may calculate the error and we can use this error to adjust the weights in the network. And we do that to classify that sample a little better in the future without messing up our response to other samples the network learn earlier. 
-------------------------------------------------------------------------------- /Personal Note/24. Error Function.md: -------------------------------------------------------------------------------- 1 | Error function 2 | 3 | Earlier I mentioned the error of a neural network output, so let me tell you what I meant by that. An error function measures how bad a classifier is doing. So a large value is bad and a small value is good. This function is essential in the training process. 4 | 5 | So up ahead, we'll learn about a training process called gradient descent. Throughout the algorithm, we'll calculate two types of error metrics, one for the output of a neuron and one for the whole network. So the first metric is not formally known as the error, because it's just the measure of how far off a neuron is from the expected value dictated by the label in the dataset. It's simply a subtraction. Let me show you. 6 | 7 | Suppose we enter a sample x,y to a neural network which for now will be a single layer perceptron. Now suppose that the output of this one output network is 0.6. And let's say that the label for that input sample is y equals one. This way, the error or deviation can be calculated as the subtraction y minus out. Again, this output error is just a step in the calculations, but it will show up quite often. So the point is that the training function must somehow contribute to getting out closer to y over time, that is, making the error approach zero. 8 | 9 | Now, when we are assessing the performance of the neural network, we use a metric known as the mean squared error. There are several important details about this function. The error is calculated as the sum of the squared output errors for all neurons in the output layer, all of this divided by n, the number of neurons in the output layer. Remember a neural network may have more than one outlet. 
10 | 11 | Since we'll use this error to check how our training is going, the training process seeks to minimize this error. A nice thing about this metric is that it gets rid of the sign of the actual error. So when minimizing the error, we're not interested in the direction of this error. It's all the same to us if the output is over or under the desired value. What we extract from this function is the size of the error. This way, we always want to minimize this function. -------------------------------------------------------------------------------- /Personal Note/25. Gradient Descent.md: -------------------------------------------------------------------------------- 1 | Gradient descent 2 | 3 | Great, now that we know about the error function we want to minimize, let's talk about the general algorithm known as gradient descent. This is a training method to minimize the error of our neural network. It consists on adjusting the weights in order to find the minimum error. To get an idea of the logic behind gradient descent, think about trying to go downhill to the lowest valley in the error function. 4 | 5 | So here's a graphic aid to understand what gradient descent is all about. For now, suppose we have a neural network with several weights, but we are only interested in modifying one weight to see how it affects the overall error of the network. So let's say we get this function, and we set this weight to say this value. Now think of valuating this function as placing a marble over the function like this. 6 | 7 | Reducing the weight will get us to a higher thus worst error but increasing the weight will lead us downhill. If we keep increasing the weight, we'll eventually reach the lowest point in the plot. That's our objective. This is the global minimum of the function and that's the best error we can get by modifying this particular weight. So in some sense, we want to simulate gravity in this plot and that's what gradient descent will do for us. 
Now, let me quickly warn you about a possible problem here. 8 | 9 | Since we don't know what our error function will look like, and we initialize our weights randomly, what would happen if we start with this value for the weight? Well, simulating the same gravity effect with gradient descent, we would move the marble to the left, eventually getting stuck in this valley, which is not the global minimum, but a local minimum. Don't worry, there are several methods to overcome this local minima problem. Pushing our example a little forward, think about modifying two weights to manipulate the error. This would give us a 3D plot where the height is the error, and the two weights will place the marble at different points in this surface with mountains and valleys. The objective is still to get the marble to the lowest point. 10 | 11 | As you can see, it becomes a bit more complicated with more weights. So with two weights, this became a tri-dimensional plot. Well, the (indistinct) neural network has nine weights, so this becomes a 10 dimensional plot, which we can't even understand graphically. The good news is that we have a method that will simulate this gravity for us, so we don't have to worry about the number of weights involved. -------------------------------------------------------------------------------- /Personal Note/26. The Delta Rule.md: -------------------------------------------------------------------------------- 1 | The delta rule 2 | 3 | The simplest form of the algorithm we'll implement is known as the delta rule. It's a simple update formula for adjusting the weights in a single perceptron, that is a neuron. Yes, it's a simple formula, but its reasoning is very clever. The delta rule considers the following values; the output error, this is the simple subtraction error, one input, the one effected by the weight we are going to tweak, and a constant known as the learning rate. 
So here's a nice equation to calculate the update in a weight I in a neuron K. 4 | 5 | Let's call it delta W sub I K. And it's the value we'll have to add to W sub I K to get the boundary closer to what we want. So to calculate this delta, we multiply the learning rate times the output error that is the label Y sub K, minus the neurons output O sub K, times the I-th input value X sub I K. Yeah, it's very simple. But notice what's happening there. The output error will be positive if the output is higher than the desired output, and it will be negative if the output is lower than the label. This means that when we later update W, it will contribute to making the output closer to the provided label. If we calculate all of the delta W's and add them to the W's, our perception will be one step closer to having the boundary we want. 6 | 7 | So let me tell you a few things about the learning rate. First, it's a unique constant in the neural network. There's only one learning rate for all neurons. As the name suggests, it directly affects the rate of learning because higher values will result in larger leaps for the weights and lower values will result in smaller leaps for the weights. Does a higher learning rate mean faster learning? Yes. Does a higher learning rate mean better? No. The learning rate is usually initialized at 0.5 but you may have to tune it if learning is too fast or too slow. Here's why. Let me show you six updates of a weight in this error function considering a learning rate that's too slow. So pay attention to the marble. One, two, three, four, five, six. The marble will eventually find a minimum. It will take a long time and it may get stuck at the first local minimum it finds. 8 | 9 | This could be much better. Let's see six steps again with a learning rate that's too high. One, Two, three, four, five, six. Large leaps may miss the minimum, getting stuck around it or even missing it altogether. 
The desired situation is a learning rate that's just right. Notice that it may mimic inertia going a bit past the minimum, but being drawn back into the valley. One, two, three, four, five, six. -------------------------------------------------------------------------------- /Personal Note/27. Backpropagation Algorithm.md: -------------------------------------------------------------------------------- 1 | The backpropagation algorithm 2 | 3 | We are finally ready to see the backpropagation training algorithm. This is a general form of the delta rule. It has several requirements on the neuron model especially on the activation function. Don't worry the sigmoid makes it easy. The algorithm calculates all the weight updates throughout the network. This is done by propagating the error back through the layers. So here are the steps of the backpropagation algorithm to train a multilayer perception with one sample. 4 | One. Feed a sample to the network. 5 | 6 | Two. Calculate the mean squared error. 7 | 8 | Three. Calculate the error term of each output neuron. 9 | 10 | Four. Iteratively calculate the error terms in the hidden layers. Five. Applied the delta rule. And six. Adjust the weights. 11 | 12 | Now for your next challenge you'll have to write the backpropagation algorithm. That's why we're going to have a close look at each of these steps, because it's easy to get confused. For this example, I'll use a volunteer, which is this network with three inputs, four neurons in the first hidden layer, three neurons in the second one and two in the output layer. In my multi-layer perception class, these are layers zero through three respectively. As you can see, our neurons are explicitly showing their bias inputs because we'll use them in the algorithm. 13 | 14 | But remember that these inputs are rarely shown for the sake of simplicity. The bias is fixed at one. So don't confuse it with the bias input weights of the neurons. The first step is to run the network forward. 
So we feed a sample, say X equals 2, 5, 1. And let's say that our network spit out the output vector O equals 0.2 and 0.49.
So delta sub-K equals O sub K, that's the output value of neuron K, times one minus O sub-K times the simple output error, Y sub-K minus O sub-K. Just so you know, this part is the derivative of the sigmoid function, which is very simple. And that's one of the main reasons to use the sigmoid function as an activation function for our neurons. So let's suppose we are interested in calculating the error term for output O1. 19 | 20 | Now let's zoom into the output layer for this example. To calculate lowercase delta sub-one, that's the error term for neuron number one in the output layer, we'll use O sub one and Y sub one in the equation. It's that simple. The fourth step is where the magic happens. Now we're going to calculate the error terms for the hidden layers. And we do this backwards. The previous step was done in the output layer. This step iterates from the last hidden layer, all the way to the first hidden layer to find an error term per neuron. 21 | 22 | Here's the equation for an error term lowercase delta sub-H. It's almost the same as for the output layer. The derivative of the sigmoid is still there but in the hidden layer, we have no idea about the error because we simply don't know what to expect from the intermediate neurons. So what we use instead is a sum of a product that includes the error terms in the neurons connected to this neurons output. These neurons are in the next layer, and we have just calculated their error terms lowercase delta sub-K, but that's not all we calculated in this sum. We must multiply these error terms by the weight of the input that's connected to the output of our neuron H. So let's zoom in again, to see what happens in the second neuron of the second hidden layer. This will be neuron one in the last hidden layer. So in the equation, H will be one. Actually, by lowercase delta one, I'm hiding the layer number. So let me be more specific in the diagram and call this lowercase delta 21. 
23 | 24 | Meaning that this neuron is in layer two and it's neuron number one, the second neuron. To calculate lowercase delta sub 21, we'll need the usual derivative of the sigmoid times the weighted sum of the error terms. So these are the products we'll have to add for this neuron. Lowercase delta 30, or the error term of the output neuron zero times W01. That is the input weight one of this output neuron, we'll add this product with the product of lowercase delta 31 which is the error term of the other output neuron times W11, which is the weight of the input connected to our neuron 21. See why I called this magic. We are reacting to the error propagated back through the network in the right proportion by scaling the error terms with the weights. 25 | 26 | This means that errors with higher weights will take more of the blame and errors with lower weights will get less of the blame. Now, just for completeness, let's back up to layer one. So now in layer one, let's say we are interested in the third neuron that's lowercase delta two in this layer or its full name would be lowercase delta 12 as shown in the diagram. To calculate the sum, we'll have to multiply W02 with delta 20, W12 with delta 21, W22 with delta 22 and add these three products. It's all downhill from here. Step five is to apply the delta rule. 27 | 28 | Since we now have all of the error terms, the lowercase deltas, we may proceed to calculate the weight adjustments or uppercase deltas. So as the equation shows to compute the weight adjustment for an input J in a neuron I, we need to multiply the learning rate times the error term of the neuron I, times the input value J in neuron I. 29 | 30 | Great, now that we have the deltas we simply add them to the weights and we are done. -------------------------------------------------------------------------------- /Personal Note/28. 
Challenge on Backpropagation Algorithm.md: -------------------------------------------------------------------------------- 1 | Challenge: Write your own backpropagation function 2 | 3 | Ready to finish up your multi-layer perceptron class? This time your task is to write a backpropagation trainer function, which will run one sample through the network with the backpropagation algorithm. Don't worry, you'll just have to write a few lines per step in the provided code. You'll simply have to fill in the blanks. You can do this, and feel free to go back to the backpropagation videos if you get stuck. 4 | 5 | The function is called BP and it starts at line 80. It receives a feature vector X and a label vector Y. I've placed a comment for each step and skeleton code for the loops. Now, for steps three and four, you'll need a vector of vectors I added to the class to store the error terms or lowercase deltas. That's why I named it lowercase D. You may want to check the constructor code at mlp.h and look for this vector. You'll see that it has the same organization as the values vector. So it's created and initialized right next to it. So this D vector holds the error term for each neuron just like the values vector holds their output values. 6 | 7 | Back to the backpropagation method, it must return the mean squared error because we'll need it in the training process. Speaking of training, let's go to neuralnetworks.cpp. At the end, starting at line 40, I have included this training example. I'm running the BP function once for each case in the XOR truth table. And that's my whole dataset, just four samples. I'm running it for 3,000 epochs. And I'm calculating the mean of the four errors in an epoch. 8 | 9 | Notice that I'm only printing this error every 100 epochs. So I'm expecting to see 30 error values in the terminal to keep it simple. I'm also printing out the weights for you to see what the neural network finally came up with. 
And lastly, we print the truth table. So when you're ready to test your backpropagation function you should see the error drop as the training advances. You'll see the final weights. 10 | 11 | And finally, the four cases will be tested. So you'll know how your training function is doing. This may take you a while, anything between 30 and 45 minutes, so have fun. -------------------------------------------------------------------------------- /Personal Note/29. Backpropagation Challenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Write your own backpropagation function 2 | 3 | Step one is the simplest. We just run x through the network and assign the result to a new vector called outputs. 4 | 5 | Step two is where we calculate the mean squared error. So first, I declare a vector to store the simple errors. I name that error. Then I'm calculating these simple errors and the sum of their squares in a for loop. And finally, I'm dividing this sum by the number of neurons in the last layer. 6 | 7 | Step three is done element by element in a for loop, just following the equation. 8 | 9 | Notice that the result goes to the last element in our d vector. In step four, first, I calculate the weighted sum of the forward error terms in a variable called fwd_error and then use that sum to calculate the current error term. Notice that the outputs are not recalculated. They are fetched from our values cache. All this is assigned to each element in the d vector, which contains the error terms. 10 | 11 | Steps five and six contain the most code but it's actually very simple. i goes through the layers, j goes through the neurons, and k goes through the inputs. That's why it goes from zero to the number of neurons in the previous layer plus one because of the bias weight. And that's what the body of the innermost loop is doing. 
If k is the last weight, we calculate the delta by multiplying the learning rate times the error term in that neuron times the bias term as the input because well, that's the input there. 12 | 13 | If it's not the bias weight we're checking with k, then we calculate the delta as the learning rate times the error term in that neuron times the actual input, which comes from our values cache indexed at the previous layer. That's it. Finally, I return the MSE. So let's see it working. Pay attention to the error values as they go down. Next, we have the weights. Notice that it came up with something other than the nand or and combination we designed earlier. Look at the values and the signs of the weights. This is surely a logical equivalent of that initial XOR and finally, we have the truth table. As you can see, we are practically getting the XOR behavior, meaning that our artificial brain has learned. It's alive. 14 | 15 | Now, this is my favorite part of the whole process. This plot shows the learning process of the XOR gate you just saw. I got this data by training an XOR just the way you saw and I copied the 30 error values reported in the terminal. I pasted them in a spreadsheet to finally make the plot. You should try it on your own and you'll get something very similar. As you can see, the plot shows how the error drops as the neural network learns epoch after epoch. These plots usually show a very subtle improvement in the error in the first iterations. 16 | 17 | But when the gradient descent starts to pay off, you'll see a dramatic drop after which the improvement is subtle again. That's just the law of diminishing returns working. And that's a smart way of telling when you should stop training. You don't want to waste your time getting less significant improvements or even worse, getting your neural network to over fit. -------------------------------------------------------------------------------- /Personal Note/3. 
What is a Neural Network.md: -------------------------------------------------------------------------------- 1 | What is a neural network? 2 | 3 | Let's start by defining what a neural network is. A neural network is a piece of software. It's a model of the brain. And like so, it's capable of reproducing some behaviors of the brain, like learning and classifying. That is, it can be taught to recognize the category of a set of things you present to it. 4 | 5 | For example, telling the difference between apples and oranges in a picture. Now let's consider what a neural network is not so that we get some misconceptions out of the way. A neural network is not a series of if/then statements or any other form of hard coded data the software may fetch to produce an answer. 6 | 7 | A neural network is not all there is to machine learning. In fact, you'll see that machine learning is an extensive field of knowledge on its own. And lastly, although neural networks are useful for classifying, they are certainly not the only type of classifier out there. 8 | 9 | Now I'd like to give you an idea of what constitutes a neural network. Simply put, a neural network is a collection of neurons, which are small computing units that perform simple calculations to collectively solve a problem. These neurons may be interconnected in several ways. In the illustration, I'm showing the multi-layer perceptron, a very popular type of neural network and the one we will implement up ahead. -------------------------------------------------------------------------------- /Personal Note/30. Segment Display Recognition.md: -------------------------------------------------------------------------------- 1 | Segment display recognition 2 | 3 | Congratulations for making it this far. You've learned an impressive number of concepts that will be useful in your journey into artificial intelligence and machine learning. 
Now it's time to have some fun with a practical application of your brand new multilayer perceptron class. The project is a simplified version of Optical Character Recognition, or OCR. So let me tell you a few details about OCR. 4 | 5 | It's a set of algorithms used to recognize characters in a picture. It's useful for many applications like digitizing books or documents. For example, if you have an old book, a contract or a printed document you'd like to convert to a PDF file. A more advanced version of OCR is used for taking notes by hand on your tablet or smartphone. This software is able to learn from your own handwriting. 6 | 7 | Another application is actually reading. I'm talking about systems that are supposed to understand written text in front of them. A nice example would be a robot that reads books out loud. It has to understand the printed characters prior to converting text to speech. Now, about our project, it's a Segment Display Recognition system, which I decided to call SDR. This is not a formal term. I just came up with the name but if you do a web search for systems that recognize digits from seven-segment displays, you'll find lots of interesting projects and research papers. 8 | 9 | The objective here is to recognize digits from a single seven-segment display like this one. The letters shown inside each segment are their standard names, a through g. We are all familiar with these displays and these are the most popular digit patterns for seven-segment displays and they are the ones we will use for training our neural network. So again, the objective is to recognize multi-digit numbers by first recognizing the digits separately. 10 | 11 | This is a suitable task for a neural network because the input may come from a picture or a camera used by some computer vision system. The software will extract the brightness of each segment and send that seven-number vector to our neural network. We'll take it from there. 
Since we'll receive the brightness of each segment, our neural network will have the chance to generalize and guess what a pattern really means. In other words, not all numbers will have a 100% brightness in every segment that's on. Actually, there may be some ambiguous patterns. 12 | 13 | Take this alarm clock, for example. The second digit looks like a zero but it has its middle segment, segment G, somewhat visible. So it may be an eight. Here's another example. Look at the third digit. It's easy for a human to know that's a four even though its segment F, the leftmost is quite dim. We as human wouldn't expect this digit to be a minus one. Well, for these ambiguous patterns, our neural network may be a very good alternative to a simple lookup table. -------------------------------------------------------------------------------- /Personal Note/31. Challenge on Segment Display.md: -------------------------------------------------------------------------------- 1 | Challenge: Design your own SDR neural network 2 | 3 | Here's a system level challenge for you: design your own segment display recognition neural network. We'll give some thought to the design first, so please don't write any code just yet. Instead, try to answer this question: how would you recognize numbers from 0 to 9? Think about the dimensions of your multilayer perceptron. We already know that we want seven inputs because we'll receive seven brightness levels, one for each segment, but try to answer these questions: how many inputs and outputs would you use? 4 | 5 | How many hidden layers and how many neurons per hidden layer would you use? And also, try to think how you want the data to be presented at the output of your neural network. Your solution should be a diagram like this one, showing the inputs, the neurons, their connections, and the outputs. Don't worry about showing the bias inputs for all neurons. They will be there, just not in the diagram. Give some thought to this. 
As with many computing problems, this is one of the most important steps. This should take you about 15 minutes. -------------------------------------------------------------------------------- /Personal Note/32. SDR Challenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Design your own SDR neural network 2 | 3 | I came up with three possible solutions for this single digit SDR neural network. The first has seven inputs and one output. The second has seven inputs and 10 outputs. And the third has seven inputs and seven outputs. Let me tell you what I was thinking when I designed each of them. My first network has seven inputs, one hidden layer with seven neurons and one output. In fact, all three designs have just one hidden layer with seven neurons. 4 | 5 | My thinking was to allow each segment to affect one neuron exclusively if needed. You could have used more or less neurons in this layer, and you could have used more hidden layers. Now, since I only have one output neuron, my output will be a real value between zero and one. So some calculations have to be made to output numbers from zero to nine. I was thinking of assigning buckets to encode each digit. 6 | 7 | For example, anything from 0.1 to 0.1999 many nines, will correspond to one. Anything from 0.5 to 0.5999 many nines will correspond to five. That is, the recognized digit will be the truncated integer of 10 times the output. Although this is a valid model, a one output neural network for a multi-class problem is almost always avoided and discouraged. The reason is that it has many setbacks and the alternatives are much better at little to no cost. Here we have two things that are wrong about this approach. 8 | 9 | First, we are solving a classification problem with a regression tool. The sigmoid function in the output makes it a binary classification tool. 
So assigning the output values to uniformly distributed buckets in the output domain is not an easy task with the nonlinear sigmoid because those buckets are not so uniformly distributed in the input domain. The reason for this is that we are using a good tool for the wrong problem. In other words, we are drilling with a screwdriver, it may get the job done but our drill would do a much better job. 10 | 11 | Here's the second reason not to use this model. We are assigning the values to consecutive buckets in the output. That is, we are mapping our unrelated classes to sequential values as if our classes needed to be sorted that way. Yes, in our segment recognition system, we are using digits but they don't have to represent their values, they're just symbols for now. Think about it with a different example like this one with four classes, bird, dog, cat, and mouse. There's no real reason to say that dog is greater than mouse or bird is greater than cat. So with this mapping we're constraining our classifiers to satisfy an additional and useless requirement. This adds complexity to the learning struggle of our neural network. So this is my first system. 12 | 13 | Once again, this is a valid model but there's a universally accepted alternative. My second network is that alternative. It has the required seven inputs and 10 outputs in a technique known as one-hot encoding. What this means is that each of the outputs represents one of the output classes, and so each of the output neurons will raise its hand if you will, whenever we enter the pattern that neuron is sensitive to. The way for us to show the recognized number would be to report the class of the output with the greatest value. 14 | 15 | Notice that we've solved both problems of the previous model. All of the classes have their own independent neuron, so they're not constrained by each other. How's that for a convenient alternative to the one output system. 
There's really no reason not to choose this system. My third system is something different. This one has the seven inputs and seven outputs that also represent the same seven segments. So yes, whenever you enter say the pattern for number four, it will output the same pattern for number four. 16 | 17 | Now this may not sound like a very good idea at first, but think of it like the text predictor in your smartphone, which suggests to complete the words you are typing. Well, this system will suggest to complete the number you are entering, if we were to edit the number pattern we are sending into the network. My objective here is not to predict numbers entered segment by segment over time, because that's not done in reality. However, this system is able to let you peek into the reasoning of a neural network. 18 | 19 | This system will show you how certain it is of the pattern it has recognized. -------------------------------------------------------------------------------- /Personal Note/33. Challenge on Training SDR NN.md: -------------------------------------------------------------------------------- 1 | Challenge: Train your own SDR neural network 2 | 3 | 4 | It's time to train this segment display recognition neural network you designed. So here's what you have to do in this challenge. Create the neural network you just designed. Train it, either using the provided data sets or your own. Now my data set is the very minimum. We'll simply train the network with the 10 ideal patterns. So you may add your own patterns if you want. Train the network for a large number of epochs, something between 1,000 and 3,000 epochs will do the trick. 5 | 6 | Just like the XOR training challenge, we will not use the validation or test phases. Finally, test the neural network with your own samples. Do this in the text terminal to keep it quick and simple. Now, the demo programs I will show you in the next video implement all three models with sliders, buttons and other GUI controls. 
And you'll have the chance to use your neural network in these demos if you'd like. 7 | 8 | So remember, you're only expected to test your neural network in the terminal. Of course, if you'd still like to make your own GUI, please feel free to do so. So as usual, I prepared some testing code at the bottom of the main function. Here you should be ready to write your own code. But I've provided the very minimum training set for the three models I showed you in case you decided to implement one of them. This code works, but it simply prints out the mean squared error obtained after training each neural network. 9 | 10 | So, starting at line 74, we have the dataset for the seven to one neural network. Notice that the patterns in the input are entered with zeros and ones. Once again, you may want to add patterns representing the same numbers with plausible values, like 0.25 for a dim segment and 0.87 for a bright one. Take a minute to read these lines and verify that each sample corresponds to the number in the comment at the right. Now for the labels, notice that I'm assigning each pattern to the midpoint of its intended bucket. For example, the pattern for three is mapped to 0.35 and the pattern for seven is mapped to 0.75. 11 | 12 | Scrolling down to line 95, we have the patterns for the seven to 10 neural network. The patterns are the same as before, but notice the nice identity matrix look of the labels. That's how one-hot encoding looks like, no pun intended. Finally, here's the simplistic data set for the seven to seven neural network, starting at line 116. Again, at first glance it may look silly to assign the same input values to the outputs, but actually I find it very interesting. 13 | 14 | Remember, you are expected to test the segment recognition architecture you designed. So if you designed one of the three I included here, you're welcome to use it's part of the code and feel free to comment out or delete the lines where the other models are used. 
Then again, you may test all three if you want. Either way, test your system by feeding different patterns to the run function. Try playing around with more training patterns, more or less training epochs, or tweak the bias and learning rate to see how the behavior changes. 15 | 16 | This shouldn't take you much longer than 15 minutes. So go train that network and come back to see my demo programs with a hopefully nice GUI. -------------------------------------------------------------------------------- /Personal Note/34. Train SDR NN Challenge Solution.md: -------------------------------------------------------------------------------- 1 | Solution: Train your own SDR neural network 2 | 3 | I made three applications for our segment display recognition neural network, all with a graphical user interface to allow you to see the neural network working as you tweak the input values. First, let me show you how the source files are organized. I left the NeuralNetworks folder for your convenience at the last point we left it, reporting the errors for the three models, along with the trained XOR and all previous tests. Now the GUI applications are all located in the SDRNN folder. Inside, you'll find the project folders called SDRNN_7to1, 7to10 and 7to7. 4 | 5 | To get your hands on this code, you'll need to download the JUCE framework from juce.com. Make sure you read the instructions in the README file in the GitHub repository. However, if you don't feel like installing JUCE, you may test the code with the executable files I placed in the Executables folder. There you'll find all three applications for Windows, macOS and Linux. Now, inside each project folder, you'll see the folder structure created by JUCE. The projects are inside the Builds folder. 6 | 7 | There you'll find a Makefile for Linux, an Xcode project for macOS and a Visual Studio project for Windows. Now let's look at the code for the 7to1 system, which is located under the Source folder. 
The code for our application is in SDRNN_7to1_GUI.h. Be advised that there's a lot of code to implement the graphical stuff, so you may be interested in two functions that deal with the neural network. These functions are present in all three implementations. The first is run_ann where the neural network is run once every time the user changes the input controls and the second one is train_ann where the back propagation happens. I'd rather show you the rest in the running application. So let's start with the 7to1 model. 8 | 9 | At the left, I created a set of seven sliders to act as the segments in the input. Moving them causes their color to simulate, becoming brighter or dimmer. At the right, we have parameters and results. You may enter the number of epochs to train next, which will happen when you press the train button. As results, the application displays the last reported training error, the number of epochs trained so far, the raw output value and the finally recognized number. I also included a reset button to start over. So let me train this network in steps of 100 epochs. Pay attention to the training error as it drops. I'll keep training until the error drops under 0.001. 10 | 11 | Now, none of these applications has a valid excuse for not recognizing a trained pattern. So all numbers from zero to nine must be correctly recognized. Let me show you a few patterns. Here's number one. Here's number four. And number nine. Okay, remember, I mentioned something wrong about this model? Now let me show you what it is. Let's enter the pattern for number zero. Yes, it's recognized correctly but let me slowly change the middle segment to turn it into an eight and back. Watch closely. Did you see that. 12 | 13 | Now ask yourself is it necessary to go through one, two, three, all the way to eight? Does changing the brightness of that segment make the pattern look more like a four or a five? Of course not. 
Our neural network had to satisfy that constraint and in doing so, it has sacrificed its ability to generalize. Now let's look at the one hot encoding model, the 7to10 network. Notice that we have the same controls except for the raw output, which is now showing as 10 raw outputs. So remember, the output with the highest value will be the one reported as the recognized number. Let me train this network for 1,000 epochs and then I'll train it in steps of 100 until I get an error of about 0.001. So again, let's see if it recognizes number three correctly. 14 | 15 | Number five. And number seven. And now for the zero to eight and back test, let me enter the pattern for zero. Now before I slide the middle segment, remember that there's no reason for the output to show anything other than zero and eight. So let's see it. There you have it. So now it feels like we're using a neural network free from the shackles of a forced sequence in the trained patterns. 16 | 17 | Finally, let's look at the 7to7 network. Now I'm not showing the raw outputs any longer but another set of sliders simulating an actual LED seven-segment display. This display will show us what the neural network infers from our input. Again, if our input is a valid pattern, there's no excuse to output anything other than that pattern because the network is very well trained. The interesting part is the output we get when we enter an invalid pattern. Let me train the network to an acceptable error. 18 | 19 | And now I want to show you a nice example of how the neural network struggles to get the incoming pattern right. This pattern at the left is very special because depending on the state of the bottom left and the bottom right segments, it could be a two or a three. If both segments are on, it's invalid, as well as when both segments are off. So at this point, both segments are off in the pattern at the left. And the output at the right is showing something between two and three. It's ambiguous. 
20 | 21 | Notice as I slowly increase the brightness of the leftmost segment, the output become more confident of seeing a two and less confident of seeing a three. So you'll see those two sliders go in opposite directions in the display at the right. In fact, whenever I move either of those sliders, the sliders at the right will be having a tug of war towards the correct pattern. I don't expect this video to be enough for you, so please play around with the three systems, hopefully with your own version of MLP.CPP. 22 | 23 | -------------------------------------------------------------------------------- /Personal Note/35. Conclusion.md: -------------------------------------------------------------------------------- 1 | Next steps 2 | 3 | I hope you have fun learning how to use C++ to create neural networks. Of course, there's so much more you can learn about neural networks. So as next steps, I suggest the following. Put your models to the test by using training, validation, and testing datasets. Assess your neural networks for usual problems like overfitting and underfitting. Experiment with more hidden layers and with more or less neurons per layer. 4 | 5 | And you may want to try all of these things with a readily available neural network tool, like TensorFlow or PyTorch, instead of writing the code all by yourself. Thank you for taking the course. Now I'll leave you to get to work on your next neural network application. Till next time. -------------------------------------------------------------------------------- /Personal Note/4. Why C++.md: -------------------------------------------------------------------------------- 1 | Why C++? 2 | 3 | So here are some of the most important aspects about C++ I'd like you to know. First, C++ runs the world. It's undoubtedly one of the most popular programming languages of all times. 
And you can see its applications everywhere, in operating systems, database software, web browsers, banking, cloud systems, compilers, embedded systems, you name it. 4 | There are lots of libraries available for pretty much any application of machine learning, and this includes lots of neural network variants in PyTorch and TensorFlow. Lastly, you may run your code anywhere because we'll only use the Standard Template Library. That's why I don't expect you to have any platform-related trouble when setting up your environment or trying out your code. -------------------------------------------------------------------------------- /Personal Note/5. Applications of ML.md: -------------------------------------------------------------------------------- 1 | The many applications of machine learning 2 | 3 | Since neural networks are an important part of machine learning, I'd like to tell you just a few of the most popular applications of machine learning, which is the field of computer science related to training machines to solve problems. Machine learning algorithms are widely used for image recognition where a machine can be taught to make sense of still images or video. An example of this would be face recognition in surveillance systems. 4 | 5 | Speech recognition is something you may already be used to. Well, this is another typical application of machine learning where the input from thousands and thousands of people has taught a machine what their words mean over time. These people may have different voices and accents and this makes the computer even better at understanding what new people are saying. Prediction is a big application of machine learning, which is useful at producing stock market advice, weather forecasting, and even at many stages of healthcare, like diagnostics and treatment. 
Another big application of machine learning is recommender systems, which I'm sure you are experiencing all the time in your video services like Netflix and Amazon Prime Video, in online stores like amazon.com and even in the advertisements you get in social media. 6 | 7 | These systems don't only learn from your product feedback, whether you like the product or not or how many stars you gave to it but also from your purchase or view history and from the data of people who behave like you. Finally, an example of machine learning I like very much is akinator.com, which is a genie capable of telling who or what you're thinking of by simply asking questions to you. 8 | 9 | A similar one is 20q.net, which plays the game 20 Questions against you. You should try these two whenever you get bored. They're surprisingly good. Now let me briefly tell you about the three broad paradigms of machine learning and some of their best-known tools. First, we have supervised learning where you actively teach the AI by providing it with lots of data samples, along with the results you expect. Here are two big problems supervised learning can solve. Regression where the AI comes up with the continuous function that behaves like the data you provided. I'm talking about tools like the linear regressions you may have used in spreadsheets. This is useful for making stock market predictions and weather forecasting. 10 | 11 | Next, we have classification or recognizing the category a sample belongs to. Among other famous tools, we have the support vector machine, neural networks and decision trees, which are very suitable for implementing the software behind akinator.com and 20q.net. The second paradigm of machine learning is unsupervised learning where you simply feed data into the AI and it makes sense of this data on its own. 12 | 13 | Among unsupervised learning tools, we have a big family of clustering algorithms to divide samples into clusters, thus learning from the data. 
This applies to medical imaging and recommender systems. Anomaly detection systems are capable of finding uncommon samples or outliers. This is crucial for detecting credit card fraud, typos in text or medical conditions to name a few. Finally, there's a whole family of neural networks that learn from the data. 14 | 15 | Among these, we have auto encoders, self-organizing maps and deep belief networks. The third paradigm of machine learning is reinforcement learning where the AI is composed of a feedback loop with rewards and penalties based on its behavior. For this area, I'll just mention the most famous application today. AI agents capable of learning how to play video games without supervision. If you've seen one of these projects, you may know that they often exceed human capabilities, which I personally find mind blowing. -------------------------------------------------------------------------------- /Personal Note/6. Types of Classifiers.md: -------------------------------------------------------------------------------- 1 | Types of classifiers 2 | 3 | So let me show you a few classifiers in machine learning and where neural networks fit in. First, we have the logistic regression, which is a function with an input vector and a single return value. Depending on the values in the input vector, the function will return a value between zero and one as a way of classifying the input as belonging to class zero or class one. This classifier is technically a very simplistic neural network known as a perceptron. We'll take a detailed look at perceptrons shortly. 4 | 5 | Another type of classifier is the K-nearest neighbors algorithm. To understand how it works, suppose that we plot our samples in a 2D plane so that we have some samples that belong to one category shown as squares, and some other samples belonging to a second category shown as triangles. 
This algorithm takes in a new sample of an unknown category shown as a circle near the center and classifies it in the same category as the majority of its K-nearest neighbors. To avoid ties, K must be an odd number. 6 | 7 | And even so, its value is very crucial. For instance, suppose we chose K equals three for this plot. Then the new sample would be classified as a triangle because most of its neighbors are triangles. However, if K equals five, then it would fall into the square category. That said, the advantage of this algorithm is its evident simplicity. There are very few calculations going on. 8 | 9 | Support vector machines are very similar to neural networks in their input and output structure, and they often solve the same problem. Take this plot for example. A support vector machine is capable of finding the line that acts as a boundary between the two categories of points shown as squares and triangles respectively. This boundary is a line for this 2D example, but in general, it's known as a hyperplane. So here we have a valid boundary with a potential inconvenience. 10 | 11 | It's too close to some samples. Here's another boundary that's way too close to some samples. One of the most useful features of support vector machines is that they are capable of finding something close to the optimal boundary, that is a line that separates the categories while maximizing its distance to every point in the plot. Here's an example of such boundary. A seemingly different approach to classification are decision trees. These algorithms have a tree-like structure of questions very much like a series of if-else statements, but not quite. It turns out that a decision tree uses training algorithm based on information theory to produce the shortest possible tree. That is, to classify a new sample in the smallest number of steps. 12 | 13 | In the illustration, we have a decision tree to determine whether a passenger of the Titanic survived or not. 
This tree was produced by observing data and noticing that most of the survivors in the Titanic were women and young children with not so many siblings. It's not perfect. It may fail sometimes because it's based on probabilities, but the goal is to come up with a decision tree that asks fewer questions, while getting it right more often. The tree is very simple, and any programmer could write the code for this decision tree. 14 | 15 | The really tricky part is getting a computer to figure that out from the survivor data. And finally we have the classifier we are interested in, feedforward neural networks. Here are some features of neural networks that make them stand out among other tools. They were biologically inspired to mimic the brain. So although they end up performing like other tools, their architecture is very flexible. They may have as many outputs as needed in a single network, and their learning algorithm is very clever and simple. -------------------------------------------------------------------------------- /Personal Note/7. Types of Neural Networks.md: -------------------------------------------------------------------------------- 1 | Types of neural networks 2 | 3 | As you could see, machine learning has an extensive set of tools for pretty much any problem out there. Now let's zero in on neural networks. There are dozens of neural network types but I'd like to mention three very special types. First we have Hopfield neural networks, which have a fully connected architecture that is every neuron in the network sends its output to all the other neurons. 4 | 5 | It does have inputs and outputs. The inputs modify what's going on inside the network, including the output values. The logic behind this architecture is to let the neurons collectively perform the necessary computation in an emerging manner. 
That is the individual neurons aren't aware of the big picture but we can influence the whole network to produce the outputs we need given the inputs we feed into the network. Perhaps the best known category of neural networks is the Feedforward model, where we have a set of inputs, a series of layers of neurons with signals propagating forward until they reach the output. 6 | 7 | The success of this type of neural networks led to the development of a large family called deep neural networks which have a large number of neuron layers between the input and the output. One example of this evolution of deep neural networks are convolutional neural networks. An application I like very much is the deep dream generator which is a convolutional neural network that takes in a picture as input, then processes it through the many, many layers it has and finally shows you the modifications it has added to the original picture. 8 | 9 | These modifications, including formation it has collected over time from thousands of pictures it has seen before. So it seems the AI has been dreaming with your picture distorting it with interesting imagery in the process. -------------------------------------------------------------------------------- /Personal Note/8. Multilayer Perceptrons.md: -------------------------------------------------------------------------------- 1 | Multilayer perceptrons 2 | 3 | Now we've come to the point when we'll talk about the multilayer perceptron. This is the best-known feedforward neural network and it's one of the oldest models of the brain. It consists of neurons organized in layers and the data traverses the network from input to output. 4 | 5 | This is typically sketched from left to right. Feedforward neural networks have a so-called fully connected architecture between layers of neurons. Here's an example. First, we have the input layer. It contains the inputs of the network, technically known as the input vector. 
This is the only layer that does not contain neurons. You can think of these elements as input terminals. This is important, especially for the implementation, so I'll say it again. You don't have to place neurons in this layer. Second, we have the set of hidden layers, which are composed of neurons. Notice how all neurons from the first hidden layer are taking in all of the inputs from the input layer. 6 | 7 | The second hidden layer is composed of neurons that take all of the outputs from the first hidden layer. This is what fully connected means in this context. These layers are called hidden because the neural network does not expose them to the outside world. The outside world modifies the behavior of the network through its inputs, and the outside world gets modified by the network through its outputs, but the neurons in the middle layers are hidden. 8 | 9 | Lastly, we have the output layer, which is the last layer of neurons. There are as many neurons in this layer as outputs in the network. So there you have it. Now we are ready to start writing some code. -------------------------------------------------------------------------------- /Personal Note/9. Neurons and Brain.md: -------------------------------------------------------------------------------- 1 | Neurons and the brain 2 | 3 | Earlier, I mentioned that neural networks were inspired by the brain. So it makes sense to take a quick view at the ultimate computer. There are many things we know about the brain and so many other things we don't. Here are some important things we do know. The brain is made of neurons. 4 | 5 | The brain is a biological neural network. Each neuron in the brain performs a very modest cognitive function contributing to larger cognitive functions. Finally, the combination of these cognitive functions results in our brain's mental ability. Everything the brain does is the result of these tiny computations, feelings, memories, movement, speech, dreams, all of it. 
So let's take some of the known facts of neurons to come up with a computational model of a biological neuron. 6 | 7 | The structure of biological neurons consists of several parts. We are only interested in three parts of the neuron. Dendrites take electrical signals coming from nerves or other neurons. These signals may come from sense organs, like the eyes, tongue, or ears. The nucleus of the neuron is where some processing is believed to happen so that the combination of inputs can yield some response. 8 | 9 | This response is usually interpreted as binary so that if the stimulus from the inputs is high enough it is said that the neuron fires an output signal. This output signal goes through an extension, called the axon, into other neurons or some other part of the body that will react to this signal, such as muscles. Now let's build a computational model of a neuron based on these three elements. -------------------------------------------------------------------------------- /Personal Note/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/Personal Note/README.md -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Train Neural Networks In C++ 2 | 3 | -------------------------------------------------------------------------------- /SDRNN/Executables/Linux/SDRNN_7to1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Linux/SDRNN_7to1 -------------------------------------------------------------------------------- /SDRNN/Executables/Linux/SDRNN_7to10: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Linux/SDRNN_7to10 -------------------------------------------------------------------------------- /SDRNN/Executables/Linux/SDRNN_7to7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Linux/SDRNN_7to7 -------------------------------------------------------------------------------- /SDRNN/Executables/MacOSX/SDRNN_7to1.app.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/MacOSX/SDRNN_7to1.app.zip -------------------------------------------------------------------------------- /SDRNN/Executables/MacOSX/SDRNN_7to10.app.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/MacOSX/SDRNN_7to10.app.zip -------------------------------------------------------------------------------- /SDRNN/Executables/MacOSX/SDRNN_7to7.app.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/MacOSX/SDRNN_7to7.app.zip -------------------------------------------------------------------------------- /SDRNN/Executables/Windows/SDRNN_7to1.exe: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Windows/SDRNN_7to1.exe -------------------------------------------------------------------------------- /SDRNN/Executables/Windows/SDRNN_7to10.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Windows/SDRNN_7to10.exe -------------------------------------------------------------------------------- /SDRNN/Executables/Windows/SDRNN_7to7.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/Executables/Windows/SDRNN_7to7.exe -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/LinuxMakefile/Makefile: -------------------------------------------------------------------------------- 1 | # Automatically generated makefile, created by the Projucer 2 | # Don't edit this file! Your changes will be overwritten when you re-save the Projucer project! 
3 | 4 | # build with "V=1" for verbose builds 5 | ifeq ($(V), 1) 6 | V_AT = 7 | else 8 | V_AT = @ 9 | endif 10 | 11 | # (this disables dependency generation if multiple architectures are set) 12 | DEPFLAGS := $(if $(word 2, $(TARGET_ARCH)), , -MMD) 13 | 14 | ifndef STRIP 15 | STRIP=strip 16 | endif 17 | 18 | ifndef AR 19 | AR=ar 20 | endif 21 | 22 | ifndef CONFIG 23 | CONFIG=Debug 24 | endif 25 | 26 | JUCE_ARCH_LABEL := $(shell uname -m) 27 | 28 | ifeq ($(CONFIG),Debug) 29 | JUCE_BINDIR := build 30 | JUCE_LIBDIR := build 31 | JUCE_OBJDIR := build/intermediate/Debug 32 | JUCE_OUTDIR := build 33 | 34 | ifeq ($(TARGET_ARCH),) 35 | TARGET_ARCH := 36 | endif 37 | 38 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DDEBUG=1" "-D_DEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" "-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" "-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 39 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 40 | JUCE_TARGET_APP := SDRNN_7to1 41 | 42 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -g -ggdb -O0 $(CFLAGS) 43 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 44 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 45 | 46 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 
47 | endif 48 | 49 | ifeq ($(CONFIG),Release) 50 | JUCE_BINDIR := build 51 | JUCE_LIBDIR := build 52 | JUCE_OBJDIR := build/intermediate/Release 53 | JUCE_OUTDIR := build 54 | 55 | ifeq ($(TARGET_ARCH),) 56 | TARGET_ARCH := 57 | endif 58 | 59 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DNDEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" "-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" "-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 60 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 61 | JUCE_TARGET_APP := SDRNN_7to1 62 | 63 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -O3 $(CFLAGS) 64 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 65 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 66 | 67 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 68 | endif 69 | 70 | OBJECTS_APP := \ 71 | $(JUCE_OBJDIR)/Main_90ebc5c2.o \ 72 | $(JUCE_OBJDIR)/MLP_216ae892.o \ 73 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o \ 74 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o \ 75 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o \ 76 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o \ 77 | $(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o \ 78 | 79 | .PHONY: clean all strip 80 | 81 | all : 
$(JUCE_OUTDIR)/$(JUCE_TARGET_APP) 82 | 83 | $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) : $(OBJECTS_APP) $(RESOURCES) 84 | @command -v pkg-config >/dev/null 2>&1 || { echo >&2 "pkg-config not installed. Please, install it."; exit 1; } 85 | @pkg-config --print-errors freetype2 libcurl 86 | @echo Linking "SDRNN_7to1 - App" 87 | -$(V_AT)mkdir -p $(JUCE_BINDIR) 88 | -$(V_AT)mkdir -p $(JUCE_LIBDIR) 89 | -$(V_AT)mkdir -p $(JUCE_OUTDIR) 90 | $(V_AT)$(CXX) -o $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) $(OBJECTS_APP) $(JUCE_LDFLAGS) $(JUCE_LDFLAGS_APP) $(RESOURCES) $(TARGET_ARCH) 91 | 92 | $(JUCE_OBJDIR)/Main_90ebc5c2.o: ../../Source/Main.cpp 93 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 94 | @echo "Compiling Main.cpp" 95 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 96 | 97 | $(JUCE_OBJDIR)/MLP_216ae892.o: ../../Source/MLP.cpp 98 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 99 | @echo "Compiling MLP.cpp" 100 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 101 | 102 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o: ../../JuceLibraryCode/include_juce_core.cpp 103 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 104 | @echo "Compiling include_juce_core.cpp" 105 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 106 | 107 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o: ../../JuceLibraryCode/include_juce_data_structures.cpp 108 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 109 | @echo "Compiling include_juce_data_structures.cpp" 110 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 111 | 112 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o: ../../JuceLibraryCode/include_juce_events.cpp 113 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 114 | @echo "Compiling include_juce_events.cpp" 115 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 116 | 117 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o: ../../JuceLibraryCode/include_juce_graphics.cpp 118 | 
-$(V_AT)mkdir -p $(JUCE_OBJDIR) 119 | @echo "Compiling include_juce_graphics.cpp" 120 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 121 | 122 | $(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o: ../../JuceLibraryCode/include_juce_gui_basics.cpp 123 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 124 | @echo "Compiling include_juce_gui_basics.cpp" 125 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 126 | 127 | clean: 128 | @echo Cleaning SDRNN_7to1 129 | $(V_AT)$(CLEANCMD) 130 | 131 | strip: 132 | @echo Stripping SDRNN_7to1 133 | -$(V_AT)$(STRIP) --strip-unneeded $(JUCE_OUTDIR)/$(TARGET) 134 | 135 | -include $(OBJECTS_APP:%.o=%.d) 136 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/MacOSX/Info-App.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | CFBundleExecutable 7 | ${EXECUTABLE_NAME} 8 | CFBundleIconFile 9 | 10 | CFBundleIdentifier 11 | com.JUCE.SDRNN_7to1 12 | CFBundleName 13 | SDRNN_7to1 14 | CFBundleDisplayName 15 | SDRNN_7to1 16 | CFBundlePackageType 17 | APPL 18 | CFBundleSignature 19 | ???? 
20 | CFBundleShortVersionString 21 | 1.0.0 22 | CFBundleVersion 23 | 1.0.0 24 | NSHumanReadableCopyright 25 | 26 | NSHighResolutionCapable 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/MacOSX/RecentFilesMenuTemplate.nib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/SDRNN_7to1/Builds/MacOSX/RecentFilesMenuTemplate.nib -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/MacOSX/SDRNN_7to1.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | BuildSystemType 6 | Original 7 | DisableBuildSystemDeprecationWarning 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/VisualStudio2019/SDRNN_7to1.sln: -------------------------------------------------------------------------------- 1 | Microsoft Visual Studio Solution File, Format Version 11.00 2 | # Visual Studio 2019 3 | 4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SDRNN_7to1 - App", "SDRNN_7to1_App.vcxproj", "{8DF5565C-C8A8-A592-2C12-7F87D68B3D9E}" 5 | EndProject 6 | Global 7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 8 | Debug|x64 = Debug|x64 9 | Release|x64 = Release|x64 10 | EndGlobalSection 11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 12 | {8DF5565C-C8A8-A592-2C12-7F87D68B3D9E}.Debug|x64.ActiveCfg = Debug|x64 13 | {8DF5565C-C8A8-A592-2C12-7F87D68B3D9E}.Debug|x64.Build.0 = Debug|x64 14 | {8DF5565C-C8A8-A592-2C12-7F87D68B3D9E}.Release|x64.ActiveCfg = Release|x64 15 | {8DF5565C-C8A8-A592-2C12-7F87D68B3D9E}.Release|x64.Build.0 = Release|x64 16 | EndGlobalSection 17 | GlobalSection(SolutionProperties) 
= preSolution 18 | HideSolutionNode = FALSE 19 | EndGlobalSection 20 | EndGlobal 21 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Builds/VisualStudio2019/resources.rc: -------------------------------------------------------------------------------- 1 | #pragma code_page(65001) 2 | 3 | #ifdef JUCE_USER_DEFINED_RC_FILE 4 | #include JUCE_USER_DEFINED_RC_FILE 5 | #else 6 | 7 | #undef WIN32_LEAN_AND_MEAN 8 | #define WIN32_LEAN_AND_MEAN 9 | #include 10 | 11 | VS_VERSION_INFO VERSIONINFO 12 | FILEVERSION 1,0,0,0 13 | BEGIN 14 | BLOCK "StringFileInfo" 15 | BEGIN 16 | BLOCK "040904E4" 17 | BEGIN 18 | VALUE "CompanyName", "JUCE\0" 19 | VALUE "FileDescription", "SDRNN_7to1\0" 20 | VALUE "FileVersion", "1.0.0\0" 21 | VALUE "ProductName", "SDRNN_7to1\0" 22 | VALUE "ProductVersion", "1.0.0\0" 23 | END 24 | END 25 | 26 | BLOCK "VarFileInfo" 27 | BEGIN 28 | VALUE "Translation", 0x409, 1252 29 | END 30 | END 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/JuceHeader.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | This is the header file that your files should include in order to get all the 7 | JUCE library headers. You should avoid including the JUCE headers directly in 8 | your own source files, because that wouldn't pick up the correct configuration 9 | options for your app. 
10 | 11 | */ 12 | 13 | #pragma once 14 | 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | 23 | #if defined (JUCE_PROJUCER_VERSION) && JUCE_PROJUCER_VERSION < JUCE_VERSION 24 | /** If you've hit this error then the version of the Projucer that was used to generate this project is 25 | older than the version of the JUCE modules being included. To fix this error, re-save your project 26 | using the latest version of the Projucer or, if you aren't using the Projucer to manage your project, 27 | remove the JUCE_PROJUCER_VERSION define from the AppConfig.h file. 28 | */ 29 | #error "This project was last saved using an outdated version of the Projucer! Re-save this project with the latest version to fix this error." 30 | #endif 31 | 32 | #if ! DONT_SET_USING_JUCE_NAMESPACE 33 | // If your code uses a lot of JUCE classes, then this will obviously save you 34 | // a lot of typing, but can be disabled by setting DONT_SET_USING_JUCE_NAMESPACE. 35 | using namespace juce; 36 | #endif 37 | 38 | #if ! JUCE_DONT_DECLARE_PROJECTINFO 39 | namespace ProjectInfo 40 | { 41 | const char* const projectName = "SDRNN_7to1"; 42 | const char* const companyName = "JUCE"; 43 | const char* const versionString = "1.0.0"; 44 | const int versionNumber = 0x10000; 45 | } 46 | #endif 47 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/ReadMe.txt: -------------------------------------------------------------------------------- 1 | 2 | Important Note!! 3 | ================ 4 | 5 | The purpose of this folder is to contain files that are auto-generated by the Projucer, 6 | and ALL files in this folder will be mercilessly DELETED and completely re-written whenever 7 | the Projucer saves your project. 8 | 9 | Therefore, it's a bad idea to make any manual changes to the files in here, or to 10 | put any of your own files in here if you don't want to lose them. 
(Of course you may choose 11 | to add the folder's contents to your version-control system so that you can re-merge your own 12 | modifications after the Projucer has saved its changes). 13 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_core.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_core.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_data_structures.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_data_structures.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 
5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_events.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_events.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_graphics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_graphics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_gui_basics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! 
This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/JuceLibraryCode/include_juce_gui_basics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/SDRNN_7to1.jucer: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Source/MLP.cpp: -------------------------------------------------------------------------------- 1 | #include "MLP.h" 2 | 3 | double frand(){ 4 | return (2.0*(double)rand() / RAND_MAX) - 1.0; 5 | } 6 | 7 | 8 | // Return a new Perceptron object with the specified number of inputs (+1 for the bias). 9 | Perceptron::Perceptron(int inputs, double bias){ 10 | this->bias = bias; 11 | weights.resize(inputs+1); 12 | generate(weights.begin(),weights.end(),frand); 13 | } 14 | 15 | // Run the perceptron. x is a vector with the input values. 16 | double Perceptron::run(vector x){ 17 | x.push_back(bias); 18 | double sum = inner_product(x.begin(),x.end(),weights.begin(),(double)0.0); 19 | return sigmoid(sum); 20 | } 21 | 22 | // Set the weights. w_init is a vector with the weights. 
23 | void Perceptron::set_weights(vector w_init){ 24 | weights = w_init; 25 | } 26 | 27 | // Evaluate the sigmoid function for the floating point input x. 28 | double Perceptron::sigmoid(double x){ 29 | return 1.0/(1.0 + exp(-x)); 30 | } 31 | 32 | 33 | // Return a new MultiLayerPerceptron object with the specified parameters. 34 | MultiLayerPerceptron::MultiLayerPerceptron(vector layers, double bias, double eta) { 35 | this->layers = layers; 36 | this->bias = bias; 37 | this->eta = eta; 38 | 39 | for (int i = 0; i < layers.size(); i++){ 40 | values.push_back(vector(layers[i],0.0)); 41 | d.push_back(vector(layers[i],0.0)); 42 | network.push_back(vector()); 43 | if (i > 0) //network[0] is the input layer,so it has no neurons 44 | for (int j = 0; j < layers[i]; j++) 45 | network[i].push_back(Perceptron(layers[i-1], bias)); 46 | } 47 | } 48 | 49 | 50 | // Set the weights. w_init is a vector of vectors of vectors with the weights for all but the input layer. 51 | void MultiLayerPerceptron::set_weights(vector > > w_init) { 52 | for (int i = 0; i< w_init.size(); i++) 53 | for (int j = 0; j < w_init[i].size(); j++) 54 | network[i+1][j].set_weights(w_init[i][j]); 55 | } 56 | 57 | void MultiLayerPerceptron::print_weights() { 58 | cout << endl; 59 | for (int i = 1; i < network.size(); i++){ 60 | for (int j = 0; j < layers[i]; j++) { 61 | cout << "Layer " << i+1 << " Neuron " << j << ": "; 62 | for (auto &it: network[i][j].weights) 63 | cout << it <<" "; 64 | cout << endl; 65 | } 66 | } 67 | cout << endl; 68 | } 69 | 70 | // Feed a sample x into the MultiLayer Perceptron. 71 | vector MultiLayerPerceptron::run(vector x) { 72 | values[0] = x; 73 | for (int i = 1; i < network.size(); i++) 74 | for (int j = 0; j < layers[i]; j++) 75 | values[i][j] = network[i][j].run(values[i-1]); 76 | return values.back(); 77 | } 78 | 79 | // Run a single (x,y) pair with the backpropagation algorithm. 
80 | double MultiLayerPerceptron::bp(vector x, vector y){ 81 | 82 | // Backpropagation Step by Step: 83 | 84 | // STEP 1: Feed a sample to the network 85 | vector outputs = run(x); 86 | 87 | // STEP 2: Calculate the MSE 88 | vector error; 89 | double MSE = 0.0; 90 | for (int i = 0; i < y.size(); i++){ 91 | error.push_back(y[i] - outputs[i]); 92 | MSE += error[i] * error[i]; 93 | } 94 | MSE /= layers.back(); 95 | 96 | // STEP 3: Calculate the output error terms 97 | for (int i = 0; i < outputs.size(); i++) 98 | d.back()[i] = outputs[i] * (1 - outputs[i]) * (error[i]); 99 | 100 | // STEP 4: Calculate the error term of each unit on each layer 101 | for (int i = network.size()-2; i > 0; i--) 102 | for (int h = 0; h < network[i].size(); h++){ 103 | double fwd_error = 0.0; 104 | for (int k = 0; k < layers[i+1]; k++) 105 | fwd_error += network[i+1][k].weights[h] * d[i+1][k]; 106 | d[i][h] = values[i][h] * (1-values[i][h]) * fwd_error; 107 | } 108 | 109 | // STEPS 5 & 6: Calculate the deltas and update the weights 110 | for (int i = 1; i < network.size(); i++) 111 | for (int j = 0; j < layers[i]; j++) 112 | for (int k = 0; k < layers[i-1]+1; k++){ 113 | double delta; 114 | if (k==layers[i-1]) 115 | delta = eta * d[i][j] * bias; 116 | else 117 | delta = eta * d[i][j] * values[i-1][k]; 118 | network[i][j].weights[k] += delta; 119 | } 120 | return MSE; 121 | } 122 | 123 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Source/MLP.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | using namespace std; 11 | 12 | class Perceptron { 13 | public: 14 | vector weights; 15 | double bias; 16 | Perceptron(int inputs, double bias=1.0); 17 | double run(vector x); 18 | void set_weights(vector w_init); 19 | double sigmoid(double x); 20 | }; 21 | 22 | class MultiLayerPerceptron { 23 | public: 
24 | MultiLayerPerceptron(vector layers, double bias=1.0, double eta = 0.5); 25 | void set_weights(vector > > w_init); 26 | void print_weights(); 27 | vector run(vector x); 28 | double bp(vector x, vector y); 29 | 30 | vector layers; 31 | double bias; 32 | double eta; 33 | vector > network; 34 | vector > values; 35 | vector > d; 36 | }; 37 | 38 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Source/Main.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | ============================================================================== 3 | 4 | This file contains the startup code for a PIP. 5 | 6 | ============================================================================== 7 | */ 8 | 9 | #include 10 | #include "SDRNN_7to1_GUI.h" 11 | 12 | class Application : public juce::JUCEApplication 13 | { 14 | public: 15 | //============================================================================== 16 | Application() = default; 17 | 18 | const juce::String getApplicationName() override { return "SDRNN_7to1"; } 19 | const juce::String getApplicationVersion() override { return "1.0.0"; } 20 | 21 | void initialise (const juce::String&) override 22 | { 23 | mainWindow.reset (new MainWindow ("SDRNN_7to1", new MainContentComponent, *this)); 24 | } 25 | 26 | void shutdown() override { mainWindow = nullptr; } 27 | 28 | private: 29 | class MainWindow : public juce::DocumentWindow 30 | { 31 | public: 32 | MainWindow (const juce::String& name, juce::Component* c, JUCEApplication& a) 33 | : DocumentWindow (name, juce::Desktop::getInstance().getDefaultLookAndFeel() 34 | .findColour (ResizableWindow::backgroundColourId), 35 | juce::DocumentWindow::allButtons), 36 | app (a) 37 | { 38 | setUsingNativeTitleBar (true); 39 | setContentOwned (c, true); 40 | 41 | #if JUCE_ANDROID || JUCE_IOS 42 | setFullScreen (true); 43 | #else 44 | setResizable (true, false); 45 | setResizeLimits (300, 250, 10000, 
10000); 46 | centreWithSize (getWidth(), getHeight()); 47 | #endif 48 | 49 | setVisible (true); 50 | } 51 | 52 | void closeButtonPressed() override 53 | { 54 | app.systemRequestedQuit(); 55 | } 56 | 57 | private: 58 | JUCEApplication& app; 59 | 60 | //============================================================================== 61 | JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MainWindow) 62 | }; 63 | 64 | std::unique_ptr mainWindow; 65 | }; 66 | 67 | //============================================================================== 68 | START_JUCE_APPLICATION (Application) 69 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to1/Source/SDRNN_7to1_GUI.h: -------------------------------------------------------------------------------- 1 | /* 2 | ============================================================================== 3 | 4 | This file is part of the JUCE tutorials. 5 | Copyright (c) 2020 - Raw Material Software Limited 6 | 7 | The code included in this file is provided under the terms of the ISC license 8 | http://www.isc.org/downloads/software-support-policy/isc-license. Permission 9 | To use, copy, modify, and/or distribute this software for any purpose with or 10 | without fee is hereby granted provided that the above copyright notice and 11 | this permission notice appear in all copies. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, 14 | WHETHER EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR 15 | PURPOSE, ARE DISCLAIMED. 16 | 17 | ============================================================================== 18 | */ 19 | 20 | /******************************************************************************* 21 | The block below describes the properties of this PIP. A PIP is a short snippet 22 | of code that can be read by the Projucer and used to generate a JUCE project. 
23 | 24 | BEGIN_JUCE_PIP_METADATA 25 | 26 | name: SDRNN_7to1 27 | version: 1.0.0 28 | vendor: JUCE 29 | website: http://juce.com 30 | description: Segment Display Recognition Neural Network (7 to 1 model). 31 | 32 | dependencies: juce_core, juce_data_structures, juce_events, juce_graphics, 33 | juce_gui_basics 34 | exporters: xcode_mac, vs2019, linux_make 35 | 36 | type: Component 37 | mainClass: MainContentComponent 38 | 39 | useLocalCopy: 1 40 | 41 | END_JUCE_PIP_METADATA 42 | 43 | *******************************************************************************/ 44 | 45 | 46 | #pragma once 47 | #include "MLP.h" 48 | #include 49 | 50 | #define WIN_W 500 51 | #define WIN_H 350 52 | 53 | #define O1X 20 54 | #define O1Y 20 55 | #define VSW 72 56 | #define VSL 100 57 | #define HSW 50 58 | #define HSL 100 59 | #define V1X 0+O1X 60 | #define V2X V1X+VSW+VSL 61 | #define H1Y 0+O1Y 62 | #define H2Y H1Y+HSW+VSL-15 63 | #define H3Y H2Y+HSW+VSL-15 64 | #define SLaX V1X+VSW 65 | #define SLaY H1Y 66 | #define SLbX V2X 67 | #define SLbY H1Y+HSW 68 | #define SLcX V2X 69 | #define SLcY H2Y+HSW 70 | #define SLdX V1X+VSW 71 | #define SLdY H3Y 72 | #define SLeX V1X 73 | #define SLeY H2Y+HSW 74 | #define SLfX V1X 75 | #define SLfY H1Y+HSW 76 | #define SLgX V1X+VSW 77 | #define SLgY H2Y 78 | #define SEGW 25 79 | #define SEGL 100 80 | #define aX SLaX 81 | #define aY SLaY+((HSW*5)/12) 82 | #define bX SLbX 83 | #define bY SLbY 84 | #define cX SLcX 85 | #define cY SLcY 86 | #define dX SLdX 87 | #define dY SLdY+((HSW*5)/12) 88 | #define eX SLeX+((VSW*5)/8) 89 | #define eY SLeY 90 | #define fX SLfX+((VSW*5)/8) 91 | #define fY SLfY 92 | #define gX SLgX 93 | #define gY SLgY+((HSW*5)/12) 94 | 95 | #define OFFSET 230 96 | 97 | //============================================================================== 98 | class MainContentComponent : public juce::Component, 99 | public juce::Slider::Listener, 100 | public juce::Button::Listener, 101 | public juce::TextEditor::Listener{ 102 | public: 103 | 
//============================================================================== 104 | MainContentComponent(){ 105 | srand(time(NULL)); 106 | rand(); 107 | sdrnn = new MultiLayerPerceptron({7,7,1}); 108 | LookAndFeel *l = &getLookAndFeel(); 109 | l->setColour(juce::Slider::thumbColourId,juce::Colour(110,110,110)); 110 | l->setColour(juce::Slider::textBoxOutlineColourId,Colour(240,240,240)); 111 | l->setColour(juce::Slider::textBoxTextColourId,juce::Colours::black); 112 | l->setColour(juce::Slider::backgroundColourId,juce::Colour(OFFSET,OFFSET,OFFSET)); 113 | l->setColour(juce::Slider::trackColourId, juce::Colour(OFFSET, OFFSET, OFFSET)); 114 | l->setColour(juce::TextButton::buttonColourId, juce::Colour(OFFSET, OFFSET, OFFSET)); 115 | l->setColour(juce::TextButton::textColourOffId, juce::Colours::black); 116 | l->setColour(juce::TextButton::textColourOnId, juce::Colours::black); 117 | l->setColour(juce::Label::textColourId, juce::Colours::black); 118 | l->setColour(juce::Label::backgroundColourId, juce::Colour(240,240,240)); 119 | l->setColour(juce::TextEditor::ColourIds::textColourId, juce::Colours::black); 120 | l->setColour(juce::TextEditor::ColourIds::backgroundColourId, juce::Colours::white); 121 | 122 | addAndMakeVisible(lbl_epochs_txt); 123 | addAndMakeVisible(entry_epochs); 124 | lbl_epochs_txt.setText("Epochs to Train:", no); 125 | entry_epochs.setText("10"); 126 | 127 | addAndMakeVisible(btn_train); 128 | btn_train.setButtonText("Train some more"); 129 | btn_train.addListener(this); 130 | 131 | addAndMakeVisible(lbl_err_txt); 132 | addAndMakeVisible(lbl_err); 133 | lbl_err_txt.setText("Training Error:",no); 134 | lbl_err.setText("---",no); 135 | lbl_err.setJustificationType(juce::Justification::centred); 136 | 137 | addAndMakeVisible(lbl_tepochs_txt); 138 | addAndMakeVisible(lbl_tepochs); 139 | lbl_tepochs_txt.setText("Epochs so far:",no); 140 | lbl_tepochs.setText("0",no); 141 | lbl_tepochs.setJustificationType(juce::Justification::centred); 142 | 143 | 
addAndMakeVisible(lbl_out); 144 | addAndMakeVisible(lbl_out_txt); 145 | lbl_out_txt.setText("Raw Output:",no); 146 | lbl_out.setJustificationType(juce::Justification::centred); 147 | 148 | addAndMakeVisible(lbl_int); 149 | addAndMakeVisible(lbl_int_txt); 150 | lbl_int_txt.setText("Number Output:", no); 151 | lbl_int.setFont(60); 152 | lbl_int.setJustificationType(juce::Justification::centred); 153 | 154 | addAndMakeVisible(btn_reset); 155 | btn_reset.setButtonText("Reset"); 156 | btn_reset.addListener(this); 157 | 158 | addAndMakeVisible (slider_a); 159 | slider_a.setRange (0.0, 1.0); 160 | slider_a.addListener (this); 161 | slider_a.setTextBoxStyle(juce::Slider::TextBoxAbove,true,60,20); 162 | slider_a.setNumDecimalPlacesToDisplay(2); 163 | 164 | addAndMakeVisible(slider_b); 165 | slider_b.setRange(0.0,1.0); 166 | slider_b.addListener(this); 167 | slider_b.setSliderStyle(juce::Slider::LinearVertical); 168 | slider_b.setTextBoxStyle(juce::Slider::TextBoxRight,true,60,20); 169 | slider_b.setNumDecimalPlacesToDisplay(2); 170 | 171 | addAndMakeVisible(slider_c); 172 | slider_c.setRange(0.0,1.0); 173 | slider_c.addListener(this); 174 | slider_c.setSliderStyle(juce::Slider::LinearVertical); 175 | slider_c.setTextBoxStyle(juce::Slider::TextBoxRight,true,60,20); 176 | slider_c.setNumDecimalPlacesToDisplay(2); 177 | 178 | addAndMakeVisible(slider_d); 179 | slider_d.setRange(0.0,1.0); 180 | slider_d.addListener(this); 181 | slider_d.setTextBoxStyle(juce::Slider::TextBoxAbove,true,60,20); 182 | slider_d.setNumDecimalPlacesToDisplay(2); 183 | 184 | addAndMakeVisible(slider_e); 185 | slider_e.setRange(0.0,1.0); 186 | slider_e.addListener(this); 187 | slider_e.setSliderStyle(juce::Slider::LinearVertical); 188 | slider_e.setTextBoxStyle(juce::Slider::TextBoxLeft,true,60,20); 189 | slider_e.setNumDecimalPlacesToDisplay(2); 190 | 191 | addAndMakeVisible(slider_f); 192 | slider_f.setRange(0.0,1.0); 193 | slider_f.addListener(this); 194 | 
slider_f.setSliderStyle(juce::Slider::LinearVertical); 195 | slider_f.setTextBoxStyle(juce::Slider::TextBoxLeft,true,60,20); 196 | slider_f.setNumDecimalPlacesToDisplay(2); 197 | 198 | addAndMakeVisible(slider_g); 199 | slider_g.setRange(0.0,1.0); 200 | slider_g.addListener(this); 201 | slider_g.setTextBoxStyle(juce::Slider::TextBoxAbove,true,60,20); 202 | slider_g.setNumDecimalPlacesToDisplay(2); 203 | 204 | setSize(WIN_W, WIN_H); 205 | run_ann(); 206 | } 207 | 208 | 209 | #define DX 90 210 | #define DY 30 211 | #define LEN 90 212 | #define HEI 25 213 | #define X0 290 214 | #define X1 (X0+DX) 215 | #define Y0 30 216 | #define Y1 (Y0+DY) 217 | #define Y2 (Y1+DY) 218 | #define Y3 (Y2+DY) 219 | #define Y4 (Y3+DY) 220 | #define Y5 (Y4+DY) 221 | #define YN (Y5-10) 222 | #define YR (Y5+DY) 223 | 224 | void resized() override{ 225 | slider_a.setBounds(SLaX,SLaY,HSL,HSW); 226 | slider_b.setBounds(SLbX,SLbY,VSW,VSL); 227 | slider_c.setBounds(SLcX,SLcY,VSW,VSL); 228 | slider_d.setBounds(SLdX,SLdY,HSL,HSW); 229 | slider_e.setBounds(SLeX,SLeY,VSW,VSL); 230 | slider_f.setBounds(SLfX,SLfY,VSW,VSL); 231 | slider_g.setBounds(SLgX,SLgY,HSL,HSW); 232 | 233 | lbl_epochs_txt.setBounds (X0, Y0, LEN, HEI); 234 | entry_epochs.setBounds (X1, Y0, LEN, HEI); 235 | btn_train.setBounds (X1, Y1, LEN, HEI); 236 | lbl_err_txt.setBounds (X0, Y2, LEN, HEI); 237 | lbl_err.setBounds (X1, Y2, LEN, HEI); 238 | lbl_tepochs_txt.setBounds (X0, Y3, LEN, HEI); 239 | lbl_tepochs.setBounds (X1, Y3, LEN, HEI); 240 | lbl_out_txt.setBounds (X0, Y4, LEN, HEI); 241 | lbl_out.setBounds (X1, Y4, LEN, HEI); 242 | lbl_int_txt.setBounds (X0, Y5, LEN, HEI); 243 | lbl_int.setBounds (X1, YN, LEN, 50); 244 | btn_reset.setBounds (X0, YR, LEN, HEI); 245 | 246 | setSize(WIN_W,WIN_H); 247 | } 248 | 249 | void sliderValueChanged (juce::Slider* slider) override { 250 | Colour c( (int)(slider->getValue()*(255-OFFSET)) + OFFSET, 251 | OFFSET - (int)(slider->getValue()* OFFSET), 252 | OFFSET - (int)(slider->getValue()* OFFSET) ); 
253 | 254 | slider->setColour(juce::Slider::backgroundColourId,c); 255 | slider->setColour(juce::Slider::trackColourId,c); 256 | run_ann(); 257 | repaint(); 258 | } 259 | 260 | void run_ann(){ 261 | vector x; 262 | x.push_back(slider_a.getValue()); 263 | x.push_back(slider_b.getValue()); 264 | x.push_back(slider_c.getValue()); 265 | x.push_back(slider_d.getValue()); 266 | x.push_back(slider_e.getValue()); 267 | x.push_back(slider_f.getValue()); 268 | x.push_back(slider_g.getValue()); 269 | double theoutput = sdrnn->run(x)[0]; 270 | lbl_out.setText(to_string(theoutput),no); 271 | lbl_int.setText(to_string(min((int)(theoutput * 10), 9)),no); 272 | } 273 | 274 | void train_ann(){ 275 | double MSE; 276 | int epochs = entry_epochs.getText().getIntValue(); 277 | for (int i = 0; i < epochs; i++){ 278 | MSE = 0.0; 279 | MSE += sdrnn->bp({1,1,1,1,1,1,0}, {0.05}); //0 pattern 280 | MSE += sdrnn->bp({0,1,1,0,0,0,0}, {0.15}); //1 pattern 281 | MSE += sdrnn->bp({1,1,0,1,1,0,1}, {0.25}); //2 pattern 282 | MSE += sdrnn->bp({1,1,1,1,0,0,1}, {0.35}); //3 pattern 283 | MSE += sdrnn->bp({0,1,1,0,0,1,1}, {0.45}); //4 pattern 284 | MSE += sdrnn->bp({1,0,1,1,0,1,1}, {0.55}); //5 pattern 285 | MSE += sdrnn->bp({1,0,1,1,1,1,1}, {0.65}); //6 pattern 286 | MSE += sdrnn->bp({1,1,1,0,0,0,0}, {0.75}); //7 pattern 287 | MSE += sdrnn->bp({1,1,1,1,1,1,1}, {0.85}); //8 pattern 288 | MSE += sdrnn->bp({1,1,1,1,0,1,1}, {0.95}); //9 pattern 289 | } 290 | MSE /= 10.0; 291 | lbl_err.setText(to_string(MSE), no); 292 | tepochs += epochs; 293 | lbl_tepochs.setText(to_string(tepochs), no); 294 | run_ann(); 295 | } 296 | 297 | void buttonClicked(juce::Button* button) override{ 298 | if (button == &btn_train) 299 | train_ann(); 300 | if (button == &btn_reset){ 301 | delete(sdrnn); 302 | sdrnn = new MultiLayerPerceptron({7,7,1}); 303 | tepochs = 0; 304 | lbl_err.setText("---", no); 305 | lbl_tepochs.setText(to_string(tepochs), no); 306 | run_ann(); 307 | } 308 | } 309 | 310 | void paint(juce::Graphics& g) 
override { 311 | g.fillAll(juce::Colour(240,240,240)); 312 | 313 | g.setColour(juce::Colour( 314 | (int)(slider_a.getValue()*(255-OFFSET)) + OFFSET, 315 | OFFSET - (int)(slider_a.getValue()* OFFSET), 316 | OFFSET - (int)(slider_a.getValue()* OFFSET) )); 317 | g.fillRect(aX,aY,SEGL,SEGW); 318 | 319 | g.setColour(juce::Colour( 320 | (int)(slider_b.getValue()*(255-OFFSET)) + OFFSET, 321 | OFFSET - (int)(slider_b.getValue()* OFFSET), 322 | OFFSET - (int)(slider_b.getValue()* OFFSET) )); 323 | g.fillRect(bX,bY,SEGW,SEGL); 324 | 325 | g.setColour(juce::Colour( 326 | (int)(slider_c.getValue()*(255-OFFSET)) + OFFSET, 327 | OFFSET - (int)(slider_c.getValue()* OFFSET), 328 | OFFSET - (int)(slider_c.getValue()* OFFSET) )); 329 | g.fillRect(cX,cY,SEGW,SEGL); 330 | 331 | g.setColour(juce::Colour( 332 | (int)(slider_d.getValue()*(255-OFFSET)) + OFFSET, 333 | OFFSET - (int)(slider_d.getValue()* OFFSET), 334 | OFFSET - (int)(slider_d.getValue()* OFFSET) )); 335 | g.fillRect(dX,dY,SEGL,SEGW); 336 | 337 | g.setColour(juce::Colour( 338 | (int)(slider_e.getValue()*(255-OFFSET)) + OFFSET, 339 | OFFSET - (int)(slider_e.getValue()* OFFSET), 340 | OFFSET - (int)(slider_e.getValue()* OFFSET) )); 341 | g.fillRect(eX,eY,SEGW,SEGL); 342 | 343 | g.setColour(juce::Colour( 344 | (int)(slider_f.getValue()*(255-OFFSET)) + OFFSET, 345 | OFFSET - (int)(slider_f.getValue()* OFFSET), 346 | OFFSET - (int)(slider_f.getValue()* OFFSET) )); 347 | g.fillRect(fX,fY,SEGW,SEGL); 348 | 349 | g.setColour(juce::Colour( 350 | (int)(slider_g.getValue()*(255-OFFSET)) + OFFSET, 351 | OFFSET - (int)(slider_g.getValue()* OFFSET), 352 | OFFSET - (int)(slider_g.getValue()* OFFSET) )); 353 | g.fillRect(gX,gY,SEGL,SEGW); 354 | 355 | } 356 | 357 | MultiLayerPerceptron * sdrnn; 358 | juce::NotificationType no = juce::dontSendNotification; 359 | int tepochs = 0; 360 | 361 | private: 362 | juce::Slider slider_a; 363 | juce::Slider slider_b; 364 | juce::Slider slider_c; 365 | juce::Slider slider_d; 366 | juce::Slider slider_e; 
367 | juce::Slider slider_f; 368 | juce::Slider slider_g; 369 | 370 | juce::Label lbl_epochs_txt; 371 | juce::TextEditor entry_epochs; 372 | juce::TextButton btn_train; 373 | juce::Label lbl_err_txt; 374 | juce::Label lbl_err; 375 | juce::Label lbl_tepochs_txt; 376 | juce::Label lbl_tepochs; 377 | juce::Label lbl_out_txt; 378 | juce::Label lbl_out; 379 | juce::Label lbl_int_txt; 380 | juce::Label lbl_int; 381 | juce::TextButton btn_reset; 382 | 383 | //============================================================================== 384 | JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MainContentComponent) 385 | }; 386 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/LinuxMakefile/Makefile: -------------------------------------------------------------------------------- 1 | # Automatically generated makefile, created by the Projucer 2 | # Don't edit this file! Your changes will be overwritten when you re-save the Projucer project! 3 | 4 | # build with "V=1" for verbose builds 5 | ifeq ($(V), 1) 6 | V_AT = 7 | else 8 | V_AT = @ 9 | endif 10 | 11 | # (this disables dependency generation if multiple architectures are set) 12 | DEPFLAGS := $(if $(word 2, $(TARGET_ARCH)), , -MMD) 13 | 14 | ifndef STRIP 15 | STRIP=strip 16 | endif 17 | 18 | ifndef AR 19 | AR=ar 20 | endif 21 | 22 | ifndef CONFIG 23 | CONFIG=Debug 24 | endif 25 | 26 | JUCE_ARCH_LABEL := $(shell uname -m) 27 | 28 | ifeq ($(CONFIG),Debug) 29 | JUCE_BINDIR := build 30 | JUCE_LIBDIR := build 31 | JUCE_OBJDIR := build/intermediate/Debug 32 | JUCE_OUTDIR := build 33 | 34 | ifeq ($(TARGET_ARCH),) 35 | TARGET_ARCH := 36 | endif 37 | 38 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DDEBUG=1" "-D_DEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" 
"-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" "-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 39 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 40 | JUCE_TARGET_APP := SDRNN_7to10 41 | 42 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -g -ggdb -O0 $(CFLAGS) 43 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 44 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 45 | 46 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 47 | endif 48 | 49 | ifeq ($(CONFIG),Release) 50 | JUCE_BINDIR := build 51 | JUCE_LIBDIR := build 52 | JUCE_OBJDIR := build/intermediate/Release 53 | JUCE_OUTDIR := build 54 | 55 | ifeq ($(TARGET_ARCH),) 56 | TARGET_ARCH := 57 | endif 58 | 59 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DNDEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" "-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" "-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 60 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" 
"-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 61 | JUCE_TARGET_APP := SDRNN_7to10 62 | 63 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -O3 $(CFLAGS) 64 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 65 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 66 | 67 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 68 | endif 69 | 70 | OBJECTS_APP := \ 71 | $(JUCE_OBJDIR)/Main_90ebc5c2.o \ 72 | $(JUCE_OBJDIR)/MLP_216ae892.o \ 73 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o \ 74 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o \ 75 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o \ 76 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o \ 77 | $(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o \ 78 | 79 | .PHONY: clean all strip 80 | 81 | all : $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) 82 | 83 | $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) : $(OBJECTS_APP) $(RESOURCES) 84 | @command -v pkg-config >/dev/null 2>&1 || { echo >&2 "pkg-config not installed. 
Please, install it."; exit 1; } 85 | @pkg-config --print-errors freetype2 libcurl 86 | @echo Linking "SDRNN_7to10 - App" 87 | -$(V_AT)mkdir -p $(JUCE_BINDIR) 88 | -$(V_AT)mkdir -p $(JUCE_LIBDIR) 89 | -$(V_AT)mkdir -p $(JUCE_OUTDIR) 90 | $(V_AT)$(CXX) -o $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) $(OBJECTS_APP) $(JUCE_LDFLAGS) $(JUCE_LDFLAGS_APP) $(RESOURCES) $(TARGET_ARCH) 91 | 92 | $(JUCE_OBJDIR)/Main_90ebc5c2.o: ../../Source/Main.cpp 93 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 94 | @echo "Compiling Main.cpp" 95 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 96 | 97 | $(JUCE_OBJDIR)/MLP_216ae892.o: ../../Source/MLP.cpp 98 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 99 | @echo "Compiling MLP.cpp" 100 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 101 | 102 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o: ../../JuceLibraryCode/include_juce_core.cpp 103 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 104 | @echo "Compiling include_juce_core.cpp" 105 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 106 | 107 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o: ../../JuceLibraryCode/include_juce_data_structures.cpp 108 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 109 | @echo "Compiling include_juce_data_structures.cpp" 110 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 111 | 112 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o: ../../JuceLibraryCode/include_juce_events.cpp 113 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 114 | @echo "Compiling include_juce_events.cpp" 115 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 116 | 117 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o: ../../JuceLibraryCode/include_juce_graphics.cpp 118 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 119 | @echo "Compiling include_juce_graphics.cpp" 120 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 121 | 122 | 
$(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o: ../../JuceLibraryCode/include_juce_gui_basics.cpp 123 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 124 | @echo "Compiling include_juce_gui_basics.cpp" 125 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 126 | 127 | clean: 128 | @echo Cleaning SDRNN_7to10 129 | $(V_AT)$(CLEANCMD) 130 | 131 | strip: 132 | @echo Stripping SDRNN_7to10 133 | -$(V_AT)$(STRIP) --strip-unneeded $(JUCE_OUTDIR)/$(TARGET) 134 | 135 | -include $(OBJECTS_APP:%.o=%.d) 136 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/MacOSX/Info-App.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | CFBundleExecutable 7 | ${EXECUTABLE_NAME} 8 | CFBundleIconFile 9 | 10 | CFBundleIdentifier 11 | com.JUCE.SDRNN_7to10 12 | CFBundleName 13 | SDRNN_7to10 14 | CFBundleDisplayName 15 | SDRNN_7to10 16 | CFBundlePackageType 17 | APPL 18 | CFBundleSignature 19 | ???? 
20 | CFBundleShortVersionString 21 | 1.0.0 22 | CFBundleVersion 23 | 1.0.0 24 | NSHumanReadableCopyright 25 | 26 | NSHighResolutionCapable 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/MacOSX/RecentFilesMenuTemplate.nib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/SDRNN_7to10/Builds/MacOSX/RecentFilesMenuTemplate.nib -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/MacOSX/SDRNN_7to10.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | BuildSystemType 6 | Original 7 | DisableBuildSystemDeprecationWarning 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/VisualStudio2019/SDRNN_7to10.sln: -------------------------------------------------------------------------------- 1 | Microsoft Visual Studio Solution File, Format Version 11.00 2 | # Visual Studio 2019 3 | 4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SDRNN_7to10 - App", "SDRNN_7to10_App.vcxproj", "{4D39D39C-3B6D-CA66-655A-AF763AD0FB73}" 5 | EndProject 6 | Global 7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 8 | Debug|x64 = Debug|x64 9 | Release|x64 = Release|x64 10 | EndGlobalSection 11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 12 | {4D39D39C-3B6D-CA66-655A-AF763AD0FB73}.Debug|x64.ActiveCfg = Debug|x64 13 | {4D39D39C-3B6D-CA66-655A-AF763AD0FB73}.Debug|x64.Build.0 = Debug|x64 14 | {4D39D39C-3B6D-CA66-655A-AF763AD0FB73}.Release|x64.ActiveCfg = Release|x64 15 | {4D39D39C-3B6D-CA66-655A-AF763AD0FB73}.Release|x64.Build.0 = Release|x64 16 | EndGlobalSection 17 | 
GlobalSection(SolutionProperties) = preSolution 18 | HideSolutionNode = FALSE 19 | EndGlobalSection 20 | EndGlobal 21 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Builds/VisualStudio2019/resources.rc: -------------------------------------------------------------------------------- 1 | #pragma code_page(65001) 2 | 3 | #ifdef JUCE_USER_DEFINED_RC_FILE 4 | #include JUCE_USER_DEFINED_RC_FILE 5 | #else 6 | 7 | #undef WIN32_LEAN_AND_MEAN 8 | #define WIN32_LEAN_AND_MEAN 9 | #include 10 | 11 | VS_VERSION_INFO VERSIONINFO 12 | FILEVERSION 1,0,0,0 13 | BEGIN 14 | BLOCK "StringFileInfo" 15 | BEGIN 16 | BLOCK "040904E4" 17 | BEGIN 18 | VALUE "CompanyName", "JUCE\0" 19 | VALUE "FileDescription", "SDRNN_7to10\0" 20 | VALUE "FileVersion", "1.0.0\0" 21 | VALUE "ProductName", "SDRNN_7to10\0" 22 | VALUE "ProductVersion", "1.0.0\0" 23 | END 24 | END 25 | 26 | BLOCK "VarFileInfo" 27 | BEGIN 28 | VALUE "Translation", 0x409, 1252 29 | END 30 | END 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/JuceHeader.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | This is the header file that your files should include in order to get all the 7 | JUCE library headers. You should avoid including the JUCE headers directly in 8 | your own source files, because that wouldn't pick up the correct configuration 9 | options for your app. 
10 | 11 | */ 12 | 13 | #pragma once 14 | 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | 23 | #if defined (JUCE_PROJUCER_VERSION) && JUCE_PROJUCER_VERSION < JUCE_VERSION 24 | /** If you've hit this error then the version of the Projucer that was used to generate this project is 25 | older than the version of the JUCE modules being included. To fix this error, re-save your project 26 | using the latest version of the Projucer or, if you aren't using the Projucer to manage your project, 27 | remove the JUCE_PROJUCER_VERSION define from the AppConfig.h file. 28 | */ 29 | #error "This project was last saved using an outdated version of the Projucer! Re-save this project with the latest version to fix this error." 30 | #endif 31 | 32 | #if ! DONT_SET_USING_JUCE_NAMESPACE 33 | // If your code uses a lot of JUCE classes, then this will obviously save you 34 | // a lot of typing, but can be disabled by setting DONT_SET_USING_JUCE_NAMESPACE. 35 | using namespace juce; 36 | #endif 37 | 38 | #if ! JUCE_DONT_DECLARE_PROJECTINFO 39 | namespace ProjectInfo 40 | { 41 | const char* const projectName = "SDRNN_7to10"; 42 | const char* const companyName = "JUCE"; 43 | const char* const versionString = "1.0.0"; 44 | const int versionNumber = 0x10000; 45 | } 46 | #endif 47 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/ReadMe.txt: -------------------------------------------------------------------------------- 1 | 2 | Important Note!! 3 | ================ 4 | 5 | The purpose of this folder is to contain files that are auto-generated by the Projucer, 6 | and ALL files in this folder will be mercilessly DELETED and completely re-written whenever 7 | the Projucer saves your project. 8 | 9 | Therefore, it's a bad idea to make any manual changes to the files in here, or to 10 | put any of your own files in here if you don't want to lose them. 
(Of course you may choose 11 | to add the folder's contents to your version-control system so that you can re-merge your own 12 | modifications after the Projucer has saved its changes). 13 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_core.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_core.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_data_structures.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_data_structures.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 
5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_events.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_events.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_graphics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_graphics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_gui_basics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! 
This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/JuceLibraryCode/include_juce_gui_basics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/SDRNN_7to10.jucer: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Source/MLP.cpp: -------------------------------------------------------------------------------- 1 | #include "MLP.h" 2 | 3 | double frand(){ 4 | return (2.0*(double)rand() / RAND_MAX) - 1.0; 5 | } 6 | 7 | 8 | // Return a new Perceptron object with the specified number of inputs (+1 for the bias). 9 | Perceptron::Perceptron(int inputs, double bias){ 10 | this->bias = bias; 11 | weights.resize(inputs+1); 12 | generate(weights.begin(),weights.end(),frand); 13 | } 14 | 15 | // Run the perceptron. x is a vector with the input values. 16 | double Perceptron::run(vector x){ 17 | x.push_back(bias); 18 | double sum = inner_product(x.begin(),x.end(),weights.begin(),(double)0.0); 19 | return sigmoid(sum); 20 | } 21 | 22 | // Set the weights. w_init is a vector with the weights. 
23 | void Perceptron::set_weights(vector w_init){ 24 | weights = w_init; 25 | } 26 | 27 | // Evaluate the sigmoid function for the floating point input x. 28 | double Perceptron::sigmoid(double x){ 29 | return 1.0/(1.0 + exp(-x)); 30 | } 31 | 32 | 33 | // Return a new MultiLayerPerceptron object with the specified parameters. 34 | MultiLayerPerceptron::MultiLayerPerceptron(vector layers, double bias, double eta) { 35 | this->layers = layers; 36 | this->bias = bias; 37 | this->eta = eta; 38 | 39 | for (int i = 0; i < layers.size(); i++){ 40 | values.push_back(vector(layers[i],0.0)); 41 | d.push_back(vector(layers[i],0.0)); 42 | network.push_back(vector()); 43 | if (i > 0) //network[0] is the input layer,so it has no neurons 44 | for (int j = 0; j < layers[i]; j++) 45 | network[i].push_back(Perceptron(layers[i-1], bias)); 46 | } 47 | } 48 | 49 | 50 | // Set the weights. w_init is a vector of vectors of vectors with the weights for all but the input layer. 51 | void MultiLayerPerceptron::set_weights(vector > > w_init) { 52 | for (int i = 0; i< w_init.size(); i++) 53 | for (int j = 0; j < w_init[i].size(); j++) 54 | network[i+1][j].set_weights(w_init[i][j]); 55 | } 56 | 57 | void MultiLayerPerceptron::print_weights() { 58 | cout << endl; 59 | for (int i = 1; i < network.size(); i++){ 60 | for (int j = 0; j < layers[i]; j++) { 61 | cout << "Layer " << i+1 << " Neuron " << j << ": "; 62 | for (auto &it: network[i][j].weights) 63 | cout << it <<" "; 64 | cout << endl; 65 | } 66 | } 67 | cout << endl; 68 | } 69 | 70 | // Feed a sample x into the MultiLayer Perceptron. 71 | vector MultiLayerPerceptron::run(vector x) { 72 | values[0] = x; 73 | for (int i = 1; i < network.size(); i++) 74 | for (int j = 0; j < layers[i]; j++) 75 | values[i][j] = network[i][j].run(values[i-1]); 76 | return values.back(); 77 | } 78 | 79 | // Run a single (x,y) pair with the backpropagation algorithm. 
80 | double MultiLayerPerceptron::bp(vector x, vector y){ 81 | 82 | // Backpropagation Step by Step: 83 | 84 | // STEP 1: Feed a sample to the network 85 | vector outputs = run(x); 86 | 87 | // STEP 2: Calculate the MSE 88 | vector error; 89 | double MSE = 0.0; 90 | for (int i = 0; i < y.size(); i++){ 91 | error.push_back(y[i] - outputs[i]); 92 | MSE += error[i] * error[i]; 93 | } 94 | MSE /= layers.back(); 95 | 96 | // STEP 3: Calculate the output error terms 97 | for (int i = 0; i < outputs.size(); i++) 98 | d.back()[i] = outputs[i] * (1 - outputs[i]) * (error[i]); 99 | 100 | // STEP 4: Calculate the error term of each unit on each layer 101 | for (int i = network.size()-2; i > 0; i--) 102 | for (int h = 0; h < network[i].size(); h++){ 103 | double fwd_error = 0.0; 104 | for (int k = 0; k < layers[i+1]; k++) 105 | fwd_error += network[i+1][k].weights[h] * d[i+1][k]; 106 | d[i][h] = values[i][h] * (1-values[i][h]) * fwd_error; 107 | } 108 | 109 | // STEPS 5 & 6: Calculate the deltas and update the weights 110 | for (int i = 1; i < network.size(); i++) 111 | for (int j = 0; j < layers[i]; j++) 112 | for (int k = 0; k < layers[i-1]+1; k++){ 113 | double delta; 114 | if (k==layers[i-1]) 115 | delta = eta * d[i][j] * bias; 116 | else 117 | delta = eta * d[i][j] * values[i-1][k]; 118 | network[i][j].weights[k] += delta; 119 | } 120 | return MSE; 121 | } 122 | 123 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Source/MLP.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | using namespace std; 11 | 12 | class Perceptron { 13 | public: 14 | vector weights; 15 | double bias; 16 | Perceptron(int inputs, double bias=1.0); 17 | double run(vector x); 18 | void set_weights(vector w_init); 19 | double sigmoid(double x); 20 | }; 21 | 22 | class MultiLayerPerceptron { 23 | public: 
24 | MultiLayerPerceptron(vector layers, double bias=1.0, double eta = 0.5); 25 | void set_weights(vector > > w_init); 26 | void print_weights(); 27 | vector run(vector x); 28 | double bp(vector x, vector y); 29 | 30 | vector layers; 31 | double bias; 32 | double eta; 33 | vector > network; 34 | vector > values; 35 | vector > d; 36 | }; 37 | 38 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to10/Source/Main.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | ============================================================================== 3 | 4 | This file contains the startup code for a PIP. 5 | 6 | ============================================================================== 7 | */ 8 | 9 | #include 10 | #include "SDRNN_7to10_GUI.h" 11 | 12 | class Application : public juce::JUCEApplication 13 | { 14 | public: 15 | //============================================================================== 16 | Application() = default; 17 | 18 | const juce::String getApplicationName() override { return "SDRNN_7to10"; } 19 | const juce::String getApplicationVersion() override { return "1.0.0"; } 20 | 21 | void initialise (const juce::String&) override 22 | { 23 | mainWindow.reset (new MainWindow ("SDRNN_7to10", new MainContentComponent, *this)); 24 | } 25 | 26 | void shutdown() override { mainWindow = nullptr; } 27 | 28 | private: 29 | class MainWindow : public juce::DocumentWindow 30 | { 31 | public: 32 | MainWindow (const juce::String& name, juce::Component* c, JUCEApplication& a) 33 | : DocumentWindow (name, juce::Desktop::getInstance().getDefaultLookAndFeel() 34 | .findColour (ResizableWindow::backgroundColourId), 35 | juce::DocumentWindow::allButtons), 36 | app (a) 37 | { 38 | setUsingNativeTitleBar (true); 39 | setContentOwned (c, true); 40 | 41 | #if JUCE_ANDROID || JUCE_IOS 42 | setFullScreen (true); 43 | #else 44 | setResizable (true, false); 45 | setResizeLimits (300, 250, 
10000, 10000); 46 | centreWithSize (getWidth(), getHeight()); 47 | #endif 48 | 49 | setVisible (true); 50 | } 51 | 52 | void closeButtonPressed() override 53 | { 54 | app.systemRequestedQuit(); 55 | } 56 | 57 | private: 58 | JUCEApplication& app; 59 | 60 | //============================================================================== 61 | JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MainWindow) 62 | }; 63 | 64 | std::unique_ptr mainWindow; 65 | }; 66 | 67 | //============================================================================== 68 | START_JUCE_APPLICATION (Application) 69 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/LinuxMakefile/Makefile: -------------------------------------------------------------------------------- 1 | # Automatically generated makefile, created by the Projucer 2 | # Don't edit this file! Your changes will be overwritten when you re-save the Projucer project! 3 | 4 | # build with "V=1" for verbose builds 5 | ifeq ($(V), 1) 6 | V_AT = 7 | else 8 | V_AT = @ 9 | endif 10 | 11 | # (this disables dependency generation if multiple architectures are set) 12 | DEPFLAGS := $(if $(word 2, $(TARGET_ARCH)), , -MMD) 13 | 14 | ifndef STRIP 15 | STRIP=strip 16 | endif 17 | 18 | ifndef AR 19 | AR=ar 20 | endif 21 | 22 | ifndef CONFIG 23 | CONFIG=Debug 24 | endif 25 | 26 | JUCE_ARCH_LABEL := $(shell uname -m) 27 | 28 | ifeq ($(CONFIG),Debug) 29 | JUCE_BINDIR := build 30 | JUCE_LIBDIR := build 31 | JUCE_OBJDIR := build/intermediate/Debug 32 | JUCE_OUTDIR := build 33 | 34 | ifeq ($(TARGET_ARCH),) 35 | TARGET_ARCH := 36 | endif 37 | 38 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DDEBUG=1" "-D_DEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" "-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" 
"-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 39 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 40 | JUCE_TARGET_APP := SDRNN_7to7 41 | 42 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -g -ggdb -O0 $(CFLAGS) 43 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 44 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 45 | 46 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 47 | endif 48 | 49 | ifeq ($(CONFIG),Release) 50 | JUCE_BINDIR := build 51 | JUCE_LIBDIR := build 52 | JUCE_OBJDIR := build/intermediate/Release 53 | JUCE_OUTDIR := build 54 | 55 | ifeq ($(TARGET_ARCH),) 56 | TARGET_ARCH := 57 | endif 58 | 59 | JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DNDEBUG=1" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=1.0.0" "-DJUCE_APP_VERSION_HEX=0x10000" "-DJUCE_DISPLAY_SPLASH_SCREEN=0" "-DJUCE_USE_DARK_SPLASH_SCREEN=1" "-DJUCE_PROJUCER_VERSION=0x60004" "-DJUCE_MODULE_AVAILABLE_juce_core=1" "-DJUCE_MODULE_AVAILABLE_juce_data_structures=1" "-DJUCE_MODULE_AVAILABLE_juce_events=1" "-DJUCE_MODULE_AVAILABLE_juce_graphics=1" "-DJUCE_MODULE_AVAILABLE_juce_gui_basics=1" "-DJUCE_GLOBAL_MODULE_SETTINGS_INCLUDED=1" "-DJUCE_STANDALONE_APPLICATION=1" $(shell pkg-config --cflags freetype2 libcurl) -pthread -I../../JuceLibraryCode -I$(HOME)/JUCE/modules $(CPPFLAGS) 60 | JUCE_CPPFLAGS_APP := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" 
"-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0" 61 | JUCE_TARGET_APP := SDRNN_7to7 62 | 63 | JUCE_CFLAGS += $(JUCE_CPPFLAGS) $(TARGET_ARCH) -O3 $(CFLAGS) 64 | JUCE_CXXFLAGS += $(JUCE_CFLAGS) -std=c++14 $(CXXFLAGS) 65 | JUCE_LDFLAGS += $(TARGET_ARCH) -L$(JUCE_BINDIR) -L$(JUCE_LIBDIR) $(shell pkg-config --libs freetype2 libcurl) -fvisibility=hidden -lrt -ldl -lpthread $(LDFLAGS) 66 | 67 | CLEANCMD = rm -rf $(JUCE_OUTDIR)/$(TARGET) $(JUCE_OBJDIR) 68 | endif 69 | 70 | OBJECTS_APP := \ 71 | $(JUCE_OBJDIR)/Main_90ebc5c2.o \ 72 | $(JUCE_OBJDIR)/MLP_216ae892.o \ 73 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o \ 74 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o \ 75 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o \ 76 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o \ 77 | $(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o \ 78 | 79 | .PHONY: clean all strip 80 | 81 | all : $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) 82 | 83 | $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) : $(OBJECTS_APP) $(RESOURCES) 84 | @command -v pkg-config >/dev/null 2>&1 || { echo >&2 "pkg-config not installed. 
Please, install it."; exit 1; } 85 | @pkg-config --print-errors freetype2 libcurl 86 | @echo Linking "SDRNN_7to7 - App" 87 | -$(V_AT)mkdir -p $(JUCE_BINDIR) 88 | -$(V_AT)mkdir -p $(JUCE_LIBDIR) 89 | -$(V_AT)mkdir -p $(JUCE_OUTDIR) 90 | $(V_AT)$(CXX) -o $(JUCE_OUTDIR)/$(JUCE_TARGET_APP) $(OBJECTS_APP) $(JUCE_LDFLAGS) $(JUCE_LDFLAGS_APP) $(RESOURCES) $(TARGET_ARCH) 91 | 92 | $(JUCE_OBJDIR)/Main_90ebc5c2.o: ../../Source/Main.cpp 93 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 94 | @echo "Compiling Main.cpp" 95 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 96 | 97 | $(JUCE_OBJDIR)/MLP_216ae892.o: ../../Source/MLP.cpp 98 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 99 | @echo "Compiling MLP.cpp" 100 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 101 | 102 | $(JUCE_OBJDIR)/include_juce_core_f26d17db.o: ../../JuceLibraryCode/include_juce_core.cpp 103 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 104 | @echo "Compiling include_juce_core.cpp" 105 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 106 | 107 | $(JUCE_OBJDIR)/include_juce_data_structures_7471b1e3.o: ../../JuceLibraryCode/include_juce_data_structures.cpp 108 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 109 | @echo "Compiling include_juce_data_structures.cpp" 110 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 111 | 112 | $(JUCE_OBJDIR)/include_juce_events_fd7d695.o: ../../JuceLibraryCode/include_juce_events.cpp 113 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 114 | @echo "Compiling include_juce_events.cpp" 115 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 116 | 117 | $(JUCE_OBJDIR)/include_juce_graphics_f817e147.o: ../../JuceLibraryCode/include_juce_graphics.cpp 118 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 119 | @echo "Compiling include_juce_graphics.cpp" 120 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 121 | 122 | 
$(JUCE_OBJDIR)/include_juce_gui_basics_e3f79785.o: ../../JuceLibraryCode/include_juce_gui_basics.cpp 123 | -$(V_AT)mkdir -p $(JUCE_OBJDIR) 124 | @echo "Compiling include_juce_gui_basics.cpp" 125 | $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_APP) $(JUCE_CFLAGS_APP) -o "$@" -c "$<" 126 | 127 | clean: 128 | @echo Cleaning SDRNN_7to7 129 | $(V_AT)$(CLEANCMD) 130 | 131 | strip: 132 | @echo Stripping SDRNN_7to7 133 | -$(V_AT)$(STRIP) --strip-unneeded $(JUCE_OUTDIR)/$(TARGET) 134 | 135 | -include $(OBJECTS_APP:%.o=%.d) 136 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/MacOSX/Info-App.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | CFBundleExecutable 7 | ${EXECUTABLE_NAME} 8 | CFBundleIconFile 9 | 10 | CFBundleIdentifier 11 | com.JUCE.SDRNN_7to7 12 | CFBundleName 13 | SDRNN_7to7 14 | CFBundleDisplayName 15 | SDRNN_7to7 16 | CFBundlePackageType 17 | APPL 18 | CFBundleSignature 19 | ???? 
20 | CFBundleShortVersionString 21 | 1.0.0 22 | CFBundleVersion 23 | 1.0.0 24 | NSHumanReadableCopyright 25 | 26 | NSHighResolutionCapable 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/MacOSX/RecentFilesMenuTemplate.nib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aryashah2k/TrainNeuralNetworksCPlusPlus/9e80af206fa9224665df5d990a15a20e089bebda/SDRNN/SDRNN_7to7/Builds/MacOSX/RecentFilesMenuTemplate.nib -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/MacOSX/SDRNN_7to7.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | BuildSystemType 6 | Original 7 | DisableBuildSystemDeprecationWarning 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/VisualStudio2019/SDRNN_7to7.sln: -------------------------------------------------------------------------------- 1 | Microsoft Visual Studio Solution File, Format Version 11.00 2 | # Visual Studio 2019 3 | 4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SDRNN_7to7 - App", "SDRNN_7to7_App.vcxproj", "{FC3BE287-0BD7-12B8-1EE0-45F031C7E46B}" 5 | EndProject 6 | Global 7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 8 | Debug|x64 = Debug|x64 9 | Release|x64 = Release|x64 10 | EndGlobalSection 11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 12 | {FC3BE287-0BD7-12B8-1EE0-45F031C7E46B}.Debug|x64.ActiveCfg = Debug|x64 13 | {FC3BE287-0BD7-12B8-1EE0-45F031C7E46B}.Debug|x64.Build.0 = Debug|x64 14 | {FC3BE287-0BD7-12B8-1EE0-45F031C7E46B}.Release|x64.ActiveCfg = Release|x64 15 | {FC3BE287-0BD7-12B8-1EE0-45F031C7E46B}.Release|x64.Build.0 = Release|x64 16 | EndGlobalSection 17 | GlobalSection(SolutionProperties) 
= preSolution 18 | HideSolutionNode = FALSE 19 | EndGlobalSection 20 | EndGlobal 21 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Builds/VisualStudio2019/resources.rc: -------------------------------------------------------------------------------- 1 | #pragma code_page(65001) 2 | 3 | #ifdef JUCE_USER_DEFINED_RC_FILE 4 | #include JUCE_USER_DEFINED_RC_FILE 5 | #else 6 | 7 | #undef WIN32_LEAN_AND_MEAN 8 | #define WIN32_LEAN_AND_MEAN 9 | #include 10 | 11 | VS_VERSION_INFO VERSIONINFO 12 | FILEVERSION 1,0,0,0 13 | BEGIN 14 | BLOCK "StringFileInfo" 15 | BEGIN 16 | BLOCK "040904E4" 17 | BEGIN 18 | VALUE "CompanyName", "JUCE\0" 19 | VALUE "FileDescription", "SDRNN_7to7\0" 20 | VALUE "FileVersion", "1.0.0\0" 21 | VALUE "ProductName", "SDRNN_7to7\0" 22 | VALUE "ProductVersion", "1.0.0\0" 23 | END 24 | END 25 | 26 | BLOCK "VarFileInfo" 27 | BEGIN 28 | VALUE "Translation", 0x409, 1252 29 | END 30 | END 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/JuceHeader.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | This is the header file that your files should include in order to get all the 7 | JUCE library headers. You should avoid including the JUCE headers directly in 8 | your own source files, because that wouldn't pick up the correct configuration 9 | options for your app. 
10 | 11 | */ 12 | 13 | #pragma once 14 | 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | 23 | #if defined (JUCE_PROJUCER_VERSION) && JUCE_PROJUCER_VERSION < JUCE_VERSION 24 | /** If you've hit this error then the version of the Projucer that was used to generate this project is 25 | older than the version of the JUCE modules being included. To fix this error, re-save your project 26 | using the latest version of the Projucer or, if you aren't using the Projucer to manage your project, 27 | remove the JUCE_PROJUCER_VERSION define from the AppConfig.h file. 28 | */ 29 | #error "This project was last saved using an outdated version of the Projucer! Re-save this project with the latest version to fix this error." 30 | #endif 31 | 32 | #if ! DONT_SET_USING_JUCE_NAMESPACE 33 | // If your code uses a lot of JUCE classes, then this will obviously save you 34 | // a lot of typing, but can be disabled by setting DONT_SET_USING_JUCE_NAMESPACE. 35 | using namespace juce; 36 | #endif 37 | 38 | #if ! JUCE_DONT_DECLARE_PROJECTINFO 39 | namespace ProjectInfo 40 | { 41 | const char* const projectName = "SDRNN_7to7"; 42 | const char* const companyName = "JUCE"; 43 | const char* const versionString = "1.0.0"; 44 | const int versionNumber = 0x10000; 45 | } 46 | #endif 47 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/ReadMe.txt: -------------------------------------------------------------------------------- 1 | 2 | Important Note!! 3 | ================ 4 | 5 | The purpose of this folder is to contain files that are auto-generated by the Projucer, 6 | and ALL files in this folder will be mercilessly DELETED and completely re-written whenever 7 | the Projucer saves your project. 8 | 9 | Therefore, it's a bad idea to make any manual changes to the files in here, or to 10 | put any of your own files in here if you don't want to lose them. 
(Of course you may choose 11 | to add the folder's contents to your version-control system so that you can re-merge your own 12 | modifications after the Projucer has saved its changes). 13 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_core.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_core.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_data_structures.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_data_structures.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 
5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_events.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_events.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_graphics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_graphics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_gui_basics.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! 
This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/JuceLibraryCode/include_juce_gui_basics.mm: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | IMPORTANT! This file is auto-generated each time you save your 4 | project - if you alter its contents, your changes may be overwritten! 5 | 6 | */ 7 | 8 | #include 9 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/SDRNN_7to7.jucer: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /SDRNN/SDRNN_7to7/Source/MLP.cpp: -------------------------------------------------------------------------------- 1 | #include "MLP.h" 2 | 3 | double frand(){ 4 | return (2.0*(double)rand() / RAND_MAX) - 1.0; 5 | } 6 | 7 | 8 | // Return a new Perceptron object with the specified number of inputs (+1 for the bias). 9 | Perceptron::Perceptron(int inputs, double bias){ 10 | this->bias = bias; 11 | weights.resize(inputs+1); 12 | generate(weights.begin(),weights.end(),frand); 13 | } 14 | 15 | // Run the perceptron. x is a vector with the input values. 16 | double Perceptron::run(vector x){ 17 | x.push_back(bias); 18 | double sum = inner_product(x.begin(),x.end(),weights.begin(),(double)0.0); 19 | return sigmoid(sum); 20 | } 21 | 22 | // Set the weights. w_init is a vector with the weights. 
23 | void Perceptron::set_weights(vector w_init){ 24 | weights = w_init; 25 | } 26 | 27 | // Evaluate the sigmoid function for the floating point input x. 28 | double Perceptron::sigmoid(double x){ 29 | return 1.0/(1.0 + exp(-x)); 30 | } 31 | 32 | 33 | // Return a new MultiLayerPerceptron object with the specified parameters. 34 | MultiLayerPerceptron::MultiLayerPerceptron(vector layers, double bias, double eta) { 35 | this->layers = layers; 36 | this->bias = bias; 37 | this->eta = eta; 38 | 39 | for (int i = 0; i < layers.size(); i++){ 40 | values.push_back(vector(layers[i],0.0)); 41 | d.push_back(vector(layers[i],0.0)); 42 | network.push_back(vector()); 43 | if (i > 0) //network[0] is the input layer,so it has no neurons 44 | for (int j = 0; j < layers[i]; j++) 45 | network[i].push_back(Perceptron(layers[i-1], bias)); 46 | } 47 | } 48 | 49 | 50 | // Set the weights. w_init is a vector of vectors of vectors with the weights for all but the input layer. 51 | void MultiLayerPerceptron::set_weights(vector > > w_init) { 52 | for (int i = 0; i< w_init.size(); i++) 53 | for (int j = 0; j < w_init[i].size(); j++) 54 | network[i+1][j].set_weights(w_init[i][j]); 55 | } 56 | 57 | void MultiLayerPerceptron::print_weights() { 58 | cout << endl; 59 | for (int i = 1; i < network.size(); i++){ 60 | for (int j = 0; j < layers[i]; j++) { 61 | cout << "Layer " << i+1 << " Neuron " << j << ": "; 62 | for (auto &it: network[i][j].weights) 63 | cout << it <<" "; 64 | cout << endl; 65 | } 66 | } 67 | cout << endl; 68 | } 69 | 70 | // Feed a sample x into the MultiLayer Perceptron. 71 | vector MultiLayerPerceptron::run(vector x) { 72 | values[0] = x; 73 | for (int i = 1; i < network.size(); i++) 74 | for (int j = 0; j < layers[i]; j++) 75 | values[i][j] = network[i][j].run(values[i-1]); 76 | return values.back(); 77 | } 78 | 79 | // Run a single (x,y) pair with the backpropagation algorithm. 
80 | double MultiLayerPerceptron::bp(vector<double> x, vector<double> y){   // one online-training step on sample (x, y); returns the sample's MSE
81 | 
82 |     // Backpropagation Step by Step:
83 | 
84 |     // STEP 1: Feed a sample to the network
85 |     vector<double> outputs = run(x);
86 | 
87 |     // STEP 2: Calculate the MSE
88 |     vector<double> error;
89 |     double MSE = 0.0;
90 |     for (int i = 0; i < y.size(); i++){
91 |         error.push_back(y[i] - outputs[i]);
92 |         MSE += error[i] * error[i];
93 |     }
94 |     MSE /= layers.back();   // average over the size of the output layer
95 | 
96 |     // STEP 3: Calculate the output error terms
97 |     for (int i = 0; i < outputs.size(); i++)
98 |         d.back()[i] = outputs[i] * (1 - outputs[i]) * (error[i]);   // delta rule: sigmoid'(out) * error
99 | 
100 |     // STEP 4: Calculate the error term of each unit on each layer
101 |     for (int i = network.size()-2; i > 0; i--)   // hidden layers only, back to front (layer 0 is the input layer)
102 |         for (int h = 0; h < network[i].size(); h++){
103 |             double fwd_error = 0.0;
104 |             for (int k = 0; k < layers[i+1]; k++)   // sum the errors propagated back from the next layer
105 |                 fwd_error += network[i+1][k].weights[h] * d[i+1][k];
106 |             d[i][h] = values[i][h] * (1-values[i][h]) * fwd_error;
107 |         }
108 | 
109 |     // STEPS 5 & 6: Calculate the deltas and update the weights
110 |     for (int i = 1; i < network.size(); i++)
111 |         for (int j = 0; j < layers[i]; j++)
112 |             for (int k = 0; k < layers[i-1]+1; k++){   // +1: the last weight of each neuron is its bias weight
113 |                 double delta;
114 |                 if (k==layers[i-1])
115 |                     delta = eta * d[i][j] * bias;
116 |                 else
117 |                     delta = eta * d[i][j] * values[i-1][k];
118 |                 network[i][j].weights[k] += delta;
119 |             }
120 |     return MSE;
121 | }
122 | 
123 | 

--------------------------------------------------------------------------------
/SDRNN/SDRNN_7to7/Source/MLP.h:
--------------------------------------------------------------------------------

1 | #pragma once
2 | #include <iostream>
3 | #include <vector>
4 | #include <algorithm>
5 | #include <numeric>
6 | #include <time.h>
7 | #include <stdlib.h>
8 | #include <math.h>
9 | 
10 | using namespace std;
11 | 
12 | class Perceptron {
13 | public:
14 |     vector<double> weights;   // one weight per input, plus a trailing bias weight
15 |     double bias;
16 |     Perceptron(int inputs, double bias=1.0);
17 |     double run(vector<double> x);
18 |     void set_weights(vector<double> w_init);
19 |     double sigmoid(double x);
20 | };
21 | 
22 | class MultiLayerPerceptron {   // NOTE(review): all <...> template arguments in this header were reconstructed — the dump had stripped them; verify against upstream
23 | public:
24 |     MultiLayerPerceptron(vector<int> layers, double bias=1.0, double eta = 0.5);
25 |     void set_weights(vector<vector<vector<double> > > w_init);
26 |     void print_weights();
27 |     vector<double> run(vector<double> x);
28 |     double bp(vector<double> x, vector<double> y);
29 | 
30 |     vector<int> layers;   // number of neurons per layer, including the input layer
31 |     double bias;
32 |     double eta;   // learning rate
33 |     vector<vector<Perceptron> > network;   // network[layer][neuron]
34 |     vector<vector<double> > values;   // per-neuron outputs from the last run()
35 |     vector<vector<double> > d;   // per-neuron error terms from the last bp()
36 | };
37 | 
38 | 

--------------------------------------------------------------------------------
/SDRNN/SDRNN_7to7/Source/Main.cpp:
--------------------------------------------------------------------------------

1 | /*
2 |   ==============================================================================
3 | 
4 |     This file contains the startup code for a PIP.
5 | 
6 |   ==============================================================================
7 | */
8 | 
9 | #include <JuceHeader.h>
10 | #include "SDRNN_7to7_GUI.h"
11 | 
12 | class Application : public juce::JUCEApplication
13 | {
14 | public:
15 |     //==============================================================================
16 |     Application() = default;
17 | 
18 |     const juce::String getApplicationName() override       { return "SDRNN_7to7"; }
19 |     const juce::String getApplicationVersion() override    { return "1.0.0"; }
20 | 
21 |     void initialise (const juce::String&) override
22 |     {
23 |         mainWindow.reset (new MainWindow ("SDRNN_7to7", new MainContentComponent, *this));
24 |     }
25 | 
26 |     void shutdown() override { mainWindow = nullptr; }
27 | 
28 | private:
29 |     class MainWindow : public juce::DocumentWindow
30 |     {
31 |     public:
32 |         MainWindow (const juce::String& name, juce::Component* c, JUCEApplication& a)
33 |             : DocumentWindow (name, juce::Desktop::getInstance().getDefaultLookAndFeel()
34 |                                                   .findColour (ResizableWindow::backgroundColourId),
35 |                               juce::DocumentWindow::allButtons),
36 |               app (a)
37 |         {
38 |             setUsingNativeTitleBar (true);
39 |             setContentOwned (c, true);
40 | 
41 |            #if JUCE_ANDROID || JUCE_IOS
42 |             setFullScreen (true);
43 |            #else
44 |             setResizable (true, false);
45 |             setResizeLimits (300, 250, 10000, 10000);
46 |             centreWithSize (getWidth(), getHeight());
47 |            #endif
48 | 
49 |             setVisible (true);
50 |         }
51 | 
52 |         void closeButtonPressed() override
53 |         {
54 |             app.systemRequestedQuit();
55 |         }
56 | 
57 |     private:
58 |         JUCEApplication& app;
59 | 
60 |         //==============================================================================
61 |         JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MainWindow)
62 |     };
63 | 
64 |     std::unique_ptr<MainWindow> mainWindow;   // NOTE(review): <MainWindow> reconstructed — the dump had stripped it
65 | };
66 | 
67 | //==============================================================================
68 | START_JUCE_APPLICATION (Application)
69 | 
--------------------------------------------------------------------------------