├── .gitignore
├── Example
│   ├── EXAMPLE.md
│   └── ExampleDataSet.csv
├── LICENSE
├── README.md
└── Src
    ├── NeuralNetwork.args.json
    ├── NeuralNetwork.sln
    ├── NeuralNetwork.vcxproj
    ├── NeuralNetwork.vcxproj.filters
    ├── NeuralNetwork.vcxproj.user
    ├── NeuralNetwork
    │   ├── NeuralNetwork.cpp
    │   ├── NeuralNetwork.h
    │   ├── NeuralNetworkTrainer.cpp
    │   ├── NeuralNetworkTrainer.h
    │   ├── TrainingDataReader.cpp
    │   └── TrainingDataReader.h
    ├── cmdParser.h
    └── main.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | **/Build
2 | **/.vs
3 |
--------------------------------------------------------------------------------
/Example/EXAMPLE.md:
--------------------------------------------------------------------------------
1 | # NeuralNetwork Example
2 |
3 | You can use the supplied training data file to test the neural network...
4 |
5 | Run the compiled exe with the following parameters:
6 |
7 | -d ExampleDataSet.csv -in 16 -hidden 16 -out 3
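8 | 
9 | Based on how `TrainingDataReader` parses the file, each line holds one training entry: the input values followed by the expected output values, comma-separated (16 inputs + 3 outputs for the parameters above). A purely illustrative row, not taken from the data file:
10 | 
11 | 0.12,0.5,0.33,0.8,0.1,0.0,0.95,0.42,0.7,0.05,0.61,0.23,0.88,0.14,0.39,0.52,1,0,0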
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 BobbyAnguelov
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NeuralNetwork
2 | Simple Back Propagation Neural Network
3 |
4 | The example code makes use of Florian Rappl's command parser: [github](https://github.com/FlorianRappl/CmdParser)
5 |
6 | # Disclaimer
7 | This code is meant to be a simple implementation of the back-propagation neural network discussed in the tutorials below:
8 |
9 | [https://takinginitiative.wordpress.com/2008/04/03/basic-neural-network-tutorial-theory/](https://takinginitiative.wordpress.com/2008/04/03/basic-neural-network-tutorial-theory/)
10 |
11 | [https://takinginitiative.wordpress.com/2008/04/23/basic-neural-network-tutorial-c-implementation-and-source-code/](https://takinginitiative.wordpress.com/2008/04/23/basic-neural-network-tutorial-c-implementation-and-source-code/)
12 |
13 | It is intended as a reference/example implementation and will not be maintained or supported.
--------------------------------------------------------------------------------
/Src/NeuralNetwork.args.json:
--------------------------------------------------------------------------------
1 | {
2 | "DataCollection": [
3 | {
4 | "Id": "e545a2d5-189a-4686-89ff-6aeb892bfb50",
5 | "Command": "-d ../../Example/ExampleDataSet.csv -in 16 -hidden 16 -out 3"
6 | }
7 | ]
8 | }
--------------------------------------------------------------------------------
/Src/NeuralNetwork.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 15
4 | VisualStudioVersion = 15.0.26430.16
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "NeuralNetwork", "NeuralNetwork.vcxproj", "{BAF18831-29E0-404C-9321-1F2B233E3014}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|x64 = Debug|x64
11 | Release|x64 = Release|x64
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {BAF18831-29E0-404C-9321-1F2B233E3014}.Debug|x64.ActiveCfg = Debug|x64
15 | {BAF18831-29E0-404C-9321-1F2B233E3014}.Debug|x64.Build.0 = Debug|x64
16 | {BAF18831-29E0-404C-9321-1F2B233E3014}.Release|x64.ActiveCfg = Release|x64
17 | {BAF18831-29E0-404C-9321-1F2B233E3014}.Release|x64.Build.0 = Release|x64
18 | EndGlobalSection
19 | GlobalSection(SolutionProperties) = preSolution
20 | HideSolutionNode = FALSE
21 | EndGlobalSection
22 | EndGlobal
23 |
--------------------------------------------------------------------------------
/Src/NeuralNetwork.vcxproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <ItemGroup Label="ProjectConfigurations">
4 |     <ProjectConfiguration Include="Debug|x64">
5 |       <Configuration>Debug</Configuration>
6 |       <Platform>x64</Platform>
7 |     </ProjectConfiguration>
8 |     <ProjectConfiguration Include="Release|x64">
9 |       <Configuration>Release</Configuration>
10 |       <Platform>x64</Platform>
11 |     </ProjectConfiguration>
12 |   </ItemGroup>
13 |   <PropertyGroup Label="Globals">
14 |     <VCProjectVersion>15.0</VCProjectVersion>
15 |     <ProjectGuid>{BAF18831-29E0-404C-9321-1F2B233E3014}</ProjectGuid>
16 |     <Keyword>Win32Proj</Keyword>
17 |     <RootNamespace>NeuralNetwork</RootNamespace>
18 |     <WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>
19 |   </PropertyGroup>
20 |   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
21 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
22 |     <ConfigurationType>Application</ConfigurationType>
23 |     <UseDebugLibraries>true</UseDebugLibraries>
24 |     <PlatformToolset>v141</PlatformToolset>
25 |     <CharacterSet>Unicode</CharacterSet>
26 |   </PropertyGroup>
27 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
28 |     <ConfigurationType>Application</ConfigurationType>
29 |     <UseDebugLibraries>false</UseDebugLibraries>
30 |     <PlatformToolset>v141</PlatformToolset>
31 |     <WholeProgramOptimization>true</WholeProgramOptimization>
32 |     <CharacterSet>Unicode</CharacterSet>
33 |   </PropertyGroup>
34 |   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
35 |   <ImportGroup Label="ExtensionSettings">
36 |   </ImportGroup>
37 |   <ImportGroup Label="Shared">
38 |   </ImportGroup>
39 |   <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
40 |     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
41 |   </ImportGroup>
42 |   <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
43 |     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
44 |   </ImportGroup>
45 |   <PropertyGroup Label="UserMacros" />
46 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
47 |     <LinkIncremental>true</LinkIncremental>
48 |     <OutDir>$(SolutionDir)\..\Build\$(Platform)_$(Configuration)\</OutDir>
49 |     <IntDir>$(SolutionDir)\..\Build\Int\$(Platform)_$(Configuration)\</IntDir>
50 |   </PropertyGroup>
51 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
52 |     <LinkIncremental>false</LinkIncremental>
53 |     <OutDir>$(SolutionDir)\..\Build\$(Platform)_$(Configuration)\</OutDir>
54 |     <IntDir>$(SolutionDir)\..\Build\Int\$(Platform)_$(Configuration)\</IntDir>
55 |   </PropertyGroup>
56 |   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
57 |     <ClCompile>
58 |       <WarningLevel>Level4</WarningLevel>
59 |       <Optimization>Disabled</Optimization>
60 |       <PreprocessorDefinitions>_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
61 |       <SDLCheck>true</SDLCheck>
62 |     </ClCompile>
63 |     <Link>
64 |       <SubSystem>Console</SubSystem>
65 |     </Link>
66 |   </ItemDefinitionGroup>
67 |   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
68 |     <ClCompile>
69 |       <WarningLevel>Level4</WarningLevel>
70 |       <Optimization>MaxSpeed</Optimization>
71 |       <FunctionLevelLinking>true</FunctionLevelLinking>
72 |       <IntrinsicFunctions>true</IntrinsicFunctions>
73 |       <PreprocessorDefinitions>NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
74 |       <SDLCheck>true</SDLCheck>
75 |     </ClCompile>
76 |     <Link>
77 |       <SubSystem>Console</SubSystem>
78 |       <EnableCOMDATFolding>true</EnableCOMDATFolding>
79 |       <OptimizeReferences>true</OptimizeReferences>
80 |     </Link>
81 |   </ItemDefinitionGroup>
82 |   <ItemGroup>
83 |     <ClCompile Include="main.cpp" />
84 |     <ClCompile Include="NeuralNetwork\NeuralNetwork.cpp" />
85 |     <ClCompile Include="NeuralNetwork\NeuralNetworkTrainer.cpp" />
86 |     <ClCompile Include="NeuralNetwork\TrainingDataReader.cpp" />
87 |   </ItemGroup>
88 |   <ItemGroup>
89 |     <ClInclude Include="cmdParser.h" />
90 |     <ClInclude Include="NeuralNetwork\NeuralNetwork.h" />
91 |     <ClInclude Include="NeuralNetwork\NeuralNetworkTrainer.h" />
92 |     <ClInclude Include="NeuralNetwork\TrainingDataReader.h" />
93 |   </ItemGroup>
94 |   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
95 |   <ImportGroup Label="ExtensionTargets">
96 |   </ImportGroup>
97 | </Project>
--------------------------------------------------------------------------------
/Src/NeuralNetwork.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/Src/NeuralNetwork.vcxproj.user:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <ShowAllFiles>true</ShowAllFiles>
5 |   </PropertyGroup>
6 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
7 |     <LocalDebuggerWorkingDirectory>$(OutDir)</LocalDebuggerWorkingDirectory>
8 |     <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
9 |   </PropertyGroup>
10 |   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
11 |     <LocalDebuggerCommandArguments>-d ../../Example/ExampleDataSet.csv -in 16 -hidden 16 -out 3</LocalDebuggerCommandArguments>
12 |     <LocalDebuggerWorkingDirectory>$(OutDir)</LocalDebuggerWorkingDirectory>
13 |     <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
14 |   </PropertyGroup>
15 | </Project>
--------------------------------------------------------------------------------
/Src/NeuralNetwork/NeuralNetwork.cpp:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 |
7 | #include "NeuralNetwork.h"
8 | #include <cassert>
9 | #include <cstring>
10 | #include <random>
11 | #include <cstdint>
12 |
13 | //-------------------------------------------------------------------------
14 |
15 | namespace BPN
16 | {
17 | Network::Network( Settings const& settings )
18 | : m_numInputs( settings.m_numInputs )
19 | , m_numHidden( settings.m_numHidden )
20 | , m_numOutputs( settings.m_numOutputs )
21 | {
22 | assert( settings.m_numInputs > 0 && settings.m_numOutputs > 0 && settings.m_numHidden > 0 );
23 | InitializeNetwork();
24 | InitializeWeights();
25 | }
26 |
27 | Network::Network( Settings const& settings, std::vector<double> const& weights )
28 | : m_numInputs( settings.m_numInputs )
29 | , m_numHidden( settings.m_numHidden )
30 | , m_numOutputs( settings.m_numOutputs )
31 | {
32 | assert( settings.m_numInputs > 0 && settings.m_numOutputs > 0 && settings.m_numHidden > 0 );
33 | InitializeNetwork();
34 | LoadWeights( weights );
35 | }
36 |
37 | void Network::InitializeNetwork()
38 | {
39 | // Create storage and initialize the neurons and the outputs
40 | //-------------------------------------------------------------------------
41 |
42 | // Add bias neurons
43 | int32_t const totalNumInputs = m_numInputs + 1;
44 | int32_t const totalNumHiddens = m_numHidden + 1;
45 |
46 | m_inputNeurons.resize( totalNumInputs );
47 | m_hiddenNeurons.resize( totalNumHiddens );
48 | m_outputNeurons.resize( m_numOutputs );
49 | m_clampedOutputs.resize( m_numOutputs );
50 |
51 | memset( m_inputNeurons.data(), 0, m_inputNeurons.size() * sizeof( double ) );
52 | memset( m_hiddenNeurons.data(), 0, m_hiddenNeurons.size() * sizeof( double ) );
53 | memset( m_outputNeurons.data(), 0, m_outputNeurons.size() * sizeof( double ) );
54 | memset( m_clampedOutputs.data(), 0, m_clampedOutputs.size() * sizeof( int32_t ) );
55 |
56 | // Set bias values
57 | m_inputNeurons.back() = -1.0;
58 | m_hiddenNeurons.back() = -1.0;
59 |
60 | // Create storage and initialize the layer weights
61 | //-------------------------------------------------------------------------
62 |
63 | int32_t const numInputHiddenWeights = totalNumInputs * m_numHidden; // The hidden bias neuron has no incoming weights
64 | int32_t const numHiddenOutputWeights = totalNumHiddens * m_numOutputs;
65 | m_weightsInputHidden.resize( numInputHiddenWeights );
66 | m_weightsHiddenOutput.resize( numHiddenOutputWeights );
67 | }
68 |
69 | void Network::InitializeWeights()
70 | {
71 | std::random_device rd;
72 | std::mt19937 generator( rd() );
73 |
74 | double const distributionRangeHalfWidth = ( 2.4 / m_numInputs );
75 | double const standardDeviation = distributionRangeHalfWidth * 2 / 6;
76 | std::normal_distribution<> normalDistribution( 0, standardDeviation );
77 |
78 | // Initialize input -> hidden weights: normally distributed values with ~99.7% (3 sigma) inside [-2.4 / numInputs, 2.4 / numInputs]
79 | for ( int32_t inputIdx = 0; inputIdx <= m_numInputs; inputIdx++ )
80 | {
81 | for ( int32_t hiddenIdx = 0; hiddenIdx < m_numHidden; hiddenIdx++ )
82 | {
83 | int32_t const weightIdx = GetInputHiddenWeightIndex( inputIdx, hiddenIdx );
84 | double const weight = normalDistribution( generator );
85 | m_weightsInputHidden[weightIdx] = weight;
86 | }
87 | }
88 |
89 | // Initialize hidden -> output weights with the same distribution
90 | for ( int32_t hiddenIdx = 0; hiddenIdx <= m_numHidden; hiddenIdx++ )
91 | {
92 | for ( int32_t outputIdx = 0; outputIdx < m_numOutputs; outputIdx++ )
93 | {
94 | int32_t const weightIdx = GetHiddenOutputWeightIndex( hiddenIdx, outputIdx );
95 | double const weight = normalDistribution( generator );
96 | m_weightsHiddenOutput[weightIdx] = weight;
97 | }
98 | }
99 | }
100 |
101 | void Network::LoadWeights( std::vector<double> const& weights )
102 | {
103 | int32_t const numInputHiddenWeights = ( m_numInputs + 1 ) * m_numHidden; // Includes the input bias neuron's weights
104 | int32_t const numHiddenOutputWeights = ( m_numHidden + 1 ) * m_numOutputs; // Includes the hidden bias neuron's weights
105 | assert( weights.size() == size_t( numInputHiddenWeights + numHiddenOutputWeights ) );
106 |
107 | int32_t weightIdx = 0;
108 | for ( auto InputHiddenIdx = 0; InputHiddenIdx < numInputHiddenWeights; InputHiddenIdx++ )
109 | {
110 | m_weightsInputHidden[InputHiddenIdx] = weights[weightIdx];
111 | weightIdx++;
112 | }
113 |
114 | for ( auto HiddenOutputIdx = 0; HiddenOutputIdx < numHiddenOutputWeights; HiddenOutputIdx++ )
115 | {
116 | m_weightsHiddenOutput[HiddenOutputIdx] = weights[weightIdx];
117 | weightIdx++;
118 | }
119 | }
120 |
121 | std::vector<int32_t> const& Network::Evaluate( std::vector<double> const& input )
122 | {
123 | assert( input.size() == m_numInputs );
124 | assert( m_inputNeurons.back() == -1.0 && m_hiddenNeurons.back() == -1.0 );
125 |
126 | // Set input values
127 | //-------------------------------------------------------------------------
128 |
129 | memcpy( m_inputNeurons.data(), input.data(), input.size() * sizeof( double ) );
130 |
131 | // Update hidden neurons
132 | //-------------------------------------------------------------------------
133 |
134 | for ( int32_t hiddenIdx = 0; hiddenIdx < m_numHidden; hiddenIdx++ )
135 | {
136 | m_hiddenNeurons[hiddenIdx] = 0;
137 |
138 | // Get weighted sum of pattern and bias neuron
139 | for ( int32_t inputIdx = 0; inputIdx <= m_numInputs; inputIdx++ )
140 | {
141 | int32_t const weightIdx = GetInputHiddenWeightIndex( inputIdx, hiddenIdx );
142 | m_hiddenNeurons[hiddenIdx] += m_inputNeurons[inputIdx] * m_weightsInputHidden[weightIdx];
143 | }
144 |
145 | // Apply activation function
146 | m_hiddenNeurons[hiddenIdx] = SigmoidActivationFunction( m_hiddenNeurons[hiddenIdx] );
147 | }
148 |
149 | // Calculate output values - include bias neuron
150 | //-------------------------------------------------------------------------
151 |
152 | for ( int32_t outputIdx = 0; outputIdx < m_numOutputs; outputIdx++ )
153 | {
154 | m_outputNeurons[outputIdx] = 0;
155 |
156 | // Get weighted sum of pattern and bias neuron
157 | for ( int32_t hiddenIdx = 0; hiddenIdx <= m_numHidden; hiddenIdx++ )
158 | {
159 | int32_t const weightIdx = GetHiddenOutputWeightIndex( hiddenIdx, outputIdx );
160 | m_outputNeurons[outputIdx] += m_hiddenNeurons[hiddenIdx] * m_weightsHiddenOutput[weightIdx];
161 | }
162 |
163 | // Apply activation function and clamp the result
164 | m_outputNeurons[outputIdx] = SigmoidActivationFunction( m_outputNeurons[outputIdx] );
165 | m_clampedOutputs[outputIdx] = ClampOutputValue( m_outputNeurons[outputIdx] );
166 | }
167 |
168 | return m_clampedOutputs;
169 | }
170 | }
--------------------------------------------------------------------------------
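The two constructors above let weights round-trip: the getters expose a trained network's weights in the same order `LoadWeights` consumes them (input->hidden block first, then hidden->output). A minimal sketch; the helper name is hypothetical, not part of the repository:

```cpp
#include "NeuralNetwork/NeuralNetwork.h"

// Rebuild an identical network from a trained one by concatenating its
// input->hidden and hidden->output weight blocks in LoadWeights() order.
BPN::Network CloneNetwork( BPN::Network const& trained, BPN::Network::Settings const& settings )
{
    std::vector<double> weights = trained.GetInputHiddenWeights();
    std::vector<double> const& hiddenOutput = trained.GetHiddenOutputWeights();
    weights.insert( weights.end(), hiddenOutput.begin(), hiddenOutput.end() );
    return BPN::Network( settings, weights );
}
```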
/Src/NeuralNetwork/NeuralNetwork.h:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 | // A simple neural network supporting only a single hidden layer
7 |
8 | #pragma once
9 | #include <cstdint>
10 | #include <vector>
11 | #include <cmath>
12 | //-------------------------------------------------------------------------
13 |
14 | namespace BPN
15 | {
16 | enum class ActivationFunctionType
17 | {
18 | Sigmoid
19 | };
20 |
21 | //-------------------------------------------------------------------------
22 |
23 | class Network
24 | {
25 | friend class NetworkTrainer;
26 |
27 | //-------------------------------------------------------------------------
28 |
29 | inline static double SigmoidActivationFunction( double x )
30 | {
31 | return 1.0 / ( 1.0 + std::exp( -x ) );
32 | }
33 |
34 | inline static int32_t ClampOutputValue( double x )
35 | {
36 | if ( x < 0.1 ) return 0;
37 | else if ( x > 0.9 ) return 1;
38 | else return -1;
39 | }
40 |
41 | public:
42 |
43 | struct Settings
44 | {
45 | uint32_t m_numInputs;
46 | uint32_t m_numHidden;
47 | uint32_t m_numOutputs;
48 | };
49 |
50 | public:
51 |
52 | Network( Settings const& settings );
53 | Network( Settings const& settings, std::vector<double> const& weights );
54 | 
55 | std::vector<int32_t> const& Evaluate( std::vector<double> const& input );
56 | 
57 | std::vector<double> const& GetInputHiddenWeights() const { return m_weightsInputHidden; }
58 | std::vector<double> const& GetHiddenOutputWeights() const { return m_weightsHiddenOutput; }
59 |
60 | private:
61 |
62 | void InitializeNetwork();
63 | void InitializeWeights();
64 | void LoadWeights( std::vector<double> const& weights );
65 |
66 | int32_t GetInputHiddenWeightIndex( int32_t inputIdx, int32_t hiddenIdx ) const { return inputIdx * m_numHidden + hiddenIdx; }
67 | int32_t GetHiddenOutputWeightIndex( int32_t hiddenIdx, int32_t outputIdx ) const { return hiddenIdx * m_numOutputs + outputIdx; }
68 |
69 | private:
70 |
71 | int32_t m_numInputs;
72 | int32_t m_numHidden;
73 | int32_t m_numOutputs;
74 |
75 | std::vector<double> m_inputNeurons;
76 | std::vector<double> m_hiddenNeurons;
77 | std::vector<double> m_outputNeurons;
78 | 
79 | std::vector<int32_t> m_clampedOutputs;
80 | 
81 | std::vector<double> m_weightsInputHidden;
82 | std::vector<double> m_weightsHiddenOutput;
83 | };
84 | }
--------------------------------------------------------------------------------
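A minimal usage sketch for the class above; the 2-2-1 layer sizes and input values are illustrative, not from the repository:

```cpp
#include "NeuralNetwork/NeuralNetwork.h"
#include <iostream>

int main()
{
    // Construct a 2-input, 2-hidden, 1-output network with random initial weights
    BPN::Network::Settings settings{ 2, 2, 1 };
    BPN::Network nn( settings );

    // Evaluate returns clamped outputs: 0, 1, or -1 when the sigmoid output is ambiguous
    std::vector<double> const input = { 0.5, 0.25 };
    std::vector<int32_t> const& outputs = nn.Evaluate( input );
    std::cout << "Output: " << outputs[0] << std::endl;
    return 0;
}
```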
/Src/NeuralNetwork/NeuralNetworkTrainer.cpp:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 |
7 | #include "NeuralNetworkTrainer.h"
8 | #include <cassert>
9 | #include <cstring>
10 | #include <cmath>
11 | #include <iostream>
12 |
13 | namespace BPN
14 | {
15 | NetworkTrainer::NetworkTrainer( Settings const& settings, Network* pNetwork )
16 | : m_pNetwork( pNetwork )
17 | , m_learningRate( settings.m_learningRate )
18 | , m_momentum( settings.m_momentum )
19 | , m_desiredAccuracy( settings.m_desiredAccuracy )
20 | , m_maxEpochs( settings.m_maxEpochs )
21 | , m_useBatchLearning( settings.m_useBatchLearning )
22 | , m_currentEpoch( 0 )
23 | , m_trainingSetAccuracy( 0 )
24 | , m_validationSetAccuracy( 0 )
25 | , m_generalizationSetAccuracy( 0 )
26 | , m_trainingSetMSE( 0 )
27 | , m_validationSetMSE( 0 )
28 | , m_generalizationSetMSE( 0 )
29 | {
30 | assert( pNetwork != nullptr );
31 |
32 | m_deltaInputHidden.resize( pNetwork->m_weightsInputHidden.size() );
33 | m_deltaHiddenOutput.resize( pNetwork->m_weightsHiddenOutput.size() );
34 | m_errorGradientsHidden.resize( pNetwork->m_hiddenNeurons.size() );
35 | m_errorGradientsOutput.resize( pNetwork->m_outputNeurons.size() );
36 |
37 | memset( m_deltaInputHidden.data(), 0, sizeof( double ) * m_deltaInputHidden.size() );
38 | memset( m_deltaHiddenOutput.data(), 0, sizeof( double ) * m_deltaHiddenOutput.size() );
39 | memset( m_errorGradientsHidden.data(), 0, sizeof( double ) * m_errorGradientsHidden.size() );
40 | memset( m_errorGradientsOutput.data(), 0, sizeof( double ) * m_errorGradientsOutput.size() );
41 | }
42 |
43 | void NetworkTrainer::Train( TrainingData const& trainingData )
44 | {
45 | // Reset training state
46 | m_currentEpoch = 0;
47 | m_trainingSetAccuracy = 0;
48 | m_validationSetAccuracy = 0;
49 | m_generalizationSetAccuracy = 0;
50 | m_trainingSetMSE = 0;
51 | m_validationSetMSE = 0;
52 | m_generalizationSetMSE = 0;
53 |
54 | // Print header
55 | //-------------------------------------------------------------------------
56 |
57 | std::cout << std::endl << " Neural Network Training Starting: " << std::endl
58 | << "==========================================================================" << std::endl
59 | << " LR: " << m_learningRate << ", Momentum: " << m_momentum << ", Max Epochs: " << m_maxEpochs << std::endl
60 | << " " << m_pNetwork->m_numInputs<< " Input Neurons, " << m_pNetwork->m_numHidden << " Hidden Neurons, " << m_pNetwork->m_numOutputs<< " Output Neurons" << std::endl
61 | << "==========================================================================" << std::endl << std::endl;
62 |
63 | // Train network using training dataset for training and generalization dataset for testing
64 | //--------------------------------------------------------------------------------------------------------
65 |
66 | while ( ( m_trainingSetAccuracy < m_desiredAccuracy || m_generalizationSetAccuracy < m_desiredAccuracy ) && m_currentEpoch < m_maxEpochs )
67 | {
68 | // Use training set to train network
69 | RunEpoch( trainingData.m_trainingSet );
70 |
71 | // Get generalization set accuracy and MSE
72 | GetSetAccuracyAndMSE( trainingData.m_generalizationSet, m_generalizationSetAccuracy, m_generalizationSetMSE );
73 |
74 | std::cout << "Epoch: " << m_currentEpoch;
75 | std::cout << " Training Set Accuracy: " << m_trainingSetAccuracy << "%, MSE: " << m_trainingSetMSE;
76 | std::cout << " Generalization Set Accuracy: " << m_generalizationSetAccuracy << "%, MSE: " << m_generalizationSetMSE << std::endl;
77 |
78 | m_currentEpoch++;
79 | }
80 |
81 | // Get validation set accuracy and MSE
82 | GetSetAccuracyAndMSE( trainingData.m_validationSet, m_validationSetAccuracy, m_validationSetMSE );
83 |
84 | // Print validation accuracy and MSE
85 | std::cout << std::endl << "Training Complete! -> Elapsed Epochs: " << m_currentEpoch << std::endl;
86 | std::cout << " Validation Set Accuracy: " << m_validationSetAccuracy << std::endl;
87 | std::cout << " Validation Set MSE: " << m_validationSetMSE << std::endl << std::endl;
88 | }
89 |
90 | double NetworkTrainer::GetHiddenErrorGradient( int32_t hiddenIdx ) const
91 | {
92 | // Get sum of hidden->output weights * output error gradients
93 | double weightedSum = 0;
94 | for ( auto outputIdx = 0; outputIdx < m_pNetwork->m_numOutputs; outputIdx++ )
95 | {
96 | int32_t const weightIdx = m_pNetwork->GetHiddenOutputWeightIndex( hiddenIdx, outputIdx );
97 | weightedSum += m_pNetwork->m_weightsHiddenOutput[weightIdx] * m_errorGradientsOutput[outputIdx];
98 | }
99 |
100 | // Return error gradient
101 | return m_pNetwork->m_hiddenNeurons[hiddenIdx] * ( 1.0 - m_pNetwork->m_hiddenNeurons[hiddenIdx] ) * weightedSum;
102 | }
103 |
104 | void NetworkTrainer::RunEpoch( TrainingSet const& trainingSet )
105 | {
106 | double incorrectEntries = 0;
107 | double MSE = 0;
108 |
109 | for ( auto const& trainingEntry : trainingSet )
110 | {
111 | // Feed inputs through network and back propagate errors
112 | m_pNetwork->Evaluate( trainingEntry.m_inputs );
113 | Backpropagate( trainingEntry.m_expectedOutputs );
114 |
115 | // Check all outputs from neural network against desired values
116 | bool resultCorrect = true;
117 | for ( int outputIdx = 0; outputIdx < m_pNetwork->m_numOutputs; outputIdx++ )
118 | {
119 | if ( m_pNetwork->m_clampedOutputs[outputIdx] != trainingEntry.m_expectedOutputs[outputIdx] )
120 | {
121 | resultCorrect = false;
122 | }
123 |
124 | // Calculate MSE
125 | MSE += pow( ( m_pNetwork->m_outputNeurons[outputIdx] - trainingEntry.m_expectedOutputs[outputIdx] ), 2);
126 | }
127 |
128 | if ( !resultCorrect )
129 | {
130 | incorrectEntries++;
131 | }
132 | }
133 |
134 | // If using batch learning - update the weights
135 | if ( m_useBatchLearning )
136 | {
137 | UpdateWeights();
138 | }
139 |
140 | // Update training accuracy and MSE
141 | m_trainingSetAccuracy = 100.0 - ( incorrectEntries / trainingSet.size() * 100.0 );
142 | m_trainingSetMSE = MSE / ( m_pNetwork->m_numOutputs * trainingSet.size() );
143 | }
144 |
145 | void NetworkTrainer::Backpropagate( std::vector<int32_t> const& expectedOutputs )
146 | {
147 | // Modify deltas between hidden and output layers
148 | //--------------------------------------------------------------------------------------------------------
149 | for ( auto OutputIdx = 0; OutputIdx < m_pNetwork->m_numOutputs; OutputIdx++ )
150 | {
151 | // Get error gradient for every output node
152 | m_errorGradientsOutput[OutputIdx] = GetOutputErrorGradient( (double) expectedOutputs[OutputIdx], m_pNetwork->m_outputNeurons[OutputIdx] );
153 |
154 | // For all nodes in hidden layer and bias neuron
155 | for ( auto hiddenIdx = 0; hiddenIdx <= m_pNetwork->m_numHidden; hiddenIdx++ )
156 | {
157 | int32_t const weightIdx = m_pNetwork->GetHiddenOutputWeightIndex( hiddenIdx, OutputIdx );
158 |
159 | // Calculate change in weight
160 | if ( m_useBatchLearning )
161 | {
162 | m_deltaHiddenOutput[weightIdx] += m_learningRate * m_pNetwork->m_hiddenNeurons[hiddenIdx] * m_errorGradientsOutput[OutputIdx];
163 | }
164 | else
165 | {
166 | m_deltaHiddenOutput[weightIdx] = m_learningRate * m_pNetwork->m_hiddenNeurons[hiddenIdx] * m_errorGradientsOutput[OutputIdx] + m_momentum * m_deltaHiddenOutput[weightIdx];
167 | }
168 | }
169 | }
170 |
171 | // Modify deltas between input and hidden layers
172 | //--------------------------------------------------------------------------------------------------------
173 |
174 | for ( auto hiddenIdx = 0; hiddenIdx < m_pNetwork->m_numHidden; hiddenIdx++ )
175 | {
176 | // Get error gradient for every real hidden node (the hidden bias neuron has no incoming weights)
177 | m_errorGradientsHidden[hiddenIdx] = GetHiddenErrorGradient( hiddenIdx );
178 |
179 | // For all nodes in input layer and bias neuron
180 | for ( auto inputIdx = 0; inputIdx <= m_pNetwork->m_numInputs; inputIdx++ )
181 | {
182 | int32_t const weightIdx = m_pNetwork->GetInputHiddenWeightIndex( inputIdx, hiddenIdx );
183 |
184 | // Calculate change in weight
185 | if ( m_useBatchLearning )
186 | {
187 | m_deltaInputHidden[weightIdx] += m_learningRate * m_pNetwork->m_inputNeurons[inputIdx] * m_errorGradientsHidden[hiddenIdx];
188 | }
189 | else
190 | {
191 | m_deltaInputHidden[weightIdx] = m_learningRate * m_pNetwork->m_inputNeurons[inputIdx] * m_errorGradientsHidden[hiddenIdx] + m_momentum * m_deltaInputHidden[weightIdx];
192 | }
193 | }
194 | }
195 |
196 | // If using stochastic learning update the weights immediately
197 | if ( !m_useBatchLearning )
198 | {
199 | UpdateWeights();
200 | }
201 | }
202 |
203 | void NetworkTrainer::UpdateWeights()
204 | {
205 | // Input -> hidden weights
206 | //--------------------------------------------------------------------------------------------------------
207 |
208 | for ( auto InputIdx = 0; InputIdx <= m_pNetwork->m_numInputs; InputIdx++ )
209 | {
210 | for ( auto hiddenIdx = 0; hiddenIdx < m_pNetwork->m_numHidden; hiddenIdx++ )
211 | {
212 | int32_t const weightIdx = m_pNetwork->GetInputHiddenWeightIndex( InputIdx, hiddenIdx );
213 | m_pNetwork->m_weightsInputHidden[weightIdx] += m_deltaInputHidden[weightIdx];
214 |
215 | // Clear delta only if using batch (previous delta is needed for momentum)
216 | if ( m_useBatchLearning )
217 | {
218 | m_deltaInputHidden[weightIdx] = 0;
219 | }
220 | }
221 | }
222 |
223 | // Hidden -> output weights
224 | //--------------------------------------------------------------------------------------------------------
225 |
226 | for ( auto hiddenIdx = 0; hiddenIdx <= m_pNetwork->m_numHidden; hiddenIdx++ )
227 | {
228 | for ( auto outputIdx = 0; outputIdx < m_pNetwork->m_numOutputs; outputIdx++ )
229 | {
230 | int32_t const weightIdx = m_pNetwork->GetHiddenOutputWeightIndex( hiddenIdx, outputIdx );
231 | m_pNetwork->m_weightsHiddenOutput[weightIdx] += m_deltaHiddenOutput[weightIdx];
232 |
233 | // Clear delta only if using batch (previous delta is needed for momentum)
234 | if ( m_useBatchLearning )
235 | {
236 | m_deltaHiddenOutput[weightIdx] = 0;
237 | }
238 | }
239 | }
240 | }
241 |
242 | void NetworkTrainer::GetSetAccuracyAndMSE( TrainingSet const& trainingSet, double& accuracy, double& MSE ) const
243 | {
244 | accuracy = 0;
245 | MSE = 0;
246 |
247 | double numIncorrectResults = 0;
248 | for ( auto const& trainingEntry : trainingSet )
249 | {
250 | m_pNetwork->Evaluate( trainingEntry.m_inputs );
251 |
252 | // Check if the network outputs match the expected outputs
253 | bool correctResult = true;
254 | for ( int32_t outputIdx = 0; outputIdx < m_pNetwork->m_numOutputs; outputIdx++ )
255 | {
256 | if ( (double) m_pNetwork->m_clampedOutputs[outputIdx] != trainingEntry.m_expectedOutputs[outputIdx] )
257 | {
258 | correctResult = false;
259 | }
260 |
261 | MSE += pow( ( m_pNetwork->m_outputNeurons[outputIdx] - trainingEntry.m_expectedOutputs[outputIdx] ), 2 );
262 | }
263 |
264 | if ( !correctResult )
265 | {
266 | numIncorrectResults++;
267 | }
268 | }
269 |
270 | accuracy = 100.0 - ( numIncorrectResults / trainingSet.size() * 100.0 );
271 | MSE = MSE / ( m_pNetwork->m_numOutputs * trainingSet.size() );
272 | }
273 |
274 | }
--------------------------------------------------------------------------------
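For reference, the per-weight arithmetic `Backpropagate`/`UpdateWeights` performs in stochastic mode, as a self-contained sketch with purely illustrative values:

```cpp
#include <iostream>

int main()
{
    double const learningRate = 0.001;
    double const momentum = 0.9;

    // One hidden->output weight; illustrative activations and target
    double const hiddenActivation = 0.7;
    double const output = 0.6;
    double const target = 1.0;

    // Output error gradient: out * (1 - out) * (target - out)
    double const gradient = output * ( 1.0 - output ) * ( target - output );

    // New delta blends the gradient step with the previous delta (momentum term)
    double const previousDelta = 0.01;
    double const delta = learningRate * hiddenActivation * gradient + momentum * previousDelta;

    std::cout << "Weight change: " << delta << std::endl; // weight += delta in UpdateWeights()
    return 0;
}
```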
/Src/NeuralNetwork/NeuralNetworkTrainer.h:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 | // Basic Gradient Descent NN Trainer with Momentum and Batch Learning
7 |
8 | #pragma once
9 |
10 | #include "NeuralNetwork.h"
11 | #include <vector>
12 |
13 | namespace BPN
14 | {
15 | struct TrainingEntry
16 | {
17 | std::vector<double> m_inputs;
18 | std::vector<int32_t> m_expectedOutputs;
19 | };
20 | 
21 | typedef std::vector<TrainingEntry> TrainingSet;
22 |
23 | struct TrainingData
24 | {
25 | TrainingSet m_trainingSet;
26 | TrainingSet m_generalizationSet;
27 | TrainingSet m_validationSet;
28 | };
29 |
30 | //-------------------------------------------------------------------------
31 |
32 | class NetworkTrainer
33 | {
34 | public:
35 |
36 | struct Settings
37 | {
38 | // Learning params
39 | double m_learningRate = 0.001;
40 | double m_momentum = 0.9;
41 | bool m_useBatchLearning = false;
42 |
43 | // Stopping conditions
44 | uint32_t m_maxEpochs = 150;
45 | double m_desiredAccuracy = 90;
46 | };
47 |
48 | public:
49 |
50 | NetworkTrainer( Settings const& settings, Network* pNetwork );
51 |
52 | void Train( TrainingData const& trainingData );
53 |
54 | private:
55 |
56 | inline double GetOutputErrorGradient( double desiredValue, double outputValue ) const { return outputValue * ( 1.0 - outputValue ) * ( desiredValue - outputValue ); }
57 | double GetHiddenErrorGradient( int32_t hiddenIdx ) const;
58 |
59 | void RunEpoch( TrainingSet const& trainingSet );
60 | void Backpropagate( std::vector<int32_t> const& expectedOutputs );
61 | void UpdateWeights();
62 |
63 | void GetSetAccuracyAndMSE( TrainingSet const& trainingSet, double& accuracy, double& mse ) const;
64 |
65 | private:
66 |
67 | Network* m_pNetwork; // Network to train
68 |
69 | // Training settings
70 | double m_learningRate; // Adjusts the step size of the weight update
71 | double m_momentum; // Improves performance of stochastic learning (don't use for batch)
72 | double m_desiredAccuracy; // Target accuracy for training
73 | uint32_t m_maxEpochs; // Max number of training epochs
74 | bool m_useBatchLearning; // Should we use batch learning
75 |
76 | // Training data
77 | std::vector<double> m_deltaInputHidden; // Delta for input hidden layer
78 | std::vector<double> m_deltaHiddenOutput; // Delta for hidden output layer
79 | std::vector<double> m_errorGradientsHidden; // Error gradients for the hidden layer
80 | std::vector<double> m_errorGradientsOutput; // Error gradients for the outputs
81 |
82 | uint32_t m_currentEpoch; // Epoch counter
83 | double m_trainingSetAccuracy;
84 | double m_validationSetAccuracy;
85 | double m_generalizationSetAccuracy;
86 | double m_trainingSetMSE;
87 | double m_validationSetMSE;
88 | double m_generalizationSetMSE;
89 | };
90 | }
--------------------------------------------------------------------------------
/Src/NeuralNetwork/TrainingDataReader.cpp:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 |
7 | #include "TrainingDataReader.h"
8 | #include <fstream>
9 | #include <iostream>
10 | #include <algorithm>
11 | #include <cstring>
12 | #include <cmath>
13 | //-------------------------------------------------------------------------
14 |
15 |
16 | namespace BPN
17 | {
18 | TrainingDataReader::TrainingDataReader( std::string const& filename, int32_t numInputs, int32_t numOutputs )
19 | : m_filename( filename )
20 | , m_numInputs( numInputs )
21 | , m_numOutputs( numOutputs )
22 | {
23 | assert( !filename.empty() && m_numInputs > 0 && m_numOutputs > 0 );
24 | }
25 |
26 | bool TrainingDataReader::ReadData()
27 | {
28 | assert( !m_filename.empty() );
29 |
30 | std::fstream inputFile;
31 | inputFile.open( m_filename, std::ios::in );
32 |
33 | if ( inputFile.is_open() )
34 | {
35 | std::string line;
36 |
37 | // Read data
38 | //-------------------------------------------------------------------------
39 |
40 | int32_t const totalValuesToRead = m_numInputs + m_numOutputs;
41 |
42 | // Read the file line by line
43 | while ( std::getline( inputFile, line ) )
44 | {
45 | if ( line.length() > 2 )
46 | {
47 | m_entries.push_back( TrainingEntry() );
48 | TrainingEntry& entry = m_entries.back();
49 |
50 | std::vector<char> cstr( line.size() + 1 ); // RAII tokenization buffer, freed automatically
51 | strcpy_s( cstr.data(), cstr.size(), line.c_str() );
52 |
53 | // Read values
54 | int i = 0;
55 | char* nextToken = nullptr;
56 | char* pToken = strtok_s( cstr.data(), ",", &nextToken );
57 |
58 | while ( pToken != nullptr && i < totalValuesToRead )
59 | {
60 | if ( i < m_numInputs )
61 | {
62 | entry.m_inputs.push_back( atof( pToken ) );
63 | }
64 | else
65 | {
66 | double const outputValue = atof( pToken );
67 | entry.m_expectedOutputs.push_back( (int32_t) outputValue );
68 | }
69 |
70 | pToken = strtok_s( nullptr, ",", &nextToken );
71 | i++;
72 | }
73 | }
74 | }
75 |
76 | inputFile.close();
77 |
78 | if ( !m_entries.empty() )
79 | {
80 | CreateTrainingData();
81 | }
82 |
83 | std::cout << "Input file: " << m_filename << "\nRead complete: " << m_entries.size() << " training entries loaded" << std::endl;
84 | return true;
85 | }
86 | else
87 | {
88 | std::cout << "Error Opening Input File: " << m_filename << std::endl;
89 | return false;
90 | }
91 | }
92 |
93 | void TrainingDataReader::CreateTrainingData()
94 | {
95 | assert( !m_entries.empty() );
96 |
97 | std::random_shuffle( m_entries.begin(), m_entries.end() );
98 |
99 | // Training set
100 | int32_t const numEntries = (int32_t) m_entries.size();
101 | int32_t const numTrainingEntries = (int32_t) ( 0.6 * numEntries );
102 | int32_t const numGeneralizationEntries = (int32_t) ( ceil( 0.2 * numEntries ) );
103 |
104 | int32_t entryIdx = 0;
105 | for ( ; entryIdx < numTrainingEntries; entryIdx++ )
106 | {
107 | m_data.m_trainingSet.push_back( m_entries[entryIdx] );
108 | }
109 |
110 | // Generalization set
111 | for ( ; entryIdx < numTrainingEntries + numGeneralizationEntries; entryIdx++ )
112 | {
113 | m_data.m_generalizationSet.push_back( m_entries[entryIdx] );
114 | }
115 |
116 | // Validation set
117 | for ( ; entryIdx < numEntries; entryIdx++ )
118 | {
119 | m_data.m_validationSet.push_back( m_entries[entryIdx] );
120 | }
121 | }
122 | }
--------------------------------------------------------------------------------
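A note on `CreateTrainingData`: after shuffling, entries are split roughly 60/20/20 into training, generalization, and validation sets (so 100 entries become 60/20/20). `std::random_shuffle` is valid for this project's v141 toolset but was removed in C++17; a sketch of a drop-in replacement for newer toolsets (the helper is hypothetical, not part of the repository):

```cpp
#include <algorithm>
#include <random>
#include <vector>

// Shuffle entries with an explicitly seeded engine; std::shuffle replaces
// the removed-in-C++17 std::random_shuffle.
template <typename T>
void ShuffleEntries( std::vector<T>& entries )
{
    std::random_device rd;
    std::mt19937 generator( rd() );
    std::shuffle( entries.begin(), entries.end(), generator );
}
```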
/Src/NeuralNetwork/TrainingDataReader.h:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 |
7 | #pragma once
8 |
9 | #include "NeuralNetworkTrainer.h"
10 | #include <string>
11 |
12 | //-------------------------------------------------------------------------
13 |
14 | namespace BPN
15 | {
16 | class TrainingDataReader
17 | {
18 | public:
19 |
20 | TrainingDataReader( std::string const& filename, int32_t numInputs, int32_t numOutputs );
21 |
22 | bool ReadData();
23 |
24 | inline int32_t GetNumInputs() const { return m_numInputs; }
25 | inline int32_t GetNumOutputs() const { return m_numOutputs; }
26 |
27 | inline int32_t GetNumTrainingSets() const { return (int32_t) m_entries.size(); } // Number of entries read from the data file
28 | TrainingData const& GetTrainingData() const { return m_data; }
29 |
30 | private:
31 |
32 | void CreateTrainingData();
33 |
34 | private:
35 |
36 | std::string m_filename;
37 | int32_t m_numInputs;
38 | int32_t m_numOutputs;
39 |
40 | std::vector<TrainingEntry> m_entries;
41 | TrainingData m_data;
42 | };
43 | }
--------------------------------------------------------------------------------
/Src/cmdParser.h:
--------------------------------------------------------------------------------
1 | /*
2 | This file is part of the C++ CmdParser utility.
3 | Copyright (c) 2015 - 2016 Florian Rappl
4 | */
5 |
6 | #pragma once
7 | #include <iostream>
8 | #include <sstream>
9 | #include <string>
10 | #include <vector>
11 | #include <functional>
12 | #include <stdexcept>
13 |
14 | namespace cli {
15 | struct CallbackArgs {
16 | const std::vector<std::string>& arguments;
17 | std::ostream& output;
18 | std::ostream& error;
19 | };
20 | class Parser {
21 | private:
22 | class CmdBase {
23 | public:
24 | explicit CmdBase( const std::string& name, const std::string& alternative, const std::string& description, bool required, bool dominant, bool variadic ) :
25 | name( name ),
26 | command( name.size() > 0 ? "-" + name : "" ),
27 | alternative( alternative.size() > 0 ? "--" + alternative : "" ),
28 | description( description ),
29 | required( required ),
30 | handled( false ),
31 | arguments( {} ),
32 | dominant( dominant ),
33 | variadic( variadic ) {
34 | }
35 |
36 | virtual ~CmdBase() {
37 | }
38 |
39 | std::string name;
40 | std::string command;
41 | std::string alternative;
42 | std::string description;
43 | bool required;
44 | bool handled;
45 | std::vector<std::string> arguments;
46 | bool const dominant;
47 | bool const variadic;
48 |
49 | virtual std::string print_value() const = 0;
50 | virtual bool parse( std::ostream& output, std::ostream& error ) = 0;
51 |
52 | bool is( const std::string& given ) const {
53 | return given == command || given == alternative;
54 | }
55 | };
56 |
57 | template <typename T>
58 | struct ArgumentCountChecker
59 | {
60 | static constexpr bool Variadic = false;
61 | };
62 | 
63 | template <typename T>
64 | struct ArgumentCountChecker<std::vector<T>>
65 | {
66 | static constexpr bool Variadic = true;
67 | };
68 |
69 | template <typename T>
70 | class CmdFunction final : public CmdBase {
71 | public:
72 | explicit CmdFunction( const std::string& name, const std::string& alternative, const std::string& description, bool required, bool dominant ) :
73 | CmdBase( name, alternative, description, required, dominant, ArgumentCountChecker<T>::Variadic ) {
74 | }
75 |
76 | virtual bool parse( std::ostream& output, std::ostream& error ) {
77 | try
78 | {
79 | CallbackArgs args{ arguments, output, error };
80 | value = callback( args );
81 | return true;
82 | }
83 | catch ( ... )
84 | {
85 | return false;
86 | }
87 | }
88 |
89 | virtual std::string print_value() const {
90 | return "";
91 | }
92 |
93 | std::function<T( CallbackArgs& )> callback;
94 | T value;
95 | };
96 |
97 | template <typename T>
98 | class CmdArgument final : public CmdBase {
99 | public:
100 | explicit CmdArgument( const std::string& name, const std::string& alternative, const std::string& description, bool required, bool dominant ) :
101 | CmdBase( name, alternative, description, required, dominant, ArgumentCountChecker<T>::Variadic ) {
102 | }
103 |
104 | virtual bool parse( std::ostream&, std::ostream& ) {
105 | try
106 | {
107 | value = Parser::parse( arguments, value );
108 | return true;
109 | }
110 | catch ( ... )
111 | {
112 | return false;
113 | }
114 | }
115 |
116 | virtual std::string print_value() const {
117 | return stringify( value );
118 | }
119 |
120 | T value;
121 | };
122 |
123 | static int parse( const std::vector<std::string>& elements, const int& ) {
124 | if ( elements.size() != 1 )
125 | throw std::bad_cast();
126 |
127 | return std::stoi( elements[0] );
128 | }
129 |
130 | static bool parse( const std::vector<std::string>& elements, const bool& defval ) {
131 | if ( elements.size() != 0 )
132 | throw std::runtime_error( "A boolean command line parameter cannot have any arguments." );
133 |
134 | return !defval;
135 | }
136 |
137 | static double parse( const std::vector<std::string>& elements, const double& ) {
138 | if ( elements.size() != 1 )
139 | throw std::bad_cast();
140 |
141 | return std::stod( elements[0] );
142 | }
143 |
144 | static float parse( const std::vector<std::string>& elements, const float& ) {
145 | if ( elements.size() != 1 )
146 | throw std::bad_cast();
147 |
148 | return std::stof( elements[0] );
149 | }
150 |
151 | static long double parse( const std::vector<std::string>& elements, const long double& ) {
152 | if ( elements.size() != 1 )
153 | throw std::bad_cast();
154 |
155 | return std::stold( elements[0] );
156 | }
157 |
158 | static unsigned int parse( const std::vector<std::string>& elements, const unsigned int& ) {
159 | if ( elements.size() != 1 )
160 | throw std::bad_cast();
161 |
162 | return static_cast<unsigned int>( std::stoul( elements[0] ) );
163 | }
164 |
165 | static unsigned long parse( const std::vector<std::string>& elements, const unsigned long& ) {
166 | if ( elements.size() != 1 )
167 | throw std::bad_cast();
168 |
169 | return std::stoul( elements[0] );
170 | }
171 |
172 | static long parse( const std::vector<std::string>& elements, const long& ) {
173 | if ( elements.size() != 1 )
174 | throw std::bad_cast();
175 |
176 | return std::stol( elements[0] );
177 | }
178 |
179 | static std::string parse( const std::vector<std::string>& elements, const std::string& ) {
180 | if ( elements.size() != 1 )
181 | throw std::bad_cast();
182 |
183 | return elements[0];
184 | }
185 |
186 | template <typename T>
187 | static std::vector<T> parse( const std::vector<std::string>& elements, const std::vector<T>& ) {
188 | const T defval = T();
189 | std::vector<T> values{};
190 | std::vector<std::string> buffer( 1 );
191 |
192 | for ( const auto& element : elements )
193 | {
194 | buffer[0] = element;
195 | values.push_back( parse( buffer, defval ) );
196 | }
197 |
198 | return values;
199 | }
200 |
201 | template <typename T>
202 | static std::string stringify( const T& value ) {
203 | return std::to_string( value );
204 | }
205 |
206 | template <typename T>
207 | static std::string stringify( const std::vector<T>& values ) {
208 | std::stringstream ss{};
209 | ss << "[ ";
210 |
211 | for ( const auto& value : values )
212 | {
213 | ss << stringify( value ) << " ";
214 | }
215 |
216 | ss << "]";
217 | return ss.str();
218 | }
219 |
220 | static std::string stringify( const std::string& str ) {
221 | return str;
222 | }
223 |
224 | public:
225 | explicit Parser( int argc, const char** argv ) :
226 | _appname( argv[0] ) {
227 | for ( int i = 1; i < argc; ++i )
228 | {
229 | _arguments.push_back( argv[i] );
230 | }
231 | enable_help();
232 | }
233 |
234 | explicit Parser( int argc, char** argv ) :
235 | _appname( argv[0] ) {
236 | for ( int i = 1; i < argc; ++i )
237 | {
238 | _arguments.push_back( argv[i] );
239 | }
240 | enable_help();
241 | }
242 |
243 | ~Parser() {
244 | for ( int i = 0, n = _commands.size(); i < n; ++i )
245 | {
246 | delete _commands[i];
247 | }
248 | }
249 |
250 | bool has_help() const {
251 | for ( const auto command : _commands )
252 | {
253 | if ( command->name == "h" && command->alternative == "--help" )
254 | {
255 | return true;
256 | }
257 | }
258 |
259 | return false;
260 | }
261 |
262 | void enable_help() {
263 | set_callback( "h", "help", std::function<bool( CallbackArgs& )>( [this] ( CallbackArgs& args ) {
264 | args.output << this->usage();
265 | exit( 0 );
266 | return false;
267 | } ), "", true );
268 | }
269 |
270 | void disable_help() {
271 | for ( auto command = _commands.begin(); command != _commands.end(); ++command )
272 | {
273 | if ( ( *command )->name == "h" && ( *command )->alternative == "--help" )
274 | {
275 | _commands.erase( command );
276 | break;
277 | }
278 | }
279 | }
280 |
281 | template <typename T>
282 | void set_default( bool is_required, const std::string& description = "" ) {
283 | auto command = new CmdArgument<T>{ "", "", description, is_required, false };
284 | _commands.push_back( command );
285 | }
286 |
287 | template <typename T>
288 | void set_required( const std::string& name, const std::string& alternative, const std::string& description = "", bool dominant = false ) {
289 | auto command = new CmdArgument<T>{ name, alternative, description, true, dominant };
290 | _commands.push_back( command );
291 | }
292 |
293 | template <typename T>
294 | void set_optional( const std::string& name, const std::string& alternative, T defaultValue, const std::string& description = "", bool dominant = false ) {
295 | auto command = new CmdArgument<T>{ name, alternative, description, false, dominant };
296 | command->value = defaultValue;
297 | _commands.push_back( command );
298 | }
299 |
300 | template <typename T>
301 | void set_callback( const std::string& name, const std::string& alternative, std::function<T( CallbackArgs& )> callback, const std::string& description = "", bool dominant = false ) {
302 | auto command = new CmdFunction<T>{ name, alternative, description, false, dominant };
303 | command->callback = callback;
304 | _commands.push_back( command );
305 | }
306 |
307 | inline void run_and_exit_if_error() {
308 | if ( run() == false )
309 | {
310 | exit( 1 );
311 | }
312 | }
313 |
314 | inline bool run() {
315 | return run( std::cout, std::cerr );
316 | }
317 |
318 | inline bool run( std::ostream& output ) {
319 | return run( output, std::cerr );
320 | }
321 |
322 | bool run( std::ostream& output, std::ostream& error ) {
323 | if ( _arguments.size() > 0 )
324 | {
325 | auto current = find_default();
326 |
327 | for ( int i = 0, n = _arguments.size(); i < n; ++i )
328 | {
329 | auto isarg = _arguments[i].size() > 0 && _arguments[i][0] == '-';
330 | auto associated = isarg ? find( _arguments[i] ) : nullptr;
331 |
332 | if ( associated != nullptr )
333 | {
334 | current = associated;
335 | associated->handled = true;
336 | }
337 | else if ( current == nullptr )
338 | {
339 | error << no_default();
340 | return false;
341 | }
342 | else
343 | {
344 | current->arguments.push_back( _arguments[i] );
345 | current->handled = true;
346 | if ( !current->variadic )
347 | {
348 | // If the current command is not variadic, then no more arguments
349 | // should be added to it. In this case, switch back to the default
350 | // command.
351 | current = find_default();
352 | }
353 | }
354 | }
355 | }
356 |
357 | // First, parse dominant arguments since they succeed even if required
358 | // arguments are missing.
359 | for ( auto command : _commands )
360 | {
361 | if ( command->handled && command->dominant && !command->parse( output, error ) )
362 | {
363 | error << howto_use( command );
364 | return false;
365 | }
366 | }
367 |
368 | // Next, check for any missing arguments.
369 | for ( auto command : _commands )
370 | {
371 | if ( command->required && !command->handled )
372 | {
373 | error << howto_required( command );
374 | return false;
375 | }
376 | }
377 |
378 | // Finally, parse all remaining arguments.
379 | for ( auto command : _commands )
380 | {
381 | if ( command->handled && !command->dominant && !command->parse( output, error ) )
382 | {
383 | error << howto_use( command );
384 | return false;
385 | }
386 | }
387 |
388 | return true;
389 | }
390 |
391 | template <typename T>
392 | T get( const std::string& name ) const {
393 | for ( const auto& command : _commands )
394 | {
395 | if ( command->name == name )
396 | {
397 | auto cmd = dynamic_cast<CmdArgument<T>*>( command );
398 |
399 | if ( cmd == nullptr )
400 | {
401 | throw std::runtime_error( "Invalid usage of the parameter " + name + " detected." );
402 | }
403 |
404 | return cmd->value;
405 | }
406 | }
407 |
408 | throw std::runtime_error( "The parameter " + name + " could not be found." );
409 | }
410 |
411 | template <typename T>
412 | T get_if( const std::string& name, std::function<T( T )> callback ) const {
413 | auto value = get<T>( name );
414 | return callback( value );
415 | }
416 |
417 | int requirements() const {
418 | int count = 0;
419 |
420 | for ( const auto& command : _commands )
421 | {
422 | if ( command->required )
423 | {
424 | ++count;
425 | }
426 | }
427 |
428 | return count;
429 | }
430 |
431 | int commands() const {
432 | return static_cast<int>( _commands.size() );
433 | }
434 |
435 | inline const std::string& app_name() const {
436 | return _appname;
437 | }
438 |
439 | protected:
440 | CmdBase* find( const std::string& name ) {
441 | for ( auto command : _commands )
442 | {
443 | if ( command->is( name ) )
444 | {
445 | return command;
446 | }
447 | }
448 |
449 | return nullptr;
450 | }
451 |
452 | CmdBase* find_default() {
453 | for ( auto command : _commands )
454 | {
455 | if ( command->name == "" )
456 | {
457 | return command;
458 | }
459 | }
460 |
461 | return nullptr;
462 | }
463 |
464 | std::string usage() const {
465 | std::stringstream ss{};
466 | ss << "Available parameters:\n\n";
467 |
468 | for ( const auto& command : _commands )
469 | {
470 | ss << " " << command->command << "\t" << command->alternative;
471 |
472 | if ( command->required == true )
473 | {
474 | ss << "\t(required)";
475 | }
476 |
477 | ss << "\n " << command->description;
478 |
479 | if ( command->required == false )
480 | {
481 | ss << "\n " << "This parameter is optional. The default value is '" + command->print_value() << "'.";
482 | }
483 |
484 | ss << "\n\n";
485 | }
486 |
487 | return ss.str();
488 | }
489 |
490 | void print_help( std::stringstream& ss ) const {
491 | if ( has_help() )
492 | {
493 | ss << "For more help use --help or -h.\n";
494 | }
495 | }
496 |
497 | std::string howto_required( CmdBase* command ) const {
498 | std::stringstream ss{};
499 | ss << "The parameter " << command->name << " is required.\n";
500 | ss << command->description << '\n';
501 | print_help( ss );
502 | return ss.str();
503 | }
504 |
505 | std::string howto_use( CmdBase* command ) const {
506 | std::stringstream ss{};
507 | ss << "The parameter " << command->name << " has invalid arguments.\n";
508 | ss << command->description << '\n';
509 | print_help( ss );
510 | return ss.str();
511 | }
512 |
513 | std::string no_default() const {
514 | std::stringstream ss{};
515 | ss << "No default parameter has been specified.\n";
516 | ss << "The given argument must be used with a parameter.\n";
517 | print_help( ss );
518 | return ss.str();
519 | }
520 |
521 | private:
522 | const std::string _appname;
523 | std::vector<std::string> _arguments;
524 | std::vector<CmdBase*> _commands;
525 | };
526 | }
--------------------------------------------------------------------------------
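A short usage sketch for `cli::Parser` as defined above; the parameter names and types here are illustrative (main.cpp below shows the project's actual usage):

```cpp
#include "cmdParser.h"

int main( int argc, char* argv[] )
{
    cli::Parser parser( argc, argv );
    parser.set_required<std::string>( "f", "file", "Path to an input file." );
    parser.set_optional<int>( "n", "count", 10, "Number of iterations." );
    parser.run_and_exit_if_error(); // prints usage/errors and exits on failure

    std::string const file = parser.get<std::string>( "f" );
    int const count = parser.get<int>( "n" );
    return 0;
}
```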
/Src/main.cpp:
--------------------------------------------------------------------------------
1 | //-------------------------------------------------------------------------
2 | // Simple back-propagation neural network example
3 | // 2017 - Bobby Anguelov
4 | // MIT license: https://opensource.org/licenses/MIT
5 | //-------------------------------------------------------------------------
6 |
7 | #include "NeuralNetwork/neuralNetworkTrainer.h"
8 | #include "NeuralNetwork/TrainingDataReader.h"
9 | #include <iostream>
10 |
11 | #if _MSC_VER
12 | #pragma warning(push, 0)
13 | #pragma warning(disable: 4702)
14 | #endif
15 |
16 | #include "cmdParser.h"
17 |
18 | #if _MSC_VER
19 | #pragma warning(pop)
20 | #endif
21 |
22 | //-------------------------------------------------------------------------
23 |
24 | int main( int argc, char* argv[] )
25 | {
26 | cli::Parser cmdParser( argc, argv );
27 | cmdParser.set_required( "d", "DataFile", "Path to training data csv file." );
28 | cmdParser.set_required( "in", "NumInputs", "Num Input neurons." );
29 | cmdParser.set_required( "hidden", "NumHidden", "Num Hidden neurons." );
30 | cmdParser.set_required( "out", "NumOutputs", "Num Output neurons." );
31 |
32 | if ( !cmdParser.run() )
33 | {
34 | std::cout << "Invalid command line arguments";
35 | return 1;
36 | }
37 |
38 | std::string const trainingDataPath = cmdParser.get<std::string>( "d" );
39 | uint32_t const numInputs = cmdParser.get<uint32_t>( "in" );
40 | uint32_t const numHidden = cmdParser.get<uint32_t>( "hidden" );
41 | uint32_t const numOutputs = cmdParser.get<uint32_t>( "out" );
42 |
43 | BPN::TrainingDataReader dataReader( trainingDataPath, numInputs, numOutputs );
44 | if ( !dataReader.ReadData() )
45 | {
46 | return 1;
47 | }
48 |
49 | // Create neural network
50 | BPN::Network::Settings networkSettings{ numInputs, numHidden, numOutputs };
51 | BPN::Network nn( networkSettings );
52 |
53 | // Create neural network trainer
54 | BPN::NetworkTrainer::Settings trainerSettings;
55 | trainerSettings.m_learningRate = 0.001;
56 | trainerSettings.m_momentum = 0.9;
57 | trainerSettings.m_useBatchLearning = false;
58 | trainerSettings.m_maxEpochs = 200;
59 | trainerSettings.m_desiredAccuracy = 90;
60 |
61 | BPN::NetworkTrainer trainer( trainerSettings, &nn );
62 | trainer.Train( dataReader.GetTrainingData() );
63 |
64 | return 0;
65 | }
66 |
--------------------------------------------------------------------------------