├── .cproject
├── .gitignore
├── .project
├── .settings
│   ├── language.settings.xml
│   └── org.eclipse.cdt.managedbuilder.core.prefs
├── DT.c
├── DT.h
├── ELM.h
├── ExampleAlgoTemplate.c
├── ExampleAlgoTemplate.h
├── HW_TripleExpoSmoothing.c
├── HW_TripleExpoSmoothing.h
├── KNN.c
├── KNN.h
├── LICENSE.md
├── Preprocess.c
├── Preprocess.h
├── README.md
├── RF.c
├── RF.h
├── SVM.c
├── SVM.h
├── Test.c
├── Test.h
├── docs
│   └── CONTRIBUTING.md
└── main.c
/.cproject:
--------------------------------------------------------------------------------
(XML content lost: the Eclipse CDT build configuration was stripped during extraction; only empty numbered lines survived.)
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.exe
2 | cfg/
3 | .vscode/
4 | energydata_complete/
5 | *.rar
6 | source/
7 | heart/
8 | Debug/
9 | dlm/
10 | ds/
11 |
--------------------------------------------------------------------------------
/.project:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <projectDescription>
 3 |   <name>Micro-LM</name>
 4 |   <comment></comment>
 5 |   <projects>
 6 |   </projects>
 7 |   <buildSpec>
 8 |     <buildCommand>
 9 |       <name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
10 |       <triggers>clean,full,incremental,</triggers>
11 |       <arguments>
12 |       </arguments>
13 |     </buildCommand>
14 |     <buildCommand>
15 |       <name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
16 |       <triggers>full,incremental,</triggers>
17 |       <arguments>
18 |       </arguments>
19 |     </buildCommand>
20 |   </buildSpec>
21 |   <natures>
22 |     <nature>org.eclipse.cdt.core.cnature</nature>
23 |     <nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
24 |     <nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
25 |   </natures>
26 | </projectDescription>
27 |
--------------------------------------------------------------------------------
/.settings/language.settings.xml:
--------------------------------------------------------------------------------
(XML content lost: stripped during extraction; nothing recoverable.)
--------------------------------------------------------------------------------
/.settings/org.eclipse.cdt.managedbuilder.core.prefs:
--------------------------------------------------------------------------------
1 | eclipse.preferences.version=1
2 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/CPATH/delimiter=;
3 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/CPATH/operation=remove
4 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/C_INCLUDE_PATH/delimiter=;
5 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/C_INCLUDE_PATH/operation=remove
6 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/append=true
7 | environment/buildEnvironmentInclude/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/appendContributed=true
8 | environment/buildEnvironmentLibrary/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/LIBRARY_PATH/delimiter=;
9 | environment/buildEnvironmentLibrary/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/LIBRARY_PATH/operation=remove
10 | environment/buildEnvironmentLibrary/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/append=true
11 | environment/buildEnvironmentLibrary/cdt.managedbuild.config.gnu.mingw.exe.debug.1421827040/appendContributed=true
12 |
--------------------------------------------------------------------------------
/DT.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef DT
4 |
5 | #include "dt.h"
6 | #include
7 | #include
8 |
9 |
10 | #ifdef REGRESSION
11 | float (*pRegress)(float X[]) = decisionTree_regression;
12 | #else
13 | int (*pClassf)(float X[]) = decisionTree_classification;
14 | #endif
15 |
16 |
17 | #ifndef REGRESSION
18 | int decisionTree_classification(float X[])
19 | {
20 | int currentNode = 0;
21 | while (1)
22 | {
23 | if (feature[currentNode] >= 0)
24 | {
25 | if (X[feature[currentNode]] <= threshold[currentNode])
26 | {
27 | #ifdef DEBUG
28 | printf("\ncurrent node: %d, X:%f <= %f\n", currentNode, X[feature[currentNode]], threshold[currentNode]);
29 | fflush(stdout);
30 | #endif
31 | currentNode = children_left[currentNode];
32 | }
33 | else
34 | {
35 | #ifdef DEBUG
36 | printf("\ncurrent node: %d, X:%f > %f\n", currentNode, X[feature[currentNode]], threshold[currentNode]);
37 | fflush(stdout);
38 | #endif
39 | currentNode = children_right[currentNode];
40 | }
41 | }
42 | else
43 | { // Leaf node
44 | /*{
45 | int j;
46 | int maxClass;
47 | int maxValue = 0;
48 | for (j = 0; j < N_CLASS; j++)
49 | {
50 | if (values[currentNode][j] >= maxValue)
51 | {
52 | maxValue = values[currentNode][j];
53 | maxClass = target_classes[j];
54 | }
55 | }
56 | return maxClass;
57 | }
58 | break;*/
59 | int j;
60 | for (j = 0; j < N_LEAVES; j++) {
61 | if (leaf_nodes[j][0] == currentNode) {
62 | int maxIdx = leaf_nodes[j][1];
63 | int maxClass = target_classes[maxIdx];
64 | #ifdef DEBUG
65 | printf("\ncurrent node: %d, decision: %d\n", currentNode, maxClass);
66 | fflush(stdout);
67 | #endif
66 | return maxClass;
67 | }
68 | }
69 | }
70 | }
71 | }
72 | #else
73 | float decisionTree_regression(float X[])
74 | {
75 | int currentNode = 0;
76 | while (1)
77 | {
78 | if (feature[currentNode] >= 0)
79 | {
80 | if (X[feature[currentNode]] <= threshold[currentNode])
81 | {
82 | currentNode = children_left[currentNode];
83 | }
84 | else
85 | {
86 | currentNode = children_right[currentNode];
87 | }
88 | }
89 | else
90 | { // Leaf node
91 | return values[currentNode][0];
92 | }
93 | }
94 | }
95 | #endif
96 |
97 | #endif
98 |
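The traversal above relies on the flat-array tree encoding provided by the generated DT_params.h: feature[n] is the feature index tested at node n (negative marks a leaf), threshold[n] is the split value, children_left/children_right hold child indices, and leaf_nodes maps a leaf's node id to the index of its majority class. A minimal sketch of what such arrays could look like for a one-split tree (hypothetical values; the real header is generated from a trained model):

    /* Hypothetical DT_params.h for a depth-1 tree:
       node 0 tests feature 2 against 0.5; nodes 1 and 2 are leaves (-2). */
    int   feature[]        = { 2, -2, -2 };
    float threshold[]      = { 0.5f, 0.f, 0.f };
    int   children_left[]  = { 1, -1, -1 };
    int   children_right[] = { 2, -1, -1 };
    int   target_classes[] = { 0, 1 };
    int   leaf_nodes[][2]  = { { 1, 0 },    /* leaf node 1 -> class index 0 */
                               { 2, 1 } };  /* leaf node 2 -> class index 1 */
    #define N_LEAVES 2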
--------------------------------------------------------------------------------
/DT.h:
--------------------------------------------------------------------------------
1 | #ifdef DT
2 |
3 | #ifndef DT_H
4 |
5 | #define DT_H
6 |
7 | #include "ELM.h"
8 | #include <stdio.h>
9 |
10 |
11 | #include "PPParams.h"
12 | #include "DT_params.h"
13 |
14 |
15 | #ifdef REGRESSION
16 | float decisionTree_regression(float []);
17 | #else
18 | int decisionTree_classification(float []);
19 | #endif // REGRESSION
20 |
21 | #endif
22 |
23 | #endif // DT
24 |
--------------------------------------------------------------------------------
/ELM.h:
--------------------------------------------------------------------------------
1 | #ifndef ELM_H
2 | #define ELM_H
3 |
4 | #include <stdio.h>
5 |
6 | /*
7 | * Begin configuration
8 | */
9 |
10 | //Define the ML algorithm
11 |
12 | //#define SVM //Support Vector Machine
13 | //#define DT //Decision Tree
14 | //#define KNN //K-Nearest Neighbours
15 | //#define TripleES //Holt-Winters Triple exponential smoothing
16 | #define RF //Random Forest
17 |
18 |
19 | //Define if it is a regression problem
20 |
21 | //#define REGRESSION
22 |
23 | //Define if it's a test on the Test Set
24 |
25 | //#define DS_TEST
26 |
27 | //Define if you need debug messages
28 | //#define DEBUG
29 |
30 |
31 | //end configuration
32 |
33 |
34 | //guard to prevent compiling without an algorithm
35 | #if !defined(RF) && !defined(DT) && !defined(KNN) && !defined(SVM) && !defined(TripleES)
36 | #error You have to define one algorithm
37 | #endif
38 |
39 | #ifdef DS_TEST
40 | #include "testing_set.h"
41 | #include "Test.h"
42 | #endif
43 |
44 | #include "Preprocess.h"
45 |
46 |
47 |
48 | #ifdef SVM
49 | #include "SVM.h"
50 | #endif
51 |
52 | #ifdef DT
53 | #include "DT.h"
54 | #endif
55 |
56 | #ifdef KNN
57 | #include "KNN.h"
58 | #endif
59 |
60 | #ifdef RF
61 | #include "RF.h"
62 | #endif
63 |
64 | #ifdef TripleES
65 | #include "TripleES_params.h"
66 | int* HW_TripleExpoSmoothing(int arrayD[], int vlen, double alpha, double beta,
67 | double gamma, int slen, int n_preds, double scaling_factor);
68 | #endif
69 |
70 |
71 | #ifdef REGRESSION
72 | extern float (*pRegress)(float X[]);
73 | #else
74 | extern int (*pClassf)(float X[]);
75 | #endif
76 |
77 | #endif
78 |
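The block above is the entire build-time configuration: exactly one algorithm macro must be defined, ELM.h then includes the matching header, and the selected algorithm exports its entry point through the pClassf/pRegress function pointers so main.c and Test.c stay algorithm-agnostic. For example, switching this build from Random Forest classification to KNN regression with a test-set run would only touch these defines (a sketch; the matching KNN_params.h, PPParams.h and testing_set.h must have been generated for the same dataset):

    //#define SVM      //Support Vector Machine
    //#define DT       //Decision Tree
    #define KNN        //K-Nearest Neighbours
    //#define TripleES //Holt-Winters Triple exponential smoothing
    //#define RF       //Random Forest

    #define REGRESSION //predict a float instead of a class label
    #define DS_TEST    //run RunTest() over the embedded testing set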
--------------------------------------------------------------------------------
/ExampleAlgoTemplate.c:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | #include "ELM.h"
5 |
6 | #ifdef NEW_ALGO
7 |
8 | #include "newAlgo.h"
 9 | #include <stdio.h>
10 | #include <stdlib.h>
11 |
12 |
13 | #ifdef REGRESSION
14 | float (*pRegress)(float X[]) = newAlgo_regression;
15 | #else
16 | int (*pClassf)(float X[]) = newAlgo_classification;
17 | #endif
18 |
19 |
20 | #ifndef REGRESSION
21 | int newAlgo_classification(float X[])
22 | {
23 | // classification algorithm implementation
24 | }
25 | #else
26 | float newAlgo_regression(float X[])
27 | {
28 | // regression algorithm implementation
29 | }
30 | #endif
31 |
32 | #endif
33 |
34 | */
35 |
--------------------------------------------------------------------------------
/ExampleAlgoTemplate.h:
--------------------------------------------------------------------------------
1 | /*
2 | ///////////////////////////////////////////////////////////////////////////
 3 | This is a template for implementing a new ML algorithm in Micro-LM so that it is compatible with all the features of this application.
 4 | To make it fully compatible with the test function, ELM.h must be modified too: a #define for the new algorithm must be added.
 5 | Also, don't forget to add your define to the guard that prevents compiling without a selected algorithm.
6 |
7 |
8 | #ifdef NEW_ALGO
9 |
10 | #ifndef NEW_ALGO_H
11 |
12 | #define NEW_ALGO_H
13 |
14 | #include "ELM.h"
15 | #include <stdio.h>
16 |
17 |
18 | #include "PPParams.h"
19 | #include "Algo_params.h"
20 |
21 |
22 | #ifdef REGRESSION
23 | float newAlgo_regression(float []);
24 | #else
25 | int newAlgo_classification(float []);
26 | #endif // REGRESSION
27 |
28 | #endif //NEW_ALGO_H
29 |
30 | #endif // NEW_ALGO
31 |
32 |
33 | */
34 |
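Concretely, registering the new algorithm in ELM.h amounts to three small edits, sketched below with the template's placeholder names (NEW_ALGO and newAlgo.h are stand-ins):

    //1. add the algorithm switch next to the existing ones
    #define NEW_ALGO //New algorithm

    //2. extend the guard that prevents compiling without an algorithm
    #if !defined(RF) && !defined(DT) && !defined(KNN) && !defined(SVM) \
        && !defined(TripleES) && !defined(NEW_ALGO)
    #error You have to define one algorithm
    #endif

    //3. include the new header
    #ifdef NEW_ALGO
    #include "newAlgo.h"
    #endif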
--------------------------------------------------------------------------------
/HW_TripleExpoSmoothing.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef TripleES
4 |
5 | #include <stdio.h>
6 | #include <stdlib.h>
7 | #include "HW_TripleExpoSmoothing.h"
8 | #include "TripleES_params.h"
9 |
10 | int* HW_TripleExpoSmoothing(int arrayD[], int vlen, double alpha, double beta, double gamma,
11 | int slen, int n_preds, double scaling_factor)
12 | {
13 | //initial trend
14 | int tot = vlen + n_preds;
15 | double sum = 0.0;
16 | double initial_trend = 0.0;
17 | for (int i = 0; i < slen; i++)
18 | {
19 | sum += (double)(arrayD[i + slen] - arrayD[i]) / (double)slen; //cast before dividing to avoid integer truncation
20 | }
21 | initial_trend = sum / (double)slen;
22 |
23 | //initial_seasonal_components
24 | double *seasonals;
25 | seasonals = malloc(tot * sizeof(double));
26 | double *season_averages;
27 | season_averages = malloc(tot * sizeof(double));
28 |
29 | for (int j = 0; j < tot; j++)
30 | {
31 | seasonals[j] = 0.0;
32 | season_averages[j] = 0.0;
33 | }
34 |
35 | int n_seasons = (int)(vlen / slen);
36 | double somma = 0.0;
37 | //let's calculate season averages
38 | for (int j = 0; j < n_seasons; j++)
39 | {
40 | somma = 0.0; //reset the accumulator for each season
41 | for (int h = slen * j; h < ((slen * j) + slen); h++)
42 | {
43 | somma += arrayD[h];
44 | }
45 | season_averages[j] = somma / (double) slen;
46 | }
46 | //let's calculate initial values
47 | for (int i = 0; i < slen; i++)
48 | {
49 | double sum_of_vals_over_avg = 0.0;
50 | for (int j = 0; j < n_seasons; j++)
51 | {
52 | sum_of_vals_over_avg += arrayD[(slen * j) + i] - season_averages[j];
53 | }
54 | seasonals[i] = sum_of_vals_over_avg / (double)n_seasons;
55 | }
56 |
57 |
58 | //triple_exponential_smoothing
59 | double smooth = 0.0;
60 | double trend = 0.0;
61 |
62 | int *result;
63 | result = malloc(tot * sizeof(int));
64 | double *Smooth;
65 | Smooth = malloc(tot * sizeof(double));
66 | double *Trend;
67 | Trend = malloc(tot * sizeof(double));
68 | double *Season;
69 | Season = malloc(tot * sizeof(double));
70 | double *PredictedDeviation;
71 | PredictedDeviation = malloc(tot * sizeof(double));
72 |
73 | for (int j = 0; j < tot; j++)
74 | {
75 | result[j] = 0;
76 | Smooth[j] = 0.0;
77 | Trend[j] = 0.0;
78 | Season[j] = 0.0;
79 | PredictedDeviation[j] = 0.0;
80 | }
81 |
82 | for (int i = 0; i < tot; i++)
83 | {
84 | if (i == 0) //components initialization
85 | {
86 | smooth = (double)arrayD[i];
87 | trend = initial_trend;
88 | result[i] = arrayD[i];
89 | Smooth[i] = smooth;
90 | Trend[i] = trend;
91 | Season[i] = seasonals[i % slen];
92 |
93 | PredictedDeviation[i] = 0;
94 | continue;
95 | }
96 | if (i >= vlen) //predicting
97 | {
98 | int m = i - vlen + 1;
99 | result[i] = (int)(((smooth + m * trend) + seasonals[i % slen]));
100 |
101 | // when predicting we increase uncertainty on each step
102 | PredictedDeviation[i] = PredictedDeviation[i-1] * 1.01;
103 | }
104 | else
105 | {
106 | double val = (double)arrayD[i];
107 | double last_smooth = smooth;
108 | smooth = (alpha * (val - seasonals[i % slen])) + ((1 - alpha) * (smooth + trend));
109 | trend = (beta * (smooth - last_smooth)) + ((1 - beta) * trend);
110 | seasonals[i % slen] = (gamma * (val - smooth)) + ((1 - gamma) * seasonals[i % slen]);
111 | result[i] = (int)(smooth + trend + seasonals[i % slen]);
112 |
113 | Smooth[i] = smooth;
114 | Trend[i] = trend;
115 | Season[i] = seasonals[i % slen];
116 | }
117 | }
118 | int *res;
119 | res = malloc(n_preds*sizeof(int));
120 | for (int i = 0; i < n_preds; i++)
121 | {
122 | res[i] = result[vlen + i]; //copy only the forecast horizon
123 | }
124 |
125 | free(seasonals);
126 | free(season_averages);
127 | free(result);
128 | free(Smooth);
129 | free(Trend);
130 | free(Season);
131 | free(PredictedDeviation);
132 |
133 | return res; //caller is responsible for freeing
134 | }
135 |
136 | #endif
137 |
--------------------------------------------------------------------------------
/HW_TripleExpoSmoothing.h:
--------------------------------------------------------------------------------
 1 | #ifndef HW_TRIPLEEXPOSMOOTHING_H
 2 | #define HW_TRIPLEEXPOSMOOTHING_H
 3 |
 4 |
 5 | #include <stdio.h>
6 |
7 | #include "TripleES_params.h"
8 |
9 | int* HW_TripleExpoSmoothing(int arrayD[], int vlen, double alpha, double beta, double gamma,
10 | int slen, int n_preds, double scaling_factor);
11 |
12 | #endif
13 |
14 |
15 |
16 |
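For reference, HW_TripleExpoSmoothing implements the standard additive Holt-Winters recurrences, where alpha, beta and gamma are the level, trend and season smoothing factors and L is the season length (slen):

    level:    l_t = alpha * (y_t - s_{t-L}) + (1 - alpha) * (l_{t-1} + b_{t-1})
    trend:    b_t = beta * (l_t - l_{t-1}) + (1 - beta) * b_{t-1}
    season:   s_t = gamma * (y_t - l_t) + (1 - gamma) * s_{t-L}
    forecast: y_{t+m} = l_t + m * b_t + s_{(t+m) mod L}

In the code, smooth and trend carry l_t and b_t, seasonals[i % slen] stores the rotating seasonal components, and ALPHA, BETA, GAMMA and SEASON_LENGTH come from the generated TripleES_params.h.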
--------------------------------------------------------------------------------
/KNN.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef KNN
4 |
5 | #include "KNN.h"
6 | #include <stdlib.h> //qsort
7 |
8 | #include "math.h"
9 | #include // To check if works on STM32
10 | #include // To check if works on STM32
11 |
12 | #ifdef DS_TEST
13 | #ifdef REGRESSION
14 | float (*pRegress)(float X[]) = knn_regression;
15 | #else
16 | int (*pClassf)(float X[]) = knn_classification;
17 | #endif
18 | #endif
19 |
20 |
21 | struct neighbour{
22 | int id;
23 | float score;
24 | #ifdef REGRESSION
25 | float label;
26 | #endif
27 | };
28 | int struct_cmp_by_score_dec(const void *, const void *);
29 |
30 | //int regressionLabels[N_CLASS]; //Attention!!!!!!!!
31 |
32 |
33 |
34 | #ifndef REGRESSION
35 | int knn_classification(float X[]) {
36 | // KNN
37 | // https://www.geeksforgeeks.org/weighted-k-nn/
38 | struct neighbour neighbours[N_TRAIN];
39 |
40 | int j;
41 | for(j=0; j < N_TRAIN; j++){
42 | neighbours[j].id = j;
43 |
44 | float acc=0;
45 | bool skip=false;
46 | int k;
47 | for(k=0; k < N_FEATURE; k++) {
48 | acc+=(X[k] - X_train[j][k])*(X[k] - X_train[j][k]);
49 | if (acc > 10000000) {
50 | neighbours[j].score = 0;
51 | skip=true;
52 | break;
53 | }
54 | }
55 | if (!skip){
56 | acc = sqrt(acc);
57 | if (acc < 0.00000001) {
58 | neighbours[j].score = 100000000;
59 | } else {
60 | neighbours[j].score = 1 / acc;
61 | }
62 | }
63 | }
64 | qsort(neighbours, N_TRAIN, sizeof(struct neighbour), struct_cmp_by_score_dec);
65 |
66 | {
67 | int n;
68 | float scores[N_CLASS];
69 | memset(scores, 0, N_CLASS*sizeof(float));
70 | for(n=0; n<K; n++) {
71 | scores[y_train[neighbours[n].id]] += neighbours[n].score; //weighted vote of the K nearest
72 | }
73 | int bestClass = -1;
74 | float bestScore = -1;
75 | for(n=0; n<N_CLASS; n++) {
76 | if (scores[n] > bestScore) {
77 | bestScore = scores[n];
78 | bestClass = n;
79 | }
80 | }
81 | return(bestClass);
82 | }
83 | }
84 | #endif
85 |
86 | #ifdef REGRESSION
87 | float knn_regression(float X[]) {
88 | // KNN
89 | // https://www.geeksforgeeks.org/weighted-k-nn/
90 | struct neighbour neighbours[N_TRAIN];
91 |
92 | int j;
93 | for(j=0; j < N_TRAIN; j++){
94 | neighbours[j].id = j;
95 |
96 | // if needed, could be used also for classification
97 | neighbours[j].label = y_train[j];
98 |
99 | float acc=0;
100 | bool skip=false;
101 | int k;
102 | for(k=0; k < N_FEATURE; k++) {
103 | acc+=(X[k] - X_train[j][k])*(X[k] - X_train[j][k]);
104 | if (acc > 10000000) {
105 | neighbours[j].score = 0;
106 | skip=true;
107 | break;
108 | }
109 | }
110 | if (!skip){
111 | acc = sqrt(acc);
112 | if (acc < 0.00000001) {
113 | neighbours[j].score = 100000000;
114 | } else {
115 | neighbours[j].score = 1 / acc;
116 | }
117 | }
118 | }
119 | qsort(neighbours, N_TRAIN, sizeof(struct neighbour), struct_cmp_by_score_dec);
120 |
121 | {
122 | float totalScore = 0;
123 | float pred = 0;
124 | int n = 0;
125 | for(n=0; n<K; n++) {
126 | pred += neighbours[n].label * neighbours[n].score; //score-weighted average of the K nearest labels
127 | totalScore += neighbours[n].score;
128 | }
129 |
130 | pred = pred / totalScore;
131 | return pred;
132 | }
133 | }
134 | #endif
135 |
136 |
137 |
138 |
139 |
140 |
141 | /* qsort struct comparison function (float score field) */
142 | int struct_cmp_by_score_dec(const void *a, const void *b)
143 | {
144 | struct neighbour *ia = (struct neighbour *)a;
145 | struct neighbour *ib = (struct neighbour *)b;
146 |
147 | return (int)(100000.f*ib->score - 100000.f*ia->score);
148 | /* float comparison: returns negative when a's score is higher
149 | (so a sorts first) and positive when b's is higher. The result is
150 | multiplied by 100000.0 to preserve the decimal fraction in the int cast */
151 | //Decreasing
152 | }
153 | #endif
154 |
155 |
156 |
157 |
158 |
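A note on the comparator above: casting a scaled float difference to int can overflow or collapse to zero when scores are very large (up to 1e8 here) or nearly equal. A branch-based comparator is a safer drop-in alternative with the same decreasing order (a sketch):

    int struct_cmp_by_score_dec(const void *a, const void *b)
    {
        const struct neighbour *ia = (const struct neighbour *)a;
        const struct neighbour *ib = (const struct neighbour *)b;
        if (ib->score > ia->score) return  1; //b sorts before a
        if (ib->score < ia->score) return -1; //a sorts before b
        return 0;
    }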
--------------------------------------------------------------------------------
/KNN.h:
--------------------------------------------------------------------------------
1 | #ifdef KNN
2 |
3 | #ifndef KNN_H
4 |
5 | #define KNN_H
6 |
7 | #include <stdio.h>
8 |
9 |
10 | #include "PPParams.h"
11 |
12 | #include "ELM.h"
13 | #include "training_set.h"
14 | #include "KNN_params.h"
15 |
16 | #ifdef REGRESSION
17 | float knn_regression(float[]);
18 | #else
19 | int knn_classification(float[]);
20 | #endif // REGRESSION
21 |
22 | #endif
23 |
24 | #endif // KNN
25 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Edge-Learning-Machine
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Preprocess.c:
--------------------------------------------------------------------------------
1 | #include "preprocess.h"
2 | //ccc
3 |
4 | float X_t[N_FEATURE];
5 |
6 |
7 |
8 | #ifdef STANDARD_SCALING
 9 | //void normalize_std(float* X, float* s_x, float* u_x) {
10 | void scale_std(float* X) {
11 | int i = 0;
12 | for (i = 0; i < N_ORIG_FEATURE; i++) {
13 | X[i] = (X[i] - u_x[i]) / s_x[i];
14 | }
15 | }
16 | #elif defined(MINMAX_SCALING)
17 | void scale_mm(float* X) {
18 | int i = 0;
19 | for (i = 0; i < N_ORIG_FEATURE; i++) {
20 | X[i] = (s_x[i] * X[i]) + m_x[i];
21 | }
22 | }
23 | #endif
24 |
25 |
26 | float *PCA_transform(float *X){
27 | int i=0, j=0;
28 | #ifdef DO_PCA
29 | for (i = 0; i < N_FEATURE; i++){
30 | X_t[i] = 0;
31 | for (j = 0; j < N_ORIG_FEATURE; j++){
32 | X_t[i] += components[i][j] * X[j]; /* assumed name: PCA projection matrix from PPParams.h */
33 | }
34 | }
35 | #else
36 | for (i = 0; i < N_FEATURE; i++)
37 | X_t[i] = X[i]; /* no PCA: pass features through */
38 | #endif
39 | return X_t;
40 | }

[Remainder of Preprocess.c, plus the Preprocess.h and README.md sections, lost to extraction damage.]

--------------------------------------------------------------------------------
/RF.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef RF
4 |
5 | #include "RF.h"
6 | #include <stdio.h>
7 | #include <stdbool.h>
8 |
9 |
10 | #ifdef REGRESSION
11 | float (*pRegress)(float X[]) = randomForest_regression;
12 | #else
13 | int (*pClassf)(float X[]) = randomForest_classification;
14 | #endif
15 |
16 |
17 |
18 | #ifndef REGRESSION
19 | int randomForest_classification(float X[])
20 | {
21 | int currentNode = 0;
22 | float probab[FOREST_DIM][N_CLASS];
23 | bool next_tree = 0;
24 |
25 | int maxClass = -1;
26 | float maxValue = 0;
27 |
28 | float result[N_CLASS];
29 |
30 | //compute the classification for each tree
31 | for (int i = 0; i < FOREST_DIM; i++)
32 | {
33 | next_tree = 0;
34 | currentNode = 0;
35 |
36 | #ifdef DEBUG
37 | printf("\n\ntree %d of the forest", i);
38 | #endif
39 | while (next_tree == 0)
40 | {
41 |
42 | //travel the tree
43 | if (*(forest_feature[i] + currentNode) >= 0)
44 | {
45 | if (X[*(forest_feature[i] + currentNode)] <= *(forest_threshold[i] + currentNode))
46 | {
47 | #ifdef DEBUG
48 | printf("\ncurrent node: %d, X:%f <= %f\n", currentNode, X[*(forest_feature[i] + currentNode)], *(forest_threshold[i] + currentNode));
49 | fflush(stdout);
50 | #endif
51 |
52 | currentNode = *(forest_children_left[i] + currentNode);
53 | }
54 | else
55 | {
56 | #ifdef DEBUG
57 | printf("\ncurrent node: %d, X:%f > %f\n", currentNode, X[*(forest_feature[i] + currentNode)], *(forest_threshold[i] + currentNode));
58 | fflush(stdout);
59 | #endif
60 |
61 | currentNode = *(forest_children_right[i] + currentNode);
62 | }
63 | }
64 | else
65 | { // Leaf node
66 |
67 | // each leaf node stores the number of training samples per class; the probability of each class is value / total samples
68 | int j, k;
69 | int total_samples = 0;
70 |
71 | for (k = 0; k < forest_num_leaves[i]; k++)
72 | {
73 | if (*(forest_leaves[i] + k * (N_CLASS + 2)) == currentNode)
74 | {
75 | for (j = 0; j < N_CLASS; j++)
76 | {
77 | total_samples += *(forest_leaves[i] + k * (N_CLASS + 2) + j + 2);
78 | }
79 | for (j = 0; j < N_CLASS; j++)
80 | {
81 | probab[i][j] = (float)*(forest_leaves[i] + k * (N_CLASS + 2) + j + 2) / (float)total_samples;
82 | #ifdef DEBUG
83 | printf("\nProbab class: %d = %f", j, probab[i][j]);
84 | fflush(stdout);
85 | #endif
86 | }
87 | }
88 |
89 | }
90 | next_tree = 1;
91 | }
92 |
93 | }
94 | }
95 | // once the probability array is built, apply soft majority voting:
96 | // for each class, the probabilities from all trees are added together, and the class with the highest sum wins
97 |
98 |
99 |
100 |
101 | for (int i = 0; i < FOREST_DIM; i++)
102 | {
103 | for (int j = 0; j < N_CLASS; j++)
104 | {
105 | if (i == 0)
106 | result[j] = 0; //init the array
107 | result[j] += probab[i][j];
108 | #ifdef DEBUG
109 | printf("\n\n\nResult probab class: %d = %f", j, result[j]);
110 | fflush(stdout);
111 | #endif
112 | }
113 | }
114 |
115 |
116 | for (int j = 0; j < N_CLASS; j++)
117 | {
118 | if (result[j] >= maxValue)
119 | {
120 | maxValue = result[j];
121 | maxClass = target_classes[j];
122 | }
123 | }
124 | return maxClass;
125 | }
126 |
127 |
128 |
129 | #else
130 | float randomForest_regression(float X[])
131 | {
132 | int currentNode = 0;
133 | bool next_tree = 0;
134 | float result = 0;
135 |
136 | for (int i = 0; i < FOREST_DIM; i++)
137 | {
138 | next_tree = 0;
139 | currentNode = 0; //restart from the root for each tree
139 |
140 | while (next_tree == 0)
141 | {
142 |
143 | //travel the tree
144 | if (*(forest_feature[i] + currentNode) >= 0)
145 | {
146 | if (X[*(forest_feature[i] + currentNode)] <= *(forest_threshold[i] + currentNode))
147 | {
148 | #ifdef DEBUG
149 | printf("\ncurrent node: %d, X:%f <= %f\n", currentNode, X[*(forest_feature[i] + currentNode)], *(forest_threshold[i] + currentNode));
150 | fflush(stdout);
151 | #endif
152 |
153 | currentNode = *(forest_children_left[i] + currentNode);
154 | }
155 | else
156 | {
157 | #ifdef DEBUG
158 | printf("\ncurrent node: %d, X:%f > %f\n", currentNode, X[*(forest_feature[i] + currentNode)], *(forest_threshold[i] + currentNode));
159 | fflush(stdout);
160 | #endif
161 |
162 | currentNode = *(forest_children_right[i] + currentNode);
163 | }
164 | }
165 | else
166 | { // Leaf node
167 | result += *(forest_values[i] + currentNode);
168 | next_tree = 1;
169 | }
170 | }
171 | }
172 | return result / (float)FOREST_DIM;
173 | }
173 | #endif
174 |
175 | #endif
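To make the soft majority voting concrete: with a hypothetical FOREST_DIM = 3 and N_CLASS = 2, per-tree leaf probabilities {0.8, 0.2}, {0.4, 0.6} and {0.7, 0.3} sum to {1.9, 1.1}, so target_classes[0] wins even though one tree preferred the other class. The voting step above, restated compactly (same logic as the loops in randomForest_classification, sketch only):

    //summed[j] accumulates probab[i][j] over all trees; the argmax wins
    int soft_majority_vote(float probab[][N_CLASS], int n_trees)
    {
        float summed[N_CLASS] = { 0 };
        int best = 0;
        for (int i = 0; i < n_trees; i++)
            for (int j = 0; j < N_CLASS; j++)
                summed[j] += probab[i][j];
        for (int j = 1; j < N_CLASS; j++)
            if (summed[j] > summed[best])
                best = j;
        return target_classes[best];
    }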
--------------------------------------------------------------------------------
/RF.h:
--------------------------------------------------------------------------------
1 | #ifdef RF
2 |
3 | #ifndef RF_H
4 |
5 | #define RF_H
6 |
7 | #include "ELM.h"
8 | #include
9 |
10 |
11 | #include "PPParams.h"
12 | #include "RF_params.h"
13 |
14 | #ifdef REGRESSION
15 | float randomForest_regression(float[]);
16 | #else
17 | int randomForest_classification(float[]);
18 | #endif //REGRESSION
19 |
20 | #endif //RF_H
21 |
22 | #endif //RF
23 |
24 |
--------------------------------------------------------------------------------
/SVM.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef SVM
4 |
5 | #include "svm.h"
6 | #include
7 | #include
8 |
9 |
10 | #ifdef REGRESSION
11 | float (*pRegress)(float X[]) = svm_regression;
12 | #else
13 | int (*pClassf)(float X[]) = svm_classification;
14 | #endif
15 |
16 |
17 | float *PCA_transform(float *);
18 |
19 |
20 |
21 |
22 | #ifndef REGRESSION
23 | int svm_classification(float X[])
24 | {
25 | //int m; //only used by the commented-out experiment below
26 | if (N_CLASS == 2)
27 | {
28 | float y = bias[0];
29 | int k;
30 | for (k = 0; k < N_FEATURE; k++)
31 | {
32 | y += support_vectors[0][k] * X[k];
33 | }
34 | if (y < 0)
35 | {
36 | return 0;
37 | }
38 | else
39 | {
40 | return 1;
41 | }
42 | }
43 | else
44 | {
45 | // float bestDistance = -1000000;
46 | // for (m = 0; m < N_CLASS; m++)
47 | // {
48 | // float y = bias[m];
49 | // int k;
50 | // for (k = 0; k < N_FEATURE; k++)
51 | // {
52 | // y += support_vectors[m][k] * X[k];
53 | // }
54 | // if (y > bestDistance)
55 | // {
56 | // bestDistance = y;
57 | // return m;
58 | // }
59 | // }
60 | float dot[WEIGTH_DIM];
61 | int out[WEIGTH_DIM];
62 | int prediction = 0;
63 | for (int i = 0; i < WEIGTH_DIM; i++)
64 | {
65 |
66 | dot[i] = bias[i];
67 | for (int k = 0; k < N_FEATURE; k++)
68 | {
69 | dot[i] += X[k] * support_vectors[i][k];
70 | }
71 | if(dot[i] > 0)
72 | {
73 | out[i] = 0;
74 | }
75 | else
76 | {
77 | out[i] = 1;
78 | }
79 | }
80 |
81 | for(int i = 0; i < WEIGTH_DIM; i++)
82 | {
83 | out[i] = Truth_Table[out[i]][i];
84 | }
85 |
86 | // find the most frequent class
87 | int max_count = 0;
88 | for(int i = 0; i < N_CLASS; i++)
89 | {
90 | int count = 0;
91 | for(int j = 0; j < WEIGTH_DIM; j++)
92 | {
93 | if(out[j] == i)
94 | {
95 | count++;
96 | }
97 | }
98 | if(count > max_count)
99 | {
100 | max_count = count;
101 | prediction = i;
102 | }
103 | }
104 | return prediction;
105 | }
106 | }
107 | #endif
108 |
109 | #ifdef REGRESSION
110 | float svm_regression(float X[])
111 | {
112 | float y = bias[0];
113 | int k;
114 | for (k = 0; k < N_FEATURE; k++)
115 | {
116 | y += support_vectors[0][k] * X[k];
117 | }
118 |
119 | /*
120 | #ifdef MINMAX_SCALING
121 | y = y / S_Y;
122 | #elif defined (STANDARD_SCALING)
123 | y = y * S_Y + U_Y;
124 | #endif
125 | */
126 | return y;
127 | }
128 | #endif
129 |
130 | #endif
131 |
132 |
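The multiclass branch above reads as one-vs-one voting: each of the WEIGTH_DIM linear classifiers separates one pair of classes, Truth_Table[out[i]][i] maps the sign of decision value i to a concrete class label, and the most frequent label wins. For a hypothetical N_CLASS = 3 there would be three pairwise classifiers, and the table could look like:

    /* Hypothetical Truth_Table for classes {0,1,2} and classifier pairs
       (0 vs 1), (0 vs 2), (1 vs 2); row 0 applies when dot[i] > 0,
       row 1 when dot[i] <= 0. Real values come from SVM_params.h. */
    int Truth_Table[2][3] = {
        { 0, 0, 1 },
        { 1, 2, 2 },
    };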
--------------------------------------------------------------------------------
/SVM.h:
--------------------------------------------------------------------------------
1 | #ifdef SVM
2 | #ifndef SVM_H
3 |
4 | #define SVM_H
5 |
6 | #include
7 |
8 |
9 | #include "PPParams.h"
10 | #include "SVM_params.h"
11 | #include "ELM.h"
12 |
13 | #ifdef REGRESSION
14 | float svm_regression(float []);
15 | #else
16 | int svm_classification(float []);
17 | #endif // REGRESSION
18 |
19 | #endif
20 |
21 | #endif // SVM
22 |
--------------------------------------------------------------------------------
/Test.c:
--------------------------------------------------------------------------------
1 | #include "ELM.h"
2 |
3 | #ifdef DS_TEST
4 |
5 | #include "Test.h"
6 |
7 |
8 | void RunTest()
9 | {
10 |
11 | #ifndef REGRESSION
12 | int predictedLabels[N_TEST];
13 | #else
14 | float predictions[N_TEST];
15 | #endif
16 |
17 | int nCorrect = 0;
18 | int i = 0;
19 | int j;
20 |
21 | float temp[N_FEATURE];
22 |
23 | float* X_t;
24 |
25 | for (i = 0; i < N_TEST; i++)
26 | {
27 | for (j = 0; j < N_FEATURE; j++)
28 | {
29 | #ifdef DEBUG
30 | // printf("\n Feature %d = %f ", j, X_test[i][j]);
31 | #endif
32 | temp[j] = X_test[i][j];
33 | }
34 | X_t = preprocess(temp);
35 |
36 | #ifdef DEBUG
37 | for (j = 0; j < N_FEATURE; j++)
38 | {
39 | // printf("\n Feature %d after PP = %f ", i, X_t[j]);
40 | }
41 | #endif
42 |
43 | #ifndef REGRESSION
44 | {
45 |
46 | predictedLabels[i] = (*pClassf)(X_t);
47 |
48 | printf("Test %d: Predicted %d Test %d\n", i, predictedLabels[i], y_test[i]);
49 | if (predictedLabels[i] == y_test[i])
50 | {
51 | nCorrect++;
52 | }
53 | }
54 | #else
55 | {
56 | predictions[i] = (*pRegress)(X_t);
57 | }
58 | #endif
59 | }
60 |
61 | #ifndef REGRESSION
62 | {
63 | printf("\nClassifier rate: %f\n", (float)nCorrect * 100.0f / (float)N_TEST);
64 | //fflush(stdout);
65 | }
66 | #else
67 | {
68 | //TBD, figure for regression
69 | }
70 | #endif
71 | }
72 |
73 | #endif //DS_TEST
74 |
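For the regression branch left TBD above, the usual figure is an aggregate error over the test set. A minimal sketch that could be dropped into RunTest (assuming y_test holds numeric targets when REGRESSION is defined):

    //mean squared error and mean absolute error over the predictions array
    float mse = 0.0f, mae = 0.0f;
    for (i = 0; i < N_TEST; i++) {
        float err = predictions[i] - (float)y_test[i];
        mse += err * err;
        mae += (err < 0 ? -err : err);
    }
    printf("\nMSE: %f  MAE: %f\n", mse / N_TEST, mae / N_TEST);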
--------------------------------------------------------------------------------
/Test.h:
--------------------------------------------------------------------------------
1 | #ifdef DS_TEST
2 |
3 | #ifndef TEST_H
4 | #define TEST_H
5 |
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include "ELM.h"
9 |
10 | void RunTest();
11 |
12 |
13 | #endif
14 | #endif
--------------------------------------------------------------------------------
/docs/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## How to contribute to ELM Micro-LM
2 |
3 | #### **Did you find a bug?**
4 |
5 | * **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/Edge-Learning-Machine/Micro-LM/issues).
6 |
7 | * If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/Edge-Learning-Machine/Micro-LM/issues/new). Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
8 |
9 | #### **Did you write a patch that fixes a bug?**
10 |
11 | * Open a new GitHub pull request with the patch.
12 |
13 | * Ensure the description clearly describes the problem and solution. Include the relevant issue number if applicable.
14 |
15 | #### **Do you intend to add a new feature or change an existing one?**
16 |
17 | * Suggest your change by writing to franz at elios.unige.it, then start writing code.
18 |
19 | * Do not open an issue on GitHub until you have collected positive feedback about the change. GitHub issues are primarily intended for bug reports and fixes.
20 |
21 | #### **Do you have questions about the source code?**
22 |
23 | * Ask any question about how to use ELM to franz at elios.unige.it.
24 |
25 | Thank you!
26 |
27 | The ELM Team
28 |
--------------------------------------------------------------------------------
/main.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 |
3 | #include "ELM.h"
4 |
5 |
6 | int main() {
7 |
8 | #ifdef DS_TEST
9 | //run the appropriate test using the configuration from elm.h
10 | RunTest();
11 | return 0;
12 | #endif
13 |
14 | #ifdef SVM
15 |
16 | #ifdef REGRESSION
17 | float X[] = { -0.8322759271291997, 0.3636749003861144, -0.17494119397906097, 0.07352810470119044, 0.1553397535369414, -0.06761655411472672, 0.0855392275355406, 0.04066219161654173, 0.11185559235083005, -0.08477616078227904, -0.15167252286243968, 0.07126487390801126, -0.03208499724042752, -0.02247560477462619, 0.03957488251247664, -0.002522386112650082, -0.014566087847607604, -0.04683558153996702, 0.04913937521052978, 0.029633163164301403, -0.017169611237453017, -0.041526437737699026, 0.00774344884336123, 0.0014122109847697463, 0.007470577064585154 };
18 | float* X_t = preprocess(X);
19 | (*pRegress)(X_t);
20 | #else
21 | float X[] = { 1, 2, 3, 4, 5, 6 }; //example input; must match N_ORIG_FEATURE
22 | //float Y[] = { -0.20647835151039698, -0.4513772469140586, -0.22887836700722633, -0.10757521477907099 };
23 | float* X_t = preprocess(X);
24 | (*pClassf)(X_t);
25 | #endif
26 | #endif
27 |
28 |
29 | #ifdef DT
30 |
31 | #ifdef REGRESSION
32 | float X[] = { -0.8322759271291997, 0.3636749003861144, -0.17494119397906097, 0.07352810470119044, 0.1553397535369414, -0.06761655411472672, 0.0855392275355406, 0.04066219161654173, 0.11185559235083005, -0.08477616078227904, -0.15167252286243968, 0.07126487390801126, -0.03208499724042752, -0.02247560477462619, 0.03957488251247664, -0.002522386112650082, -0.014566087847607604, -0.04683558153996702, 0.04913937521052978, 0.029633163164301403, -0.017169611237453017, -0.041526437737699026, 0.00774344884336123, 0.0014122109847697463, 0.007470577064585154 };
33 | float* X_t = preprocess(X);
34 | (*pRegress)(X_t);
35 | #else
36 | //float X[] = { 41,0,1,130,204,0,0,172,0,1.4,2,0,2 };
37 | //float X[] = { 56,1,1,120,236,0,1,178,0,0.8,2,0,2 };
38 | float X[] = { 57,0,1,130,236,0,0,174,0,0,1,1,2 };
39 |
40 | float* X_t = preprocess(X);
41 | int out = (*pClassf)(X_t);
42 | printf("%d", out);
43 |
44 | #endif
45 | #endif
46 |
47 |
48 | #ifdef KNN
49 |
50 | #ifdef REGRESSION
51 | float X[] = { -0.8322759271291997, 0.3636749003861144, -0.17494119397906097, 0.07352810470119044, 0.1553397535369414, -0.06761655411472672, 0.0855392275355406, 0.04066219161654173, 0.11185559235083005, -0.08477616078227904, -0.15167252286243968, 0.07126487390801126, -0.03208499724042752, -0.02247560477462619, 0.03957488251247664, -0.002522386112650082, -0.014566087847607604, -0.04683558153996702, 0.04913937521052978, 0.029633163164301403, -0.017169611237453017, -0.041526437737699026, 0.00774344884336123, 0.0014122109847697463, 0.007470577064585154 };
52 | float* X_t = preprocess(X);
53 | (*pRegress)(X_t);
54 | #else
55 | float X[] = { -0.01419929665534347, 0.7123654720811174, 0.24148214073025193, 1.0513512053890597, 0.7123654720811174, 0.24148214073025193, 1.0513512053890597, 0.7123654720811174, 0.24148214073025193, 1.0513512053890597 };
56 | float Y[] = { -0.20647835151039698, -0.4513772469140586, -0.22887836700722633, -0.10757521477907099 };
57 | float* X_t = preprocess(X);
58 | (*pClassf)(X_t);
59 | #endif
60 | #endif
61 |
62 |
63 | #ifdef TripleES
64 | #include "TripleES_params.h"
65 | //Holt-Winters model coefficients
66 |
67 | int n_preds = 24; //predictions horizon
68 |
69 | int arrayD[] = { 80115, 79885, 89325, 101930, 121630, 116475, 106495, 102795, 108055, 116125, 131030, 149020, 157590, 150715, 149295, 150100, 144780, 150690, 163840, 166235, 139520, 105895, 96780, 82520, 80125, 75335, 85105, 102080, 125135, 118030, 109225, 102475, 102240, 115840, 130540, 144325, 148970, 149150, 148040, 148810, 149830, 150570, 149440, 150520, 129130, 103815, 92175, 82765, 76315, 75130, 82640, 88795, 118430, 115190, 110940, 98860, 104185, 108665, 126640, 140435, 152470, 146275, 148020, 147735, 145750, 149285, 159725, 161085, 135520, 112945, 100200, 87615, 87835, 88845, 92350, 104465, 115940, 128950, 141385, 144440, 143250, 133190, 131240, 142480, 157375, 152500, 153735, 151195, 150645, 147435, 152525, 146875, 125245, 117925, 96700, 93610, 89060, 89345, 90575, 98290, 112570, 129470, 141405, 152560, 152580, 141170, 147550, 161110, 166335, 166780, 163140, 157305, 159055, 160020, 168345, 169900, 142710, 112955, 97345, 81675, 79510, 78350, 88045, 99790, 123780, 111325, 99440, 97655, 97655, 102565, 119930, 135755, 140120, 141730, 142220, 145360, 145335, 150410, 161520, 153415, 134720, 107065, 95045, 79515, 78335, 74670, 81990, 97950, 119345, 113115, 98880, 94000, 93660, 104185, 119750, 135990, 146455, 139165, 147225, 144935, 151370, 156080, 161385, 165010, 134090, 105585, 92855, 79270, 79980, 78110, 85785, 100010, 123880, 116335, 104290, 101440, 97635, 108265, 121250, 140850, 138555, 140990, 141525, 141590, 140610, 139515, 146215, 142425, 123945, 101360, 88170, 76050, 70335, 72150, 80195, 94945, 121910, 113950, 106495, 97290, 98860, 105635, 114380, 132335, 146630, 141995, 142815, 146020, 152120, 151790, 155665, 155890, 123395, 103080, 95155, 80285 };
70 |
71 | int vlen = sizeof(arrayD) / sizeof(int);
72 |
73 | int* finalRes = HW_TripleExpoSmoothing(arrayD, vlen, ALPHA, BETA, GAMMA, SEASON_LENGTH, n_preds, SCALING_FACTOR);
74 |
75 | for (int i = 0; i < n_preds; i++)
76 | {
77 | printf("Triple Exponential Smoothing for season length=%d and n_preds=%d, prediction nr. %d has value: %d\n", SEASON_LENGTH, n_preds, i + 1, finalRes[i]);
78 | }
79 |
80 | free(finalRes);
81 |
82 | #endif
83 |
84 |
85 | #ifdef RF
86 |
87 | //float X[] = { 41,0,1,130,204,0,0,172,0,1.4,2,0,2 };
88 | //float X[] = { 56,1,1,120,236,0,1,178,0,0.8,2,0,2 };
89 | float X[] = { 57,0,1,130,236,0,0,174,0,0,1,1,2 };
90 |
91 | float* X_t = preprocess(X);
92 | int out = (*pClassf)(X_t);
93 | printf("\n%d\n", out);
94 |
95 | #endif
96 | return 0;
97 | }
98 |
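As a build note: the checked-in Eclipse CDT files target a MinGW managed build, but the tree is plain C99, so a manual build is straightforward provided the generated parameter headers referenced above (PPParams.h, RF_params.h, training_set.h, ...), which are not part of this tree, are on the include path. A sketch with GCC:

    gcc -std=c99 -O2 -I path/to/generated/params *.c -o micro-lm -lm
    ./micro-lm

The -lm is needed because KNN.c uses sqrt from math.h.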
--------------------------------------------------------------------------------