├── .gitignore ├── 01_decision_tree ├── arvore_decisao.png ├── decisionTree.ipynb ├── decisionTree │ └── decisionTree.ino └── decision_tree_classifier.h ├── 02_random_forest ├── randomForestClassifier.ipynb ├── randomForestClassifier │ └── randomForestClassifier.ino ├── randomForestRegressor.ipynb ├── randomForestRegressor │ └── randomForestRegressor.ino ├── random_forest_classifier.h ├── random_forest_classifier.png ├── random_forest_regressor.h └── random_forest_regressor.png ├── 03_XGBoost ├── XGBClassifier.h ├── XGBoostClassifier.ipynb └── XGBoostClassifier │ ├── XGBClassifier.h │ └── XGBoostClassifier.ino ├── 04_GaussianNB ├── GaussianNB │ ├── GaussianNB.h │ └── GaussianNB.ino └── Guassian.ipynb ├── 05_support_vector_machine ├── SVMClassifier.ipynb └── SVMClassifier │ ├── SVMClassifier.h │ └── SVMClassifier.ino ├── 06_SEFR ├── SEFRClassifier.ipynb └── SEFRClassifier │ ├── SEFRClassifier.h │ └── SEFRClassifier.ino ├── 07_principal_components_analysis ├── PCA.ipynb ├── PCA │ ├── PCA.h │ └── PCA.ino └── data │ └── decathlon.csv ├── 08_Logistic_Regressor ├── LogisticRegressor.ipynb └── LogisticRegressor │ ├── LogisticRegressor.h │ └── LogisticRegressor.ino ├── 09_K_Means ├── Kmeans.ipynb ├── Kmeans │ ├── KMeans.h │ └── Kmeans.ino └── data │ └── Mall_Customers.csv ├── 10_Linear_Regressor ├── LinearRegressor.ipynb └── LinearRegressor │ ├── LinearRegressor.h │ └── LinearRegressor.ino ├── 11_GMM ├── GMM.ipynb ├── GMM │ ├── GMM.h │ └── GMM.ino └── data │ └── Mall_Customers.csv ├── 12_MLP ├── MLP.ipynb ├── MLP │ ├── MLP.ino │ └── model.h ├── data │ └── FuelConsumption.csv ├── figures │ ├── heatmap.png │ ├── hist_test.png │ ├── hist_training.png │ ├── history_traing.png │ ├── pairplot.png │ ├── prediction_test.png │ └── prediction_train.png ├── library │ └── EloquentTinyML-main.zip ├── models │ ├── model.keras │ ├── model_quant_float32.h │ ├── model_quant_float32.tflite │ ├── model_quant_int8.h │ └── model_quant_int8.tflite └── requirements.txt ├── 13_CNN ├── CNN.ipynb ├── CNN │ ├── CNN.ino │ └── model.h ├── figures │ ├── confusion_matrix.png │ ├── history_traing.png │ └── model.png ├── library │ └── EloquentTinyML-main.zip ├── models │ ├── model.keras │ ├── model_quant_float32.h │ ├── model_quant_float32.tflite │ ├── model_quant_int8.h │ └── model_quant_int8.tflite └── requirements.txt ├── 14_XGBRegression ├── XGBoostRegressor.ipynb ├── XGBoostRegressor │ ├── XGBRegressor.h │ └── XGBoostRegressor.ino └── requirements.txt ├── 15_Poisson_Regressor ├── PoissonRegressor.ipynb ├── PoissonRegressor │ ├── PoissonRegressor.h │ └── PoissonRegressor.ino ├── data │ └── FuelConsumption.csv ├── figures │ ├── heatmap.png │ └── pairplot.png └── requirements.txt ├── 16_KNN ├── KNN │ ├── KNN.h │ └── KNN.ino ├── KNeighborsClassifier.ipynb ├── data │ └── KNNAlgorithmDataset.csv ├── figures │ ├── ROC_test.png │ ├── ROC_train.png │ ├── confusion_matrix_test.png │ ├── confusion_matrix_train.png │ ├── diagnosis.png │ ├── heatmap.png │ ├── pairplot.png │ └── result_k_value.png └── requirements.txt ├── 17_ElasticNet ├── ElasticNet.ipynb ├── ElasticNet │ ├── ElasticNet.h │ └── ElasticNet.ino ├── data │ └── FuelConsumption.csv ├── figures │ ├── heatmap.png │ └── pairplot.png └── requirements.txt ├── 18_LSTM ├── LSTM.ipynb ├── LSTM │ ├── LSTM.ino │ └── model.h ├── README.md ├── data │ └── FuelConsumption.csv ├── eloquent_tensorflow │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-39.pyc │ └── template.jinja ├── figures │ ├── fig0.png │ ├── fig1.png │ ├── fig10.png │ ├── fig11.png │ ├── fig12.png │ ├── 
fig13.png │ ├── fig14.png │ ├── fig15.png │ ├── fig16.png │ ├── fig17.png │ ├── fig2.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── fig6.png │ ├── fig7.png │ ├── fig8.png │ ├── fig9.png │ ├── heatmap.png │ ├── history_traing.png │ ├── model.png │ └── pairplot.png ├── libraries │ ├── ESP32 │ │ ├── EloquentTinyML-main.zip │ │ └── tflm_esp32-2.0.0.zip │ └── Python │ │ └── python-eloquent-tensorflow-main.zip └── requirements.txt ├── 19_Autoencoder ├── Autoencoder.ipynb ├── Autoencoder │ ├── autoencoder.ino │ ├── autoencoder_decoder.ino │ ├── autoencoder_encoder.ino │ ├── model_autoencoder.h │ ├── model_decoder.h │ └── model_encoder.h ├── README.md ├── eloquent_tensorflow │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-39.pyc │ └── template.jinja ├── figures │ ├── autoencoder_results.png │ ├── fig0.png │ ├── fig1.png │ ├── fig10.png │ ├── fig11.png │ ├── fig12.png │ ├── fig2.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── fig6.png │ ├── fig7.png │ ├── fig8.png │ ├── fig9.png │ └── history_traing.png ├── libraries │ ├── ESP32 │ │ ├── EloquentTinyML-main.zip │ │ └── tflm_esp32-2.0.0.zip │ └── Python │ │ └── python-eloquent-tensorflow-main.zip └── requirements.txt ├── 20_Q_Learning ├── Q_Learning │ └── Q_Learning.ino ├── README.md └── figures │ ├── fig0.png │ ├── fig1.png │ └── fig2.png ├── 21_Huber_Regressor ├── HuberRegressor.ipynb ├── HuberRegressor │ └── HuberRegressor.h ├── README.md ├── data │ └── FuelConsumption.csv ├── figures │ ├── fig0.png │ ├── fig1.png │ ├── fig10.png │ ├── fig11.png │ ├── fig12.png │ ├── fig13.png │ ├── fig2.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── fig6.png │ ├── fig7.png │ ├── fig8.png │ ├── fig9.png │ ├── heatmap.png │ ├── huber.png │ └── pairplot.png └── requirements.txt ├── 22_QAT ├── QAT.ipynb ├── QAT │ ├── QAT.ino │ └── model.h ├── README.md ├── data │ └── FuelConsumption.csv ├── figures │ ├── fig0.png │ ├── fig00.png │ ├── fig1.png │ ├── fig10.png │ ├── fig11.png │ ├── fig12.png │ ├── fig13.png │ ├── fig14.png │ ├── fig15.png │ ├── fig16.png │ ├── fig17.png │ ├── fig18.png │ ├── fig19.png │ ├── fig2.png │ ├── fig20.png │ ├── fig21.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── fig6.png │ ├── fig7.png │ ├── fig8.png │ ├── fig9.png │ ├── heatmap.png │ ├── hist_testing.png │ ├── hist_testing_q.png │ ├── hist_training.png │ ├── hist_training_q.png │ ├── history_traing.png │ ├── history_traing_q.png │ └── pairplot.png ├── library │ └── EloquentTinyML-main.zip ├── models │ ├── model.keras │ ├── model_quant_float32.h │ ├── model_quant_float32.tflite │ ├── model_quant_int8.h │ ├── model_quant_int8.tflite │ ├── q_aware_model_quant_float32.h │ ├── q_aware_model_quant_float32.tflite │ ├── q_aware_model_quant_int8.h │ └── q_aware_model_quant_int8.tflite └── requirements.txt ├── 23_PTP ├── ArduinoCode │ ├── CNN_Pruning.ino │ ├── model_magnitude.h │ ├── model_original.h │ ├── model_pruned_L1.h │ ├── model_pruned_interative.h │ └── model_random.h ├── Pruning.ipynb ├── README.md ├── figures │ ├── acc_quantized_model.png │ ├── fig0.png │ ├── fig01.png │ ├── fig02.png │ ├── fig03.png │ ├── fig04.png │ ├── fig1.png │ ├── fig2.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── history_pruning_traing.png │ ├── history_traing.png │ ├── model.png │ ├── model_size_pruned_comparison.png │ ├── size_PTQ_comparative.png │ └── time_acc_pruning_comparative.png ├── library │ └── EloquentTinyML-main.zip ├── models │ ├── model_original.keras │ ├── model_original_quant_float32.h │ ├── model_original_quant_float32.tflite │ ├── 
original_model.keras │ ├── pruned_model_L1.keras │ ├── pruned_model_L1_quant_float32.h │ ├── pruned_model_L1_quant_float32.tflite │ ├── pruned_model_interative.keras │ ├── pruned_model_interative_quant_float32.h │ ├── pruned_model_interative_quant_float32.tflite │ ├── pruned_model_magnitude.keras │ ├── pruned_model_magnitude_quant_float32.h │ ├── pruned_model_magnitude_quant_float32.tflite │ ├── pruned_model_random.keras │ ├── pruned_model_random_quant_float32.h │ └── pruned_model_random_quant_float32.tflite └── requirements.txt ├── 24_Knowledge_Distillation ├── ArduinoCode │ ├── CNN_Knowledge_Distillation.ino │ ├── student_model_quant_float32.h │ └── teacher_model_quant_float32.h ├── KD.ipynb ├── README.md ├── figures │ ├── acc_quantized_model.png │ ├── fig0.png │ ├── fig00.png │ ├── fig1.png │ ├── fig10.png │ ├── fig11.png │ ├── fig2.png │ ├── fig3.png │ ├── fig4.png │ ├── fig5.png │ ├── fig6.png │ ├── fig7.png │ ├── fig8.png │ ├── fig9.png │ ├── history_teacher_model.png │ └── size_KD_comparative.png ├── library │ └── EloquentTinyML-main.zip ├── models │ ├── student_model_quant_float32.h │ ├── student_model_quant_float32.tflite │ ├── teacher_model.keras │ ├── teacher_model_quant_float32.h │ └── teacher_model_quant_float32.tflite └── requirements.txt ├── 25_MicromobileNet ├── ArduinoCode │ ├── MobileNet.ino │ ├── model.h │ ├── model_micro.h │ ├── model_milli.h │ ├── model_nano.h │ └── model_pico.h ├── Micromobilenet.ipynb ├── README.md ├── figures │ ├── fig00.png │ ├── fig01.png │ ├── fig02.png │ ├── fig03.png │ ├── fig04.png │ ├── fig05.png │ ├── fig06.png │ ├── fig07.png │ ├── fig08.png │ ├── fig09.png │ ├── fig10.png │ ├── fig11.png │ ├── fig12.png │ ├── fig13.png │ ├── fig14.png │ ├── fig15.png │ └── fig16.png ├── micromobilenet │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-39.pyc │ ├── architectures │ │ ├── BaseMobileNet.py │ │ ├── Config.py │ │ ├── MicroMobileNet.py │ │ ├── MilliMobileNet.py │ │ ├── MobileNet.py │ │ ├── NanoMobileNet.py │ │ ├── PicoMobileNet.py │ │ ├── __init__.py │ │ └── __pycache__ │ │ │ ├── BaseMobileNet.cpython-39.pyc │ │ │ ├── Config.cpython-39.pyc │ │ │ ├── MicroMobileNet.cpython-39.pyc │ │ │ ├── MilliMobileNet.cpython-39.pyc │ │ │ ├── MobileNet.cpython-39.pyc │ │ │ ├── NanoMobileNet.cpython-39.pyc │ │ │ ├── PicoMobileNet.cpython-39.pyc │ │ │ └── __init__.cpython-39.pyc │ ├── convert │ │ ├── Environment.py │ │ ├── LayerData.py │ │ ├── Loader.py │ │ ├── MobileNetConverter.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── Environment.cpython-39.pyc │ │ │ ├── LayerData.cpython-39.pyc │ │ │ ├── Loader.cpython-39.pyc │ │ │ ├── MobileNetConverter.cpython-39.pyc │ │ │ └── __init__.cpython-39.pyc │ │ └── templates │ │ │ ├── BaseMobileNet.jinja │ │ │ ├── ops │ │ │ ├── argmax.jinja │ │ │ ├── conv3x3x1.jinja │ │ │ ├── depthwise_conv.jinja │ │ │ ├── dot.jinja │ │ │ ├── maxpool.jinja │ │ │ ├── mult3x3.jinja │ │ │ ├── pad.jinja │ │ │ ├── pointwise_conv.jinja │ │ │ └── softmax.jinja │ │ │ └── predict_file.jinja │ ├── converters.py │ ├── load.py │ ├── runner.py │ └── utils.py ├── models │ ├── mobilenetV1_0.1_96x96_greyscale_weights.h5 │ ├── mobilenetV1_0.25_96x96_greyscale_weights.h5 │ ├── mobilenetV1_0.2_96x96_greyscale_weights.h5 │ ├── mobilenetV2_0.05_96x96_greyscale_weights.h5 │ ├── mobilenetV2_0.1_96x96_greyscale_weights.h5 │ ├── mobilenetV2_0.35_96x96_greyscale_weights.h5 │ ├── model_TL_quant_float32.h │ ├── model_TL_quant_float32.tflite │ ├── model_TL_quant_int8.h │ └── model_stripped .keras └── requirements.txt ├── 26_MutinominalNB ├── ArduinoCode │ 
├── MultinomialNB.h │ └── MultinominalNB.ino ├── MutinomialNB.ipynb ├── README.md ├── figures │ ├── fig00.png │ ├── fig01.png │ ├── fig02.png │ ├── fig03.png │ ├── fig04.png │ ├── fig05.png │ ├── fig06.png │ ├── fig07.png │ ├── fig08.png │ ├── fig09.png │ ├── fig10.png │ └── fig11.png └── requirements.txt └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .vscode -------------------------------------------------------------------------------- /01_decision_tree/arvore_decisao.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/01_decision_tree/arvore_decisao.png -------------------------------------------------------------------------------- /01_decision_tree/decisionTree/decisionTree.ino: -------------------------------------------------------------------------------- 1 | int predict(float *x) { 2 | if (x[2] <= 2.449999988079071) { 3 | return 0; 4 | } 5 | 6 | else { 7 | if (x[3] <= 1.6500000357627869) { 8 | if (x[2] <= 4.950000047683716) { 9 | return 1; 10 | } 11 | 12 | else { 13 | if (x[0] <= 6.150000095367432) { 14 | if (x[1] <= 2.450000047683716) { 15 | return 2; 16 | } 17 | 18 | else { 19 | return 1; 20 | } 21 | } 22 | 23 | else { 24 | return 2; 25 | } 26 | } 27 | } 28 | 29 | else { 30 | if (x[2] <= 4.8500001430511475) { 31 | if (x[1] <= 3.100000023841858) { 32 | return 2; 33 | } 34 | 35 | else { 36 | return 1; 37 | } 38 | } 39 | 40 | else { 41 | return 2; 42 | } 43 | } 44 | } 45 | }; 46 | 47 | void setup() { 48 | Serial.begin(115200); 49 | 50 | } 51 | 52 | void loop() { 53 | float X_1[] = {5.1, 3.5, 1.4, 0.2}; 54 | int result_1 = predict(X_1); 55 | Serial.print("Result of predict with input X1:"); 56 | Serial.println(result_1); 57 | delay(2000); 58 | 59 | float X_2[] = {6.2, 2.2, 4.5, 1.5}; 60 | int result_2 = predict(X_2); 61 | Serial.print("Result of predict with input X2:"); 62 | Serial.println(result_2); 63 | delay(2000); 64 | 65 | float X_3[] = {6.1, 3.0, 4.9, 1.8}; 66 | int result_3 = predict(X_3); 67 | Serial.print("Result of predict with input X3:"); 68 | Serial.println(result_3); 69 | delay(2000); 70 | } 71 | -------------------------------------------------------------------------------- /01_decision_tree/decision_tree_classifier.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/01_decision_tree/decision_tree_classifier.h -------------------------------------------------------------------------------- /02_random_forest/random_forest_classifier.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | namespace Eloquent { 4 | namespace ML { 5 | namespace Port { 6 | class GaussianNB { 7 | public: 8 | /** 9 | * Predict class for features vector 10 | */ 11 | int predict(float *x) { 12 | float votes[3] = { 0.0f }; 13 | float theta[4] = { 0 }; 14 | float sigma[4] = { 0 }; 15 | theta[0] = 4.964516129032; theta[1] = 3.377419354839; theta[2] = 1.464516129032; theta[3] = 0.248387096774; 16 | sigma[0] = 0.111966704288; sigma[1] = 0.136586891592; sigma[2] = 0.033257026868; sigma[3] = 0.011529659543; 17 | votes[0] = 0.295238095238 - gauss(x, theta, sigma); 18 | theta[0] = 5.862162162162; theta[1] = 2.724324324324; theta[2] = 4.210810810811; theta[3] = 1.302702702703; 19 | sigma[0] = 0.275325057719; 
sigma[1] = 0.087246168019; sigma[2] = 0.239342588764; sigma[3] = 0.041344049684; 20 | votes[1] = 0.352380952381 - gauss(x, theta, sigma); 21 | theta[0] = 6.559459459459; theta[1] = 2.986486486486; theta[2] = 5.545945945946; theta[3] = 2.005405405405; 22 | sigma[0] = 0.422410521562; sigma[1] = 0.096303874374; sigma[2] = 0.288429513527; sigma[3] = 0.085916730473; 23 | votes[2] = 0.352380952381 - gauss(x, theta, sigma); 24 | // return argmax of votes 25 | uint8_t classIdx = 0; 26 | float maxVotes = votes[0]; 27 | 28 | for (uint8_t i = 1; i < 3; i++) { 29 | if (votes[i] > maxVotes) { 30 | classIdx = i; 31 | maxVotes = votes[i]; 32 | } 33 | } 34 | 35 | return classIdx; 36 | } 37 | 38 | protected: 39 | /** 40 | * Compute gaussian value 41 | */ 42 | float gauss(float *x, float *theta, float *sigma) { 43 | float gauss = 0.0f; 44 | 45 | for (uint16_t i = 0; i < 4; i++) { 46 | gauss += log(sigma[i]); 47 | gauss += abs(x[i] - theta[i]) / sigma[i]; 48 | } 49 | 50 | return gauss; 51 | } 52 | }; 53 | } 54 | } 55 | } -------------------------------------------------------------------------------- /02_random_forest/random_forest_classifier.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/02_random_forest/random_forest_classifier.png -------------------------------------------------------------------------------- /02_random_forest/random_forest_regressor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/02_random_forest/random_forest_regressor.png -------------------------------------------------------------------------------- /03_XGBoost/XGBClassifier.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/03_XGBoost/XGBClassifier.h -------------------------------------------------------------------------------- /03_XGBoost/XGBoostClassifier/XGBClassifier.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/03_XGBoost/XGBoostClassifier/XGBClassifier.h -------------------------------------------------------------------------------- /03_XGBoost/XGBoostClassifier/XGBoostClassifier.ino: -------------------------------------------------------------------------------- 1 | // Arduino sketch 2 | #include "XGBClassifier.h" 3 | 4 | Eloquent::ML::Port::XGBClassifier classifier; 5 | 6 | void setup() { 7 | Serial.begin(115200); 8 | 9 | } 10 | 11 | void loop() { 12 | float X_1[] = {5.1, 3.5, 1.4, 0.2}; 13 | int result_1 = classifier.predict(X_1); 14 | Serial.print("Result of predict with input X1:"); 15 | Serial.println(result_1); 16 | delay(2000); 17 | 18 | float X_2[] = {6.2, 2.2, 4.5, 1.5}; 19 | int result_2 = classifier.predict(X_2); 20 | Serial.print("Result of predict with input X2:"); 21 | Serial.println(result_2); 22 | delay(2000); 23 | 24 | float X_3[] = {6.1, 3.0, 4.9, 1.8}; 25 | int result_3 = classifier.predict(X_3); 26 | Serial.print("Result of predict with input X3:"); 27 | Serial.println(result_3); 28 | delay(2000); 29 | } 30 | -------------------------------------------------------------------------------- /04_GaussianNB/GaussianNB/GaussianNB.h: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | namespace Eloquent { 4 | namespace ML { 5 | namespace Port { 6 | class GaussianNB { 7 | public: 8 | /** 9 | * Predict class for features vector 10 | */ 11 | int predict(float *x) { 12 | float votes[3] = { 0.0f }; 13 | float theta[4] = { 0 }; 14 | float sigma[4] = { 0 }; 15 | theta[0] = 4.964516129032; theta[1] = 3.377419354839; theta[2] = 1.464516129032; theta[3] = 0.248387096774; 16 | sigma[0] = 0.111966704288; sigma[1] = 0.136586891592; sigma[2] = 0.033257026868; sigma[3] = 0.011529659543; 17 | votes[0] = 0.295238095238 - gauss(x, theta, sigma); 18 | theta[0] = 5.862162162162; theta[1] = 2.724324324324; theta[2] = 4.210810810811; theta[3] = 1.302702702703; 19 | sigma[0] = 0.275325057719; sigma[1] = 0.087246168019; sigma[2] = 0.239342588764; sigma[3] = 0.041344049684; 20 | votes[1] = 0.352380952381 - gauss(x, theta, sigma); 21 | theta[0] = 6.559459459459; theta[1] = 2.986486486486; theta[2] = 5.545945945946; theta[3] = 2.005405405405; 22 | sigma[0] = 0.422410521562; sigma[1] = 0.096303874374; sigma[2] = 0.288429513527; sigma[3] = 0.085916730473; 23 | votes[2] = 0.352380952381 - gauss(x, theta, sigma); 24 | // return argmax of votes 25 | uint8_t classIdx = 0; 26 | float maxVotes = votes[0]; 27 | 28 | for (uint8_t i = 1; i < 3; i++) { 29 | if (votes[i] > maxVotes) { 30 | classIdx = i; 31 | maxVotes = votes[i]; 32 | } 33 | } 34 | 35 | return classIdx; 36 | } 37 | 38 | protected: 39 | /** 40 | * Compute gaussian value 41 | */ 42 | float gauss(float *x, float *theta, float *sigma) { 43 | float gauss = 0.0f; 44 | 45 | for (uint16_t i = 0; i < 4; i++) { 46 | gauss += log(sigma[i]); 47 | gauss += abs(x[i] - theta[i]) / sigma[i]; 48 | } 49 | 50 | return gauss; 51 | } 52 | }; 53 | } 54 | } 55 | } -------------------------------------------------------------------------------- /04_GaussianNB/GaussianNB/GaussianNB.ino: -------------------------------------------------------------------------------- 1 | // Arduino sketch 2 | #include "GaussianNB.h" 3 | 4 | Eloquent::ML::Port::GaussianNB classifier; 5 | 6 | void setup() { 7 | Serial.begin(115200); 8 | 9 | } 10 | 11 | void loop() { 12 | float X_1[] = {5.1, 3.5, 1.4, 0.2}; 13 | int result_1 = classifier.predict(X_1); 14 | Serial.print("Result of predict with input X1:"); 15 | Serial.println(result_1); 16 | delay(2000); 17 | 18 | float X_2[] = {6.2, 2.2, 4.5, 1.5}; 19 | int result_2 = classifier.predict(X_2); 20 | Serial.print("Result of predict with input X2:"); 21 | Serial.println(result_2); 22 | delay(2000); 23 | 24 | float X_3[] = {6.1, 3.0, 4.9, 1.8}; 25 | int result_3 = classifier.predict(X_3); 26 | Serial.print("Result of predict with input X3:"); 27 | Serial.println(result_3); 28 | delay(2000); 29 | } 30 | -------------------------------------------------------------------------------- /05_support_vector_machine/SVMClassifier/SVMClassifier.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | namespace Eloquent { 4 | namespace ML { 5 | namespace Port { 6 | class SVM { 7 | public: 8 | /** 9 | * Predict class for features vector 10 | */ 11 | int predict(float *x) { 12 | float kernels[3] = { 0 }; 13 | float decisions[1] = { 0 }; 14 | int votes[2] = { 0 }; 15 | kernels[0] = compute_kernel(x, 3.4 , 1.9 , 0.2 ); 16 | kernels[1] = compute_kernel(x, 3.3 , 1.7 , 0.5 ); 17 | kernels[2] = compute_kernel(x, 2.4 , 3.3 , 1.0 ); 18 | float decision = -0.833910342285; 19 | decision = 
decision - ( + kernels[0] * -0.31945543931 + kernels[1] * -0.240101867421 ); 20 | decision = decision - ( + kernels[2] * 0.559557306731 ); 21 | return decision > 0 ? 0 : 1; 22 | } 23 | protected: 24 | /** 25 | * Compute kernel between feature vector and support vector. 26 | * Kernel type: linear 27 | */ 28 | float compute_kernel(float *x, ...) { 29 | va_list w; 30 | va_start(w, 3); 31 | float kernel = 0.0; 32 | for (uint16_t i = 0; i < 3; i++) { 33 | kernel += x[i] * va_arg(w, double); 34 | } 35 | return kernel; 36 | } 37 | }; 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /05_support_vector_machine/SVMClassifier/SVMClassifier.ino: -------------------------------------------------------------------------------- 1 | #include "SVMClassifier.h" 2 | 3 | Eloquent::ML::Port::SVM classifier; 4 | 5 | void setup() { 6 | Serial.begin(115200); 7 | 8 | } 9 | 10 | void loop() { 11 | float X_1[] = {3.6, 1. , 0.2}; 12 | int result_1 = classifier.predict(X_1); 13 | Serial.print("Result of predict with input X1:"); 14 | Serial.println(result_1); 15 | delay(2000); 16 | 17 | float X_2[] = {2.9, 4.7, 1.4}; 18 | int result_2 = classifier.predict(X_2); 19 | Serial.print("Result of predict with input X2:"); 20 | Serial.println(result_2); 21 | delay(2000); 22 | 23 | } 24 | -------------------------------------------------------------------------------- /06_SEFR/SEFRClassifier/SEFRClassifier.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | namespace Eloquent { 4 | namespace ML { 5 | namespace Port { 6 | class SEFR { 7 | public: 8 | /** 9 | * Predict class for features vector 10 | */ 11 | int predict(float *x) { 12 | return dot(x, 0.089210262828 , -0.097438891661 , 0.48245740461 , 0.68784598788 ) <= 2.0193337291074043 ? 0 : 1; 13 | } 14 | 15 | protected: 16 | /** 17 | * Compute dot product 18 | */ 19 | float dot(float *x, ...) { 20 | va_list w; 21 | va_start(w, 4); 22 | float dot = 0.0; 23 | 24 | for (uint16_t i = 0; i < 4; i++) { 25 | const float wi = va_arg(w, double); 26 | dot += x[i] * wi; 27 | } 28 | 29 | return dot; 30 | } 31 | }; 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /06_SEFR/SEFRClassifier/SEFRClassifier.ino: -------------------------------------------------------------------------------- 1 | #include "SEFRClassifier.h" 2 | 3 | Eloquent::ML::Port::SEFR classifier; 4 | 5 | void setup() { 6 | Serial.begin(115200); 7 | 8 | } 9 | 10 | void loop() { 11 | float X_1[] = {3.6, 1. , 0.2}; 12 | int result_1 = classifier.predict(X_1); 13 | Serial.print("Result of predict with input X1:"); 14 | Serial.println(result_1); 15 | delay(2000); 16 | 17 | float X_2[] = {2.9, 4.7, 1.4}; 18 | int result_2 = classifier.predict(X_2); 19 | Serial.print("Result of predict with input X2:"); 20 | Serial.println(result_2); 21 | delay(2000); 22 | 23 | } 24 | -------------------------------------------------------------------------------- /07_principal_components_analysis/PCA/PCA.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | namespace Eloquent { 3 | namespace ML { 4 | namespace Port { 5 | class PCA { 6 | public: 7 | /** 8 | * Apply dimensionality reduction 9 | * @warn Will override the source vector if no dest provided! 
10 | */ 11 | void transform(float *x, float *dest = NULL) { 12 | static float u[2] = { 0 }; 13 | u[0] = dot(x, -0.010208828727 , 0.009522664636 , 0.07801034572 , 0.004103495259 , -0.003935853874 , -0.004232033634 , 0.21088426957 , -0.00282519103 , 0.974263356619 ); 14 | u[1] = dot(x, -0.015215508318 , 0.016476395215 , 0.128355383147 , 0.008750586985 , -0.049637455695 , -0.049172415055 , 0.964175225747 , -0.0112017847 , -0.219782142624 ); 15 | memcpy(dest != NULL ? dest : x, u, sizeof(float) * 2); 16 | } 17 | 18 | protected: 19 | /** 20 | * Compute dot product with varargs 21 | */ 22 | float dot(float *x, ...) { 23 | va_list w; 24 | va_start(w, 9); 25 | static float mean[] = { 10.998048780488 , 7.26 , 14.477073170732 , 1.976829268293 , 49.616341463415 , 14.605853658537 , 44.325609756098 , 4.76243902439 , 58.316585365854 }; 26 | float dot = 0.0; 27 | 28 | for (uint16_t i = 0; i < 9; i++) { 29 | dot += (x[i] - mean[i]) * va_arg(w, double); 30 | } 31 | 32 | return dot; 33 | } 34 | }; 35 | } 36 | } 37 | } -------------------------------------------------------------------------------- /07_principal_components_analysis/PCA/PCA.ino: -------------------------------------------------------------------------------- 1 | #include "PCA.h" 2 | 3 | Eloquent::ML::Port::PCA pca; 4 | 5 | 6 | 7 | void setup() { 8 | Serial.begin(115200); 9 | 10 | } 11 | 12 | void loop() { 13 | float X_1[9] = {11.04, 7.58, 14.83, 2.07, 49.81, 14.69, 43.75, 5.02, 63.19}; 14 | float result_1[2]; 15 | pca.transform(X_1, result_1); 16 | Serial.print("Result of predict with input X1:"); 17 | for (int i = 0; i < 2; i++) { 18 | Serial.print(" "); 19 | Serial.print(result_1[i]); 20 | } 21 | Serial.println(); // Adiciona uma nova linha no final 22 | delay(2000); 23 | 24 | float X_2[9] = {10.76, 7.4 , 14.26, 1.86, 49.37, 14.05, 50.72, 4.92, 60.15}; 25 | float result_2[2]; 26 | pca.transform(X_2, result_2); 27 | Serial.print("Result of predict with input X2:"); 28 | for (int i = 0; i < 2; i++) { 29 | Serial.print(" "); 30 | Serial.print(result_2[i]); 31 | } 32 | Serial.println(); // Adiciona uma nova linha no final 33 | delay(2000); 34 | 35 | } 36 | -------------------------------------------------------------------------------- /07_principal_components_analysis/data/decathlon.csv: -------------------------------------------------------------------------------- 1 | "","100m","Long.jump","Shot.put","High.jump","400m","110m.hurdle","Discus","Pole.vault","Javeline","1500m","Rank","Points","Competition" 2 | "SEBRLE",11.04,7.58,14.83,2.07,49.81,14.69,43.75,5.02,63.19,291.7,1,8217,"Decastar" 3 | "CLAY",10.76,7.4,14.26,1.86,49.37,14.05,50.72,4.92,60.15,301.5,2,8122,"Decastar" 4 | "KARPOV",11.02,7.3,14.77,2.04,48.37,14.09,48.95,4.92,50.31,300.2,3,8099,"Decastar" 5 | "BERNARD",11.02,7.23,14.25,1.92,48.93,14.99,40.87,5.32,62.77,280.1,4,8067,"Decastar" 6 | "YURKOV",11.34,7.09,15.19,2.1,50.42,15.31,46.26,4.72,63.44,276.4,5,8036,"Decastar" 7 | "WARNERS",11.11,7.6,14.31,1.98,48.68,14.23,41.1,4.92,51.77,278.1,6,8030,"Decastar" 8 | "ZSIVOCZKY",11.13,7.3,13.48,2.01,48.62,14.17,45.67,4.42,55.37,268,7,8004,"Decastar" 9 | "McMULLEN",10.83,7.31,13.76,2.13,49.91,14.38,44.41,4.42,56.37,285.1,8,7995,"Decastar" 10 | "MARTINEAU",11.64,6.81,14.57,1.95,50.14,14.93,47.6,4.92,52.33,262.1,9,7802,"Decastar" 11 | "HERNU",11.37,7.56,14.41,1.86,51.1,15.06,44.99,4.82,57.19,285.1,10,7733,"Decastar" 12 | "BARRAS",11.33,6.97,14.09,1.95,49.48,14.48,42.1,4.72,55.4,282,11,7708,"Decastar" 13 | "NOOL",11.33,7.27,12.68,1.98,49.2,15.29,37.92,4.62,57.44,266.6,12,7651,"Decastar" 14 | 
"BOURGUIGNON",11.36,6.8,13.46,1.86,51.16,15.67,40.49,5.02,54.68,291.7,13,7313,"Decastar" 15 | "Sebrle",10.85,7.84,16.36,2.12,48.36,14.05,48.72,5,70.52,280.01,1,8893,"OlympicG" 16 | "Clay",10.44,7.96,15.23,2.06,49.19,14.13,50.11,4.9,69.71,282,2,8820,"OlympicG" 17 | "Karpov",10.5,7.81,15.93,2.09,46.81,13.97,51.65,4.6,55.54,278.11,3,8725,"OlympicG" 18 | "Macey",10.89,7.47,15.73,2.15,48.97,14.56,48.34,4.4,58.46,265.42,4,8414,"OlympicG" 19 | "Warners",10.62,7.74,14.48,1.97,47.97,14.01,43.73,4.9,55.39,278.05,5,8343,"OlympicG" 20 | "Zsivoczky",10.91,7.14,15.31,2.12,49.4,14.95,45.62,4.7,63.45,269.54,6,8287,"OlympicG" 21 | "Hernu",10.97,7.19,14.65,2.03,48.73,14.25,44.72,4.8,57.76,264.35,7,8237,"OlympicG" 22 | "Nool",10.8,7.53,14.26,1.88,48.81,14.8,42.05,5.4,61.33,276.33,8,8235,"OlympicG" 23 | "Bernard",10.69,7.48,14.8,2.12,49.13,14.17,44.75,4.4,55.27,276.31,9,8225,"OlympicG" 24 | "Schwarzl",10.98,7.49,14.01,1.94,49.76,14.25,42.43,5.1,56.32,273.56,10,8102,"OlympicG" 25 | "Pogorelov",10.95,7.31,15.1,2.06,50.79,14.21,44.6,5,53.45,287.63,11,8084,"OlympicG" 26 | "Schoenbeck",10.9,7.3,14.77,1.88,50.3,14.34,44.41,5,60.89,278.82,12,8077,"OlympicG" 27 | "Barras",11.14,6.99,14.91,1.94,49.41,14.37,44.83,4.6,64.55,267.09,13,8067,"OlympicG" 28 | "Smith",10.85,6.81,15.24,1.91,49.27,14.01,49.02,4.2,61.52,272.74,14,8023,"OlympicG" 29 | "Averyanov",10.55,7.34,14.44,1.94,49.72,14.39,39.88,4.8,54.51,271.02,15,8021,"OlympicG" 30 | "Ojaniemi",10.68,7.5,14.97,1.94,49.12,15.01,40.35,4.6,59.26,275.71,16,8006,"OlympicG" 31 | "Smirnov",10.89,7.07,13.88,1.94,49.11,14.77,42.47,4.7,60.88,263.31,17,7993,"OlympicG" 32 | "Qi",11.06,7.34,13.55,1.97,49.65,14.78,45.13,4.5,60.79,272.63,18,7934,"OlympicG" 33 | "Drews",10.87,7.38,13.07,1.88,48.51,14.01,40.11,5,51.53,274.21,19,7926,"OlympicG" 34 | "Parkhomenko",11.14,6.61,15.69,2.03,51.04,14.88,41.9,4.8,65.82,277.94,20,7918,"OlympicG" 35 | "Terek",10.92,6.94,15.15,1.94,49.56,15.12,45.62,5.3,50.62,290.36,21,7893,"OlympicG" 36 | "Gomez",11.08,7.26,14.57,1.85,48.61,14.41,40.95,4.4,60.71,269.7,22,7865,"OlympicG" 37 | "Turi",11.08,6.91,13.62,2.03,51.67,14.26,39.83,4.8,59.34,290.01,23,7708,"OlympicG" 38 | "Lorenzo",11.1,7.03,13.22,1.85,49.34,15.38,40.22,4.5,58.36,263.08,24,7592,"OlympicG" 39 | "Karlivans",11.33,7.26,13.3,1.97,50.54,14.98,43.34,4.5,52.92,278.67,25,7583,"OlympicG" 40 | "Korkizoglou",10.86,7.07,14.81,1.94,51.16,14.96,46.07,4.7,53.05,317,26,7573,"OlympicG" 41 | "Uldal",11.23,6.99,13.53,1.85,50.95,15.09,43.01,4.5,60,281.7,27,7495,"OlympicG" 42 | "Casarsa",11.36,6.68,14.92,1.94,53.2,15.39,48.66,4.4,58.62,296.12,28,7404,"OlympicG" 43 | -------------------------------------------------------------------------------- /08_Logistic_Regressor/LogisticRegressor/LogisticRegressor.h: -------------------------------------------------------------------------------- 1 | namespace Eloquent 2 | { 3 | namespace ML 4 | { 5 | namespace Port 6 | { 7 | class LogisticRegressor 8 | { 9 | public: 10 | float predict(float *x) 11 | { 12 | float probability = 0; 13 | float coefficients[4] = {0.37895195105851737, -0.864988943034597, 2.2039107306380346, 0.9146609339089985}; 14 | 15 | float z = -6.01143820166656; 16 | for (int i = 0; i < 4; i++) 17 | { 18 | z += coefficients[i] * x[i]; 19 | } 20 | 21 | probability = 1 / (1 + exp(-1 * z)); 22 | if (probability >= 0.5) 23 | { 24 | return 1; 25 | } 26 | else 27 | { 28 | return 0; 29 | } 30 | } 31 | }; 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- 
/08_Logistic_Regressor/LogisticRegressor/LogisticRegressor.ino: -------------------------------------------------------------------------------- 1 | #include "LogisticRegressor.h" 2 | 3 | Eloquent::ML::Port::LogisticRegressor LogisticRegressor; 4 | 5 | void setup() 6 | { 7 | Serial.begin(115200); 8 | } 9 | 10 | void loop() 11 | { 12 | float X_1[] = {6., 2.7, 5.1, 1.6}; 13 | int result_1 = LogisticRegressor.predict(X_1); 14 | Serial.print("Result of predict with input X1 (real value = 1):"); 15 | Serial.println(result_1); 16 | delay(2000); 17 | 18 | float X_2[] = {4.8, 3.1, 1.6, 0.2}; 19 | int result_2 = LogisticRegressor.predict(X_2); 20 | Serial.print("Result of predict with input X2 (real value = 0):"); 21 | Serial.println(result_2); 22 | delay(2000); 23 | } 24 | -------------------------------------------------------------------------------- /09_K_Means/Kmeans/KMeans.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace Eloquent 4 | { 5 | namespace ML 6 | { 7 | namespace Port 8 | { 9 | class KMeans 10 | { 11 | private: 12 | float centroids[7][2] = {{45.65789473684211, 44.42105263157895}, 13 | {29.249999999999996, 72.19444444444443}, 14 | {30.55555555555556, 13.05555555555555}, 15 | {24.033333333333328, 48.43333333333333}, 16 | {31.285714285714292, 90.39285714285712}, 17 | {51.035714285714285, 14.999999999999986}, 18 | {63.9090909090909, 50.63636363636363}}; 19 | 20 | public: 21 | int predict(float *x) 22 | { 23 | float euclidean_distance = 0; 24 | float euclidean_distance_old = 999999999; 25 | int cluster_member = -1; 26 | for (int index = 0; index < 7; ++index) 27 | { 28 | float error_square = 0; 29 | for (int index_value = 0; index_value < 2; ++index_value) 30 | { 31 | error_square += pow(centroids[index][index_value] - x[index_value], 2); 32 | } 33 | euclidean_distance = sqrt(error_square); 34 | if (euclidean_distance < euclidean_distance_old) 35 | { 36 | euclidean_distance_old = euclidean_distance; 37 | cluster_member = index; 38 | } 39 | } 40 | return cluster_member; 41 | } 42 | }; 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /09_K_Means/Kmeans/Kmeans.ino: -------------------------------------------------------------------------------- 1 | #include "KMeans.h" 2 | 3 | Eloquent::ML::Port::KMeans k_means; 4 | 5 | 6 | void setup() { 7 | Serial.begin(9600); 8 | 9 | 10 | } 11 | 12 | void loop() { 13 | // Input value 14 | float input[] = {15, 39}; 15 | // Predict 16 | int cluster = k_means.predict(input); 17 | // Exiba o cluster encontrado 18 | Serial.print("Cluster member (real is 2): "); 19 | Serial.println(cluster); 20 | 21 | delay(2000); 22 | } 23 | -------------------------------------------------------------------------------- /09_K_Means/data/Mall_Customers.csv: -------------------------------------------------------------------------------- 1 | CustomerID,Gender,Age,Annual Income (k$),Spending Score (1-100) 2 | 1,Male,19,15,39 3 | 2,Male,21,15,81 4 | 3,Female,20,16,6 5 | 4,Female,23,16,77 6 | 5,Female,31,17,40 7 | 6,Female,22,17,76 8 | 7,Female,35,18,6 9 | 8,Female,23,18,94 10 | 9,Male,64,19,3 11 | 10,Female,30,19,72 12 | 11,Male,67,19,14 13 | 12,Female,35,19,99 14 | 13,Female,58,20,15 15 | 14,Female,24,20,77 16 | 15,Male,37,20,13 17 | 16,Male,22,20,79 18 | 17,Female,35,21,35 19 | 18,Male,20,21,66 20 | 19,Male,52,23,29 21 | 20,Female,35,23,98 22 | 21,Male,35,24,35 23 | 22,Male,25,24,73 24 | 23,Female,46,25,5 25 | 24,Male,31,25,73 26 | 25,Female,54,28,14 27 | 
26,Male,29,28,82 28 | 27,Female,45,28,32 29 | 28,Male,35,28,61 30 | 29,Female,40,29,31 31 | 30,Female,23,29,87 32 | 31,Male,60,30,4 33 | 32,Female,21,30,73 34 | 33,Male,53,33,4 35 | 34,Male,18,33,92 36 | 35,Female,49,33,14 37 | 36,Female,21,33,81 38 | 37,Female,42,34,17 39 | 38,Female,30,34,73 40 | 39,Female,36,37,26 41 | 40,Female,20,37,75 42 | 41,Female,65,38,35 43 | 42,Male,24,38,92 44 | 43,Male,48,39,36 45 | 44,Female,31,39,61 46 | 45,Female,49,39,28 47 | 46,Female,24,39,65 48 | 47,Female,50,40,55 49 | 48,Female,27,40,47 50 | 49,Female,29,40,42 51 | 50,Female,31,40,42 52 | 51,Female,49,42,52 53 | 52,Male,33,42,60 54 | 53,Female,31,43,54 55 | 54,Male,59,43,60 56 | 55,Female,50,43,45 57 | 56,Male,47,43,41 58 | 57,Female,51,44,50 59 | 58,Male,69,44,46 60 | 59,Female,27,46,51 61 | 60,Male,53,46,46 62 | 61,Male,70,46,56 63 | 62,Male,19,46,55 64 | 63,Female,67,47,52 65 | 64,Female,54,47,59 66 | 65,Male,63,48,51 67 | 66,Male,18,48,59 68 | 67,Female,43,48,50 69 | 68,Female,68,48,48 70 | 69,Male,19,48,59 71 | 70,Female,32,48,47 72 | 71,Male,70,49,55 73 | 72,Female,47,49,42 74 | 73,Female,60,50,49 75 | 74,Female,60,50,56 76 | 75,Male,59,54,47 77 | 76,Male,26,54,54 78 | 77,Female,45,54,53 79 | 78,Male,40,54,48 80 | 79,Female,23,54,52 81 | 80,Female,49,54,42 82 | 81,Male,57,54,51 83 | 82,Male,38,54,55 84 | 83,Male,67,54,41 85 | 84,Female,46,54,44 86 | 85,Female,21,54,57 87 | 86,Male,48,54,46 88 | 87,Female,55,57,58 89 | 88,Female,22,57,55 90 | 89,Female,34,58,60 91 | 90,Female,50,58,46 92 | 91,Female,68,59,55 93 | 92,Male,18,59,41 94 | 93,Male,48,60,49 95 | 94,Female,40,60,40 96 | 95,Female,32,60,42 97 | 96,Male,24,60,52 98 | 97,Female,47,60,47 99 | 98,Female,27,60,50 100 | 99,Male,48,61,42 101 | 100,Male,20,61,49 102 | 101,Female,23,62,41 103 | 102,Female,49,62,48 104 | 103,Male,67,62,59 105 | 104,Male,26,62,55 106 | 105,Male,49,62,56 107 | 106,Female,21,62,42 108 | 107,Female,66,63,50 109 | 108,Male,54,63,46 110 | 109,Male,68,63,43 111 | 110,Male,66,63,48 112 | 111,Male,65,63,52 113 | 112,Female,19,63,54 114 | 113,Female,38,64,42 115 | 114,Male,19,64,46 116 | 115,Female,18,65,48 117 | 116,Female,19,65,50 118 | 117,Female,63,65,43 119 | 118,Female,49,65,59 120 | 119,Female,51,67,43 121 | 120,Female,50,67,57 122 | 121,Male,27,67,56 123 | 122,Female,38,67,40 124 | 123,Female,40,69,58 125 | 124,Male,39,69,91 126 | 125,Female,23,70,29 127 | 126,Female,31,70,77 128 | 127,Male,43,71,35 129 | 128,Male,40,71,95 130 | 129,Male,59,71,11 131 | 130,Male,38,71,75 132 | 131,Male,47,71,9 133 | 132,Male,39,71,75 134 | 133,Female,25,72,34 135 | 134,Female,31,72,71 136 | 135,Male,20,73,5 137 | 136,Female,29,73,88 138 | 137,Female,44,73,7 139 | 138,Male,32,73,73 140 | 139,Male,19,74,10 141 | 140,Female,35,74,72 142 | 141,Female,57,75,5 143 | 142,Male,32,75,93 144 | 143,Female,28,76,40 145 | 144,Female,32,76,87 146 | 145,Male,25,77,12 147 | 146,Male,28,77,97 148 | 147,Male,48,77,36 149 | 148,Female,32,77,74 150 | 149,Female,34,78,22 151 | 150,Male,34,78,90 152 | 151,Male,43,78,17 153 | 152,Male,39,78,88 154 | 153,Female,44,78,20 155 | 154,Female,38,78,76 156 | 155,Female,47,78,16 157 | 156,Female,27,78,89 158 | 157,Male,37,78,1 159 | 158,Female,30,78,78 160 | 159,Male,34,78,1 161 | 160,Female,30,78,73 162 | 161,Female,56,79,35 163 | 162,Female,29,79,83 164 | 163,Male,19,81,5 165 | 164,Female,31,81,93 166 | 165,Male,50,85,26 167 | 166,Female,36,85,75 168 | 167,Male,42,86,20 169 | 168,Female,33,86,95 170 | 169,Female,36,87,27 171 | 170,Male,32,87,63 172 | 171,Male,40,87,13 173 | 172,Male,28,87,75 174 | 
173,Male,36,87,10 175 | 174,Male,36,87,92 176 | 175,Female,52,88,13 177 | 176,Female,30,88,86 178 | 177,Male,58,88,15 179 | 178,Male,27,88,69 180 | 179,Male,59,93,14 181 | 180,Male,35,93,90 182 | 181,Female,37,97,32 183 | 182,Female,32,97,86 184 | 183,Male,46,98,15 185 | 184,Female,29,98,88 186 | 185,Female,41,99,39 187 | 186,Male,30,99,97 188 | 187,Female,54,101,24 189 | 188,Male,28,101,68 190 | 189,Female,41,103,17 191 | 190,Female,36,103,85 192 | 191,Female,34,103,23 193 | 192,Female,32,103,69 194 | 193,Male,33,113,8 195 | 194,Female,38,113,91 196 | 195,Female,47,120,16 197 | 196,Female,35,120,79 198 | 197,Female,45,126,28 199 | 198,Male,32,126,74 200 | 199,Male,32,137,18 201 | 200,Male,30,137,83 202 | -------------------------------------------------------------------------------- /10_Linear_Regressor/LinearRegressor/LinearRegressor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | namespace Eloquent { 4 | namespace ML { 5 | namespace Port { 6 | class LinearRegression { 7 | public: 8 | /** 9 | * Predict class for features vector 10 | */ 11 | float predict(float *x) { 12 | return dot(x, 8.519119048170, -63.742223819482, 120.195642585753, 48.382158099066, -247.051828054808, 139.424879828758, 59.514918286320, 67.272041311423, 336.697813626620, -48.176667846134) + 37.12282895581433; 13 | } 14 | 15 | protected: 16 | /** 17 | * Compute dot product 18 | */ 19 | float dot(float *x, ...) { 20 | va_list w; 21 | va_start(w, 10); 22 | float dot = 0.0; 23 | 24 | for (uint16_t i = 0; i < 10; i++) { 25 | const float wi = va_arg(w, double); 26 | dot += x[i] * wi; 27 | } 28 | 29 | return dot; 30 | } 31 | }; 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /10_Linear_Regressor/LinearRegressor/LinearRegressor.ino: -------------------------------------------------------------------------------- 1 | #include "LinearRegressor.h" 2 | 3 | Eloquent::ML::Port::LinearRegression LinearRegressor; 4 | 5 | void setup() 6 | { 7 | Serial.begin(115200); 8 | } 9 | 10 | void loop() 11 | { 12 | float X_1[] = {0.02717829, 0.05068012, 0.01750591, -0.03321323, -0.00707277, 0.04597154, -0.06549067, 0.07120998, -0.09643495, -0.05906719}; 13 | int result_1 = LinearRegressor.predict(X_1); 14 | Serial.print("Result of predict with input X1 (real value = 13):"); 15 | Serial.println(result_1); 16 | delay(2000); 17 | 18 | float X_2[] = {-0.07816532, -0.04464164, -0.0730303 , -0.05731319, -0.08412613,-0.07427747, -0.02499266, -0.03949338, -0.01811369, -0.08391984}; 19 | int result_2 = LinearRegressor.predict(X_2); 20 | Serial.print("Result of predict with input X2 (real value = 40):"); 21 | Serial.println(result_2); 22 | delay(2000); 23 | } 24 | -------------------------------------------------------------------------------- /11_GMM/GMM/GMM.h: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | const int num_components = 7; 5 | const int num_features = 2; 6 | 7 | namespace TKSF 8 | { 9 | namespace ML 10 | { 11 | namespace Port 12 | { 13 | class GMM 14 | { 15 | private: 16 | float means[num_components][num_features] = { 17 | {56.60931459453662, 49.92251390822188}, 18 | {29.264921745717615, 74.5990614961554}, 19 | {43.54207887008299, 10.496799818644392}, 20 | {41.078202034837176, 34.79082042618642}, 21 | {31.101406340255828, 90.2290512000496}, 22 | {31.37324943091291, 61.64977286632278}, 23 | {22.872325540804805, 49.948720505624074}, 24 | }; 
25 | 26 | float covariances[num_components][num_features][num_features] = { 27 | {{83.24599787684558, 2.890435963609233}, {2.890435963609235, 35.68617486791098}}, 28 | {{29.310245867234695, -3.6811694981292513}, {-3.68116949812925, 11.065658605910743}}, 29 | {{176.1500923010437, 13.01774646138936}, {13.017746461389358, 29.65985065852867}}, 30 | {{75.42208070910617, -6.070541531090654}, {-6.0705415310906545, 97.8692020437219}}, 31 | {{29.297433369873897, 4.575820469751143}, {4.575820469751143, 23.020446452070658}}, 32 | {{33.43445449031923, -13.501586578314814}, {-13.501586578314813, 6.152388053936477}}, 33 | {{16.965821015319975, -2.507134562074277}, {-2.507134562074276, 45.30126223322478}}, 34 | }; 35 | 36 | float coefficients[num_components] = {0.1984245445224706, 0.14337213895448095, 0.1649683239019345, 0.1911025085379841, 0.1413696101261269, 0.036998614584665225, 0.12376425937233766}; 37 | 38 | float component_pdf(float x[num_features], float mean[num_features], float covariance[num_features][num_features]) 39 | { 40 | float det = covariance[0][0] * covariance[1][1] - covariance[0][1] * covariance[1][0]; 41 | float inv_cov[num_features][num_features] = {{covariance[1][1] / det, -covariance[0][1] / det}, {-covariance[1][0] / det, covariance[0][0] / det}}; 42 | float exponent = -0.5 * (inv_cov[0][0] * (x[0] - mean[0]) * (x[0] - mean[0]) + 2 * inv_cov[0][1] * (x[0] - mean[0]) * (x[1] - mean[1]) + inv_cov[1][1] * (x[1] - mean[1]) * (x[1] - mean[1])); 43 | float coefficient = 1.0 / sqrt(2 * M_PI * det); 44 | return coefficient * exp(exponent); 45 | } 46 | 47 | public: 48 | int predict(float x[num_features]) 49 | { 50 | float probabilities[num_components] = {0}; 51 | for (int i = 0; i < num_components; ++i) 52 | { 53 | probabilities[i] = coefficients[i] * component_pdf(x, means[i], covariances[i]); 54 | } 55 | int maxIndex = 0; 56 | for (int i = 1; i < num_components; ++i) 57 | { 58 | if (probabilities[i] > probabilities[maxIndex]) 59 | { 60 | maxIndex = i; 61 | } 62 | } 63 | return maxIndex; 64 | } 65 | }; 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /11_GMM/GMM/GMM.ino: -------------------------------------------------------------------------------- 1 | #include "GMM.h" 2 | 3 | TKSF::ML::Port::GMM GMM; 4 | 5 | 6 | void setup() { 7 | Serial.begin(9600); 8 | 9 | 10 | } 11 | 12 | void loop() { 13 | // Input value 14 | float input[] = {19, 39}; 15 | // Predict 16 | int cluster = GMM.predict(input); 17 | Serial.print("Cluster member (real is 6): "); 18 | Serial.println(cluster); 19 | 20 | delay(2000); 21 | } 22 | -------------------------------------------------------------------------------- /11_GMM/data/Mall_Customers.csv: -------------------------------------------------------------------------------- 1 | CustomerID,Gender,Age,Annual Income (k$),Spending Score (1-100) 2 | 1,Male,19,15,39 3 | 2,Male,21,15,81 4 | 3,Female,20,16,6 5 | 4,Female,23,16,77 6 | 5,Female,31,17,40 7 | 6,Female,22,17,76 8 | 7,Female,35,18,6 9 | 8,Female,23,18,94 10 | 9,Male,64,19,3 11 | 10,Female,30,19,72 12 | 11,Male,67,19,14 13 | 12,Female,35,19,99 14 | 13,Female,58,20,15 15 | 14,Female,24,20,77 16 | 15,Male,37,20,13 17 | 16,Male,22,20,79 18 | 17,Female,35,21,35 19 | 18,Male,20,21,66 20 | 19,Male,52,23,29 21 | 20,Female,35,23,98 22 | 21,Male,35,24,35 23 | 22,Male,25,24,73 24 | 23,Female,46,25,5 25 | 24,Male,31,25,73 26 | 25,Female,54,28,14 27 | 26,Male,29,28,82 28 | 27,Female,45,28,32 29 | 28,Male,35,28,61 30 | 29,Female,40,29,31 31 | 30,Female,23,29,87 32 
| 31,Male,60,30,4 33 | 32,Female,21,30,73 34 | 33,Male,53,33,4 35 | 34,Male,18,33,92 36 | 35,Female,49,33,14 37 | 36,Female,21,33,81 38 | 37,Female,42,34,17 39 | 38,Female,30,34,73 40 | 39,Female,36,37,26 41 | 40,Female,20,37,75 42 | 41,Female,65,38,35 43 | 42,Male,24,38,92 44 | 43,Male,48,39,36 45 | 44,Female,31,39,61 46 | 45,Female,49,39,28 47 | 46,Female,24,39,65 48 | 47,Female,50,40,55 49 | 48,Female,27,40,47 50 | 49,Female,29,40,42 51 | 50,Female,31,40,42 52 | 51,Female,49,42,52 53 | 52,Male,33,42,60 54 | 53,Female,31,43,54 55 | 54,Male,59,43,60 56 | 55,Female,50,43,45 57 | 56,Male,47,43,41 58 | 57,Female,51,44,50 59 | 58,Male,69,44,46 60 | 59,Female,27,46,51 61 | 60,Male,53,46,46 62 | 61,Male,70,46,56 63 | 62,Male,19,46,55 64 | 63,Female,67,47,52 65 | 64,Female,54,47,59 66 | 65,Male,63,48,51 67 | 66,Male,18,48,59 68 | 67,Female,43,48,50 69 | 68,Female,68,48,48 70 | 69,Male,19,48,59 71 | 70,Female,32,48,47 72 | 71,Male,70,49,55 73 | 72,Female,47,49,42 74 | 73,Female,60,50,49 75 | 74,Female,60,50,56 76 | 75,Male,59,54,47 77 | 76,Male,26,54,54 78 | 77,Female,45,54,53 79 | 78,Male,40,54,48 80 | 79,Female,23,54,52 81 | 80,Female,49,54,42 82 | 81,Male,57,54,51 83 | 82,Male,38,54,55 84 | 83,Male,67,54,41 85 | 84,Female,46,54,44 86 | 85,Female,21,54,57 87 | 86,Male,48,54,46 88 | 87,Female,55,57,58 89 | 88,Female,22,57,55 90 | 89,Female,34,58,60 91 | 90,Female,50,58,46 92 | 91,Female,68,59,55 93 | 92,Male,18,59,41 94 | 93,Male,48,60,49 95 | 94,Female,40,60,40 96 | 95,Female,32,60,42 97 | 96,Male,24,60,52 98 | 97,Female,47,60,47 99 | 98,Female,27,60,50 100 | 99,Male,48,61,42 101 | 100,Male,20,61,49 102 | 101,Female,23,62,41 103 | 102,Female,49,62,48 104 | 103,Male,67,62,59 105 | 104,Male,26,62,55 106 | 105,Male,49,62,56 107 | 106,Female,21,62,42 108 | 107,Female,66,63,50 109 | 108,Male,54,63,46 110 | 109,Male,68,63,43 111 | 110,Male,66,63,48 112 | 111,Male,65,63,52 113 | 112,Female,19,63,54 114 | 113,Female,38,64,42 115 | 114,Male,19,64,46 116 | 115,Female,18,65,48 117 | 116,Female,19,65,50 118 | 117,Female,63,65,43 119 | 118,Female,49,65,59 120 | 119,Female,51,67,43 121 | 120,Female,50,67,57 122 | 121,Male,27,67,56 123 | 122,Female,38,67,40 124 | 123,Female,40,69,58 125 | 124,Male,39,69,91 126 | 125,Female,23,70,29 127 | 126,Female,31,70,77 128 | 127,Male,43,71,35 129 | 128,Male,40,71,95 130 | 129,Male,59,71,11 131 | 130,Male,38,71,75 132 | 131,Male,47,71,9 133 | 132,Male,39,71,75 134 | 133,Female,25,72,34 135 | 134,Female,31,72,71 136 | 135,Male,20,73,5 137 | 136,Female,29,73,88 138 | 137,Female,44,73,7 139 | 138,Male,32,73,73 140 | 139,Male,19,74,10 141 | 140,Female,35,74,72 142 | 141,Female,57,75,5 143 | 142,Male,32,75,93 144 | 143,Female,28,76,40 145 | 144,Female,32,76,87 146 | 145,Male,25,77,12 147 | 146,Male,28,77,97 148 | 147,Male,48,77,36 149 | 148,Female,32,77,74 150 | 149,Female,34,78,22 151 | 150,Male,34,78,90 152 | 151,Male,43,78,17 153 | 152,Male,39,78,88 154 | 153,Female,44,78,20 155 | 154,Female,38,78,76 156 | 155,Female,47,78,16 157 | 156,Female,27,78,89 158 | 157,Male,37,78,1 159 | 158,Female,30,78,78 160 | 159,Male,34,78,1 161 | 160,Female,30,78,73 162 | 161,Female,56,79,35 163 | 162,Female,29,79,83 164 | 163,Male,19,81,5 165 | 164,Female,31,81,93 166 | 165,Male,50,85,26 167 | 166,Female,36,85,75 168 | 167,Male,42,86,20 169 | 168,Female,33,86,95 170 | 169,Female,36,87,27 171 | 170,Male,32,87,63 172 | 171,Male,40,87,13 173 | 172,Male,28,87,75 174 | 173,Male,36,87,10 175 | 174,Male,36,87,92 176 | 175,Female,52,88,13 177 | 176,Female,30,88,86 178 | 177,Male,58,88,15 179 | 
178,Male,27,88,69 180 | 179,Male,59,93,14 181 | 180,Male,35,93,90 182 | 181,Female,37,97,32 183 | 182,Female,32,97,86 184 | 183,Male,46,98,15 185 | 184,Female,29,98,88 186 | 185,Female,41,99,39 187 | 186,Male,30,99,97 188 | 187,Female,54,101,24 189 | 188,Male,28,101,68 190 | 189,Female,41,103,17 191 | 190,Female,36,103,85 192 | 191,Female,34,103,23 193 | 192,Female,32,103,69 194 | 193,Male,33,113,8 195 | 194,Female,38,113,91 196 | 195,Female,47,120,16 197 | 196,Female,35,120,79 198 | 197,Female,45,126,28 199 | 198,Male,32,126,74 200 | 199,Male,32,137,18 201 | 200,Male,30,137,83 202 | -------------------------------------------------------------------------------- /12_MLP/MLP/MLP.ino: -------------------------------------------------------------------------------- 1 | #include "model.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #define NUMBER_OF_INPUTS 3 8 | #define NUMBER_OF_OUTPUTS 1 9 | 10 | // in future projects you may need to tweek this value: it's a trial and error process 11 | #define TENSOR_ARENA_SIZE 8*1024 12 | 13 | double start_time = -1; 14 | double end_time = -1; 15 | double width_time = -1; 16 | 17 | 18 | 19 | Eloquent::TinyML::TensorFlow::TensorFlow ml; 20 | 21 | uint8_t *loadedModel; 22 | 23 | void setup() { 24 | Serial.begin(115200); 25 | SPIFFS.begin(true); 26 | delay(3000); 27 | storeModel(); 28 | loadModel(); 29 | if (!ml.begin(loadedModel)) 30 | { 31 | Serial.println("Cannot inialize model"); 32 | Serial.println(ml.getErrorMessage()); 33 | delay(60000); 34 | } 35 | delay(4000); 36 | } 37 | 38 | void loop() { 39 | 40 | float input[3] = {0.51428571, 0.55555556, 0.41841004}; 41 | 42 | //start_time = millis(); 43 | start_time = micros(); 44 | float predicted = ml.predict(input); 45 | //end_time = millis(); 46 | end_time = micros(); 47 | 48 | width_time = end_time - start_time; 49 | 50 | Serial.print("Predict: "); 51 | Serial.println(predicted); 52 | Serial.print("Real: "); 53 | Serial.println(15.4); 54 | Serial.print("Processing time: "); 55 | Serial.println(width_time); 56 | Serial.println(" "); 57 | delay(500); 58 | 59 | } 60 | 61 | 62 | void storeModel() { 63 | File file = SPIFFS.open("/sine.bin", "wb"); 64 | file.write(model, model_len); 65 | file.close(); 66 | } 67 | 68 | 69 | /** 70 | * Load model from SPIFFS 71 | */ 72 | void loadModel() { 73 | File file = SPIFFS.open("/sine.bin", "rb"); 74 | size_t modelSize = file.size(); 75 | 76 | Serial.print("Found model on filesystem of size "); 77 | Serial.print(modelSize); 78 | Serial.print(": it should be "); 79 | Serial.println(model_len); 80 | 81 | // allocate memory 82 | loadedModel = (uint8_t*) malloc(modelSize); 83 | 84 | // copy data from file 85 | for (size_t i = 0; i < modelSize; i++) 86 | loadedModel[i] = file.read(); 87 | 88 | file.close(); 89 | } 90 | -------------------------------------------------------------------------------- /12_MLP/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/heatmap.png -------------------------------------------------------------------------------- /12_MLP/figures/hist_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/hist_test.png -------------------------------------------------------------------------------- /12_MLP/figures/hist_training.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/hist_training.png -------------------------------------------------------------------------------- /12_MLP/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/history_traing.png -------------------------------------------------------------------------------- /12_MLP/figures/pairplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/pairplot.png -------------------------------------------------------------------------------- /12_MLP/figures/prediction_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/prediction_test.png -------------------------------------------------------------------------------- /12_MLP/figures/prediction_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/figures/prediction_train.png -------------------------------------------------------------------------------- /12_MLP/library/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/library/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /12_MLP/models/model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/models/model.keras -------------------------------------------------------------------------------- /12_MLP/models/model_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/models/model_quant_float32.tflite -------------------------------------------------------------------------------- /12_MLP/models/model_quant_int8.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/12_MLP/models/model_quant_int8.tflite -------------------------------------------------------------------------------- /12_MLP/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | tensorflow==2.15.0 3 | pandas==1.4.0 4 | numpy==1.24.0 5 | matplotlib==3.7.1 6 | seaborn==0.11.2 7 | -------------------------------------------------------------------------------- /13_CNN/CNN/CNN.ino: -------------------------------------------------------------------------------- 1 | #include "model.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #define NUMBER_OF_INPUTS 3 8 | #define NUMBER_OF_OUTPUTS 1 9 | 10 | // in future projects you may need to tweek this value: it's a trial 
and error process 11 | #define TENSOR_ARENA_SIZE 8*1024 12 | 13 | double start_time = -1; 14 | double end_time = -1; 15 | double width_time = -1; 16 | 17 | 18 | 19 | Eloquent::TinyML::TensorFlow::TensorFlow ml; 20 | 21 | uint8_t *loadedModel; 22 | 23 | void setup() { 24 | Serial.begin(115200); 25 | SPIFFS.begin(true); 26 | delay(3000); 27 | storeModel(); 28 | loadModel(); 29 | if (!ml.begin(loadedModel)) 30 | { 31 | Serial.println("Cannot inialize model"); 32 | Serial.println(ml.getErrorMessage()); 33 | delay(60000); 34 | } 35 | delay(4000); 36 | } 37 | 38 | void loop() { 39 | 40 | float input[3] = {0.51428571, 0.55555556, 0.41841004}; 41 | 42 | //start_time = millis(); 43 | start_time = micros(); 44 | float predicted = ml.predict(input); 45 | //end_time = millis(); 46 | end_time = micros(); 47 | 48 | width_time = end_time - start_time; 49 | 50 | Serial.print("Predict: "); 51 | Serial.println(predicted); 52 | Serial.print("Real: "); 53 | Serial.println(15.4); 54 | Serial.print("Processing time: "); 55 | Serial.println(width_time); 56 | Serial.println(" "); 57 | delay(500); 58 | 59 | } 60 | 61 | 62 | void storeModel() { 63 | File file = SPIFFS.open("/sine.bin", "wb"); 64 | file.write(model, model_len); 65 | file.close(); 66 | } 67 | 68 | 69 | /** 70 | * Load model from SPIFFS 71 | */ 72 | void loadModel() { 73 | File file = SPIFFS.open("/sine.bin", "rb"); 74 | size_t modelSize = file.size(); 75 | 76 | Serial.print("Found model on filesystem of size "); 77 | Serial.print(modelSize); 78 | Serial.print(": it should be "); 79 | Serial.println(model_len); 80 | 81 | // allocate memory 82 | loadedModel = (uint8_t*) malloc(modelSize); 83 | 84 | // copy data from file 85 | for (size_t i = 0; i < modelSize; i++) 86 | loadedModel[i] = file.read(); 87 | 88 | file.close(); 89 | } 90 | -------------------------------------------------------------------------------- /13_CNN/figures/confusion_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/figures/confusion_matrix.png -------------------------------------------------------------------------------- /13_CNN/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/figures/history_traing.png -------------------------------------------------------------------------------- /13_CNN/figures/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/figures/model.png -------------------------------------------------------------------------------- /13_CNN/library/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/library/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /13_CNN/models/model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/models/model.keras -------------------------------------------------------------------------------- /13_CNN/models/model_quant_float32.tflite: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/models/model_quant_float32.tflite -------------------------------------------------------------------------------- /13_CNN/models/model_quant_int8.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/13_CNN/models/model_quant_int8.tflite -------------------------------------------------------------------------------- /13_CNN/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | tensorflow==2.15.0 3 | numpy==1.24.0 4 | matplotlib==3.7.1 5 | seaborn==0.11.2 -------------------------------------------------------------------------------- /14_XGBRegression/XGBoostRegressor/XGBoostRegressor.ino: -------------------------------------------------------------------------------- 1 | #include "XGBRegressor.h" 2 | 3 | 4 | void setup() { 5 | Serial.begin(115200); 6 | } 7 | 8 | void loop() { 9 | double X_1[] = { 2.71782911e-02, 5.06801187e-02, 1.75059115e-02, 10 | -3.32135761e-02, -7.07277125e-03, 4.59715403e-02, 11 | -6.54906725e-02, 7.12099798e-02, -9.64332229e-02, 12 | -5.90671943e-02}; 13 | double result_1 = score(X_1); 14 | Serial.print("Result of predict with input X1 (real value = 69):"); 15 | Serial.println(String(result_1, 7)); 16 | delay(2000); 17 | 18 | } 19 | 20 | 21 | -------------------------------------------------------------------------------- /14_XGBRegression/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | m2cgen==0.10.0 3 | numpy==1.24.0 4 | matplotlib==3.7.1 5 | xgboost==1.6.2 6 | seaborn==0.11.2 7 | pandas==1.4.0 -------------------------------------------------------------------------------- /15_Poisson_Regressor/PoissonRegressor/PoissonRegressor.h: -------------------------------------------------------------------------------- 1 | #include 2 | double score(double *input) 3 | { 4 | return exp(1.7347124654302846 + input[0] * 0.011406244946132144 + input[1] * 0.01010646886054758 + input[2] * 0.0028201461971878914); 5 | } 6 | -------------------------------------------------------------------------------- /15_Poisson_Regressor/PoissonRegressor/PoissonRegressor.ino: -------------------------------------------------------------------------------- 1 | #include "PoissonRegressor.h" 2 | 3 | void setup() 4 | { 5 | Serial.begin(115200); 6 | } 7 | 8 | void loop() 9 | { 10 | double X_1[] = {4.6, 8, 304}; 11 | float result_1 = score(X_1); 12 | Serial.print("Result of predict with input X1 (real value = 15.4):"); 13 | Serial.println(result_1); 14 | delay(2000); 15 | 16 | double X_2[] = {1.5, 4, 216}; 17 | float result_2 = score(X_2); 18 | Serial.print("Result of predict with input X2 (real value = 11.3):"); 19 | Serial.println(result_2); 20 | delay(2000); 21 | } 22 | -------------------------------------------------------------------------------- /15_Poisson_Regressor/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/15_Poisson_Regressor/figures/heatmap.png -------------------------------------------------------------------------------- /15_Poisson_Regressor/figures/pairplot.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/15_Poisson_Regressor/figures/pairplot.png -------------------------------------------------------------------------------- /15_Poisson_Regressor/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | m2cgen==0.10.0 3 | numpy==1.24.0 4 | seaborn==0.11.2 5 | pandas==1.4.0 6 | matplotlib==3.7.1 -------------------------------------------------------------------------------- /16_KNN/KNN/KNN.ino: -------------------------------------------------------------------------------- 1 | // Include the generated header file 2 | #include "KNN.h" 3 | 4 | KNeighborsClassifier knn; 5 | 6 | float X_test[1][30] = { 7 | {12.47, 18.6, 81.09, 481.9, 0.09965, 0.1058, 0.08005, 0.03821, 0.1925, 0.06373, 8 | 0.3961, 1.044, 2.497, 30.29, 0.006953, 0.01911, 0.02701, 0.01037, 0.01782, 9 | 0.003586, 14.97, 24.64, 96.05, 677.9, 0.1426, 0.2378, 0.2671, 0.1015, 0.3014, 10 | 0.0875}}; 11 | 12 | float X_test_2[1][30] = { 13 | {1.546e+01, 1.948e+01, 1.017e+02, 7.489e+02, 1.092e-01, 1.223e-01, 14 | 1.466e-01, 8.087e-02, 1.931e-01, 5.796e-02, 4.743e-01, 7.859e-01, 15 | 3.094e+00, 4.831e+01, 6.240e-03, 1.484e-02, 2.813e-02, 1.093e-02, 16 | 1.397e-02, 2.461e-03, 1.926e+01, 2.600e+01, 1.249e+02, 1.156e+03, 17 | 1.546e-01, 2.394e-01, 3.791e-01, 1.514e-01, 2.837e-01, 8.019e-02}}; 18 | 19 | void setup() 20 | { 21 | Serial.begin(9600); 22 | // Initializing the KNN model 23 | Serial.println("KNN model initialized."); 24 | 25 | // Fitting the model with training data 26 | knn.fit(X_train, y_train, 10, 30); 27 | Serial.println("Model fitted with training data."); 28 | } 29 | 30 | void loop() 31 | { 32 | // Example test with X_test 33 | int *predictions = knn.predict(X_test, 1); 34 | 35 | // Printing predictions 36 | Serial.print("Predicted class (Real = 0): "); 37 | Serial.println(predictions[0]); 38 | 39 | predictions = knn.predict(X_test_2, 1); 40 | Serial.print("Predicted class (Real = 1): "); 41 | Serial.println(predictions[0]); 42 | 43 | // Delay before next iteration (adjust as needed) 44 | delay(5000); // 5 seconds delay 45 | 46 | // Clean up memory 47 | delete[] predictions; 48 | } 49 | -------------------------------------------------------------------------------- /16_KNN/figures/ROC_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/ROC_test.png -------------------------------------------------------------------------------- /16_KNN/figures/ROC_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/ROC_train.png -------------------------------------------------------------------------------- /16_KNN/figures/confusion_matrix_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/confusion_matrix_test.png -------------------------------------------------------------------------------- /16_KNN/figures/confusion_matrix_train.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/confusion_matrix_train.png -------------------------------------------------------------------------------- /16_KNN/figures/diagnosis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/diagnosis.png -------------------------------------------------------------------------------- /16_KNN/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/heatmap.png -------------------------------------------------------------------------------- /16_KNN/figures/pairplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/pairplot.png -------------------------------------------------------------------------------- /16_KNN/figures/result_k_value.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/16_KNN/figures/result_k_value.png -------------------------------------------------------------------------------- /16_KNN/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | numpy==1.24.0 3 | seaborn==0.11.2 4 | pandas==1.4.0 5 | matplotlib==3.7.1 -------------------------------------------------------------------------------- /17_ElasticNet/ElasticNet/ElasticNet.h: -------------------------------------------------------------------------------- 1 | double score(double * input) { 2 | return 0.21407132576818277 + input[0] * 0.0 + input[1] * 0.0 + input[2] * 0.048847064561809886; 3 | } 4 | -------------------------------------------------------------------------------- /17_ElasticNet/ElasticNet/ElasticNet.ino: -------------------------------------------------------------------------------- 1 | #include "ElasticNet.h" 2 | 3 | void setup() 4 | { 5 | Serial.begin(115200); 6 | } 7 | 8 | void loop() 9 | { 10 | double X_1[] = {4.6, 8, 304}; 11 | float result_1 = score(X_1); 12 | Serial.print("Result of predict with input X1 (real value = 15.4):"); 13 | Serial.println(result_1); 14 | delay(2000); 15 | 16 | double X_2[] = {1.5, 4, 216}; 17 | float result_2 = score(X_2); 18 | Serial.print("Result of predict with input X2 (real value = 11.3):"); 19 | Serial.println(result_2); 20 | delay(2000); 21 | } -------------------------------------------------------------------------------- /17_ElasticNet/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/17_ElasticNet/figures/heatmap.png -------------------------------------------------------------------------------- /17_ElasticNet/figures/pairplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/17_ElasticNet/figures/pairplot.png -------------------------------------------------------------------------------- /17_ElasticNet/requirements.txt: 
--------------------------------------------------------------------------------
1 | scikit-learn==1.0.2 2 | m2cgen==0.10.0 3 | numpy==1.24.0 4 | seaborn==0.11.2 5 | pandas==1.4.0 6 | matplotlib==3.7.1
--------------------------------------------------------------------------------
/18_LSTM/LSTM/LSTM.ino:
--------------------------------------------------------------------------------
1 | #include <Arduino.h> // headers follow the EloquentTinyML (3.x) ESP32 example 2 | // replace with your own model 3 | #include "model.h" 4 | // include the runtime specific for your board 5 | // either tflm_esp32 or tflm_cortexm 6 | #include <tflm_esp32.h> 7 | // now you can include the eloquent tinyml wrapper 8 | #include <eloquent_tinyml.h> 9 | 10 | // this is a trial-and-error process 11 | // when developing a new model, start with a high value 12 | // (e.g. 10000), then decrease until the model stops 13 | // working as expected 14 | #define ARENA_SIZE 30000 15 | 16 | Eloquent::TF::Sequential<TF_NUM_OPS, ARENA_SIZE> tf; 17 | 18 | float X_1[3] = {4.6, 8., 304.}; 19 | float X_2[3] = {2., 4., 216.}; 20 | 21 | void predictSample(float *input, float expectedOutput) 22 | { 23 | 24 | while (!tf.begin(tfModel).isOk()) 25 | { 26 | Serial.println(tf.exception.toString()); 27 | delay(1000); 28 | } 29 | 30 | // run inference on the sample 31 | if (!tf.predict(input).isOk()) 32 | { 33 | Serial.println(tf.exception.toString()); 34 | return; 35 | } 36 | Serial.print("Expected = "); 37 | Serial.print(expectedOutput); 38 | Serial.print(", predicted = "); 39 | Serial.println(tf.outputs[0]); 40 | } 41 | 42 | void setup() 43 | { 44 | Serial.begin(115200); 45 | delay(3000); 46 | Serial.println("__TENSORFLOW LSTM__"); 47 | 48 | // configure input/output 49 | // (not mandatory if you generated the .h model 50 | // using the eloquent_tensorflow Python package) 51 | tf.setNumInputs(TF_NUM_INPUTS); 52 | tf.setNumOutputs(TF_NUM_OUTPUTS); 53 | 54 | registerNetworkOps(tf); 55 | } 56 | 57 | void loop() 58 | { 59 | /** 60 | * Run prediction 61 | */ 62 | 63 | predictSample(X_1, 17.76); 64 | delay(2000); 65 | 66 | predictSample(X_2, 11.44); 67 | delay(2000); 68 | }
--------------------------------------------------------------------------------
/18_LSTM/eloquent_tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 | import hexdump 2 | import numpy as np 3 | import tensorflow as tf 4 | from tempfile import TemporaryDirectory 5 | from jinja2 import Template 6 | from tensorflow.lite.python.convert_phase import ConverterError 7 | import os 8 | 9 | 10 | 11 | def convert_model(model, X: np.ndarray = None, y: np.ndarray = None, model_name: str = 'tfModel') -> str: 12 | """ 13 | Convert TensorFlow model to C header for Arduino 14 | :param model: 15 | :param X: 16 | :param y: 17 | :param model_name: 18 | :return: 19 | """ 20 | input_shape = [d or 1 for d in model.layers[0].input_shape] 21 | num_inputs = np.prod(input_shape[1:]) 22 | num_outputs = model.layers[-1].output_shape[1] 23 | 24 | # give user hint of which layers to include 25 | unique_layers = set([layer.__class__.__name__ for layer in model.layers]) 26 | layer_mapping = { 27 | 'Add': 'Add', 28 | 'AvgPool2D': 'AveragePool2D', 29 | 'Concatenate': 'Concatenation', 30 | 'Conv2D': 'Conv2D', 31 | 'Dense': 'FullyConnected', 32 | 'DepthwiseConv2D': 'DepthwiseConv2D', 33 | 'ELU': 'Elu', 34 | 'LSTM': 'UnidirectionalSequenceLSTM', 35 | 'LeakyReLU': 'LeakyRelu', 36 | 'MaxPool2D': 'MaxPool2D', 37 | 'Maximum': 'Maximum', 38 | 'Minimum': 'Minimum', 39 | 'PReLU': 'Prelu', 40 | 'ReLU': 'Relu', 41 | 'Reshape': 'Reshape', 42 | 'Softmax': 'Softmax' 43 | } 44 | 45 | dependencies = { 46 |
'LSTM': ['Shape', 'Reshape', 'StridedSlice', 'Pack', 'Fill', 'Transpose', 'While', 'Less', 'Add', 'Gather', 'Split', 'Mul', 'Minimum', 'Maximum', 'Relu', 'Tanh', 'Concatenation', 'Slice'] 47 | } 48 | 49 | # detect allowed and not allowed layers 50 | allowed_layers = ['Softmax'] 51 | not_allowed_layers = [] 52 | 53 | for layer in unique_layers: 54 | if layer not in layer_mapping: 55 | not_allowed_layers.append(layer) 56 | continue 57 | 58 | allowed_layers += [layer_mapping[layer]] + dependencies.get(layer, []) 59 | 60 | allowed_layers = set(allowed_layers) 61 | not_allowed_layers = set(not_allowed_layers) 62 | 63 | # convert model to bytes 64 | if 'UnidirectionalSequenceLSTM' in allowed_layers: 65 | # see https://github.com/tensorflow/tflite-micro/issues/2006#issuecomment-1567349993 66 | run_model = tf.function(lambda x: model(x)) 67 | concrete_func = run_model.get_concrete_function(tf.TensorSpec(input_shape, model.inputs[0].dtype)) 68 | 69 | with TemporaryDirectory() as model_dir: 70 | model.save(model_dir, save_format='tf', signatures=concrete_func) 71 | converter = tf.lite.TFLiteConverter.from_saved_model(model_dir) 72 | converted = converter.convert() 73 | else: 74 | converter = tf.lite.TFLiteConverter.from_keras_model(model) 75 | 76 | try: 77 | converted = converter.convert() 78 | except ConverterError: 79 | converter.optimizations = [tf.lite.Optimize.DEFAULT] 80 | converter.experimental_new_converter = True 81 | converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] 82 | converted = converter.convert() 83 | 84 | model_bytes = hexdump.dump(converted).split(' ') 85 | bytes_array = ', '.join(['0x%02x' % int(byte, 16) for byte in model_bytes]) 86 | model_size = len(model_bytes) 87 | 88 | # use Jinja to generate clean code 89 | # Get the current working directory 90 | current_directory = os.getcwd() 91 | with open(current_directory + '\\eloquent_tensorflow\\template.jinja') as file: 92 | template = Template(file.read()) 93 | 94 | return template.render(**locals()) 95 | -------------------------------------------------------------------------------- /18_LSTM/eloquent_tensorflow/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/eloquent_tensorflow/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /18_LSTM/eloquent_tensorflow/template.jinja: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifdef __has_attribute 4 | #define HAVE_ATTRIBUTE(x) __has_attribute(x) 5 | #else 6 | #define HAVE_ATTRIBUTE(x) 0 7 | #endif 8 | #if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) 9 | #define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4))) 10 | #else 11 | #define DATA_ALIGN_ATTRIBUTE 12 | #endif 13 | 14 | // automatically configure network 15 | {% if num_inputs is not none %}#define TF_NUM_INPUTS {{ num_inputs }}{% endif %} 16 | {% if num_outputs is not none %}#define TF_NUM_OUTPUTS {{ num_outputs }}{% endif %} 17 | #define TF_NUM_OPS {{ allowed_layers | length }} 18 | 19 | {% if allowed_layers | length > 0 %}/** 20 | * Call this function to register the ops 21 | * that have been detected 22 | */ 23 | template 24 | void registerNetworkOps(TF& nn) { 25 | {% for layer in allowed_layers %}nn.resolver.Add{{ layer }}(); 26 | {% endfor %} 27 | } 28 | {% 
endif %} 29 | 30 | {% if not_allowed_layers | length %}// these layers are used in Python 31 | // but are not allowed in Arduino 32 | {% for layer in not_allowed_layers %}// - {{ layer }} 33 | {% endfor %}{% endif %} 34 | 35 | // model data 36 | const unsigned char {{ model_name }}[{{ model_size }}] DATA_ALIGN_ATTRIBUTE = { {{ bytes_array }} }; -------------------------------------------------------------------------------- /18_LSTM/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig0.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig1.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig10.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig11.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig12.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig13.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig14.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig15.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig16.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig17.png -------------------------------------------------------------------------------- 
/18_LSTM/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig2.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig3.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig4.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig5.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig6.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig7.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig8.png -------------------------------------------------------------------------------- /18_LSTM/figures/fig9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/fig9.png -------------------------------------------------------------------------------- /18_LSTM/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/heatmap.png -------------------------------------------------------------------------------- /18_LSTM/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/history_traing.png -------------------------------------------------------------------------------- /18_LSTM/figures/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/model.png -------------------------------------------------------------------------------- /18_LSTM/figures/pairplot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/figures/pairplot.png -------------------------------------------------------------------------------- /18_LSTM/libraries/ESP32/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/libraries/ESP32/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /18_LSTM/libraries/ESP32/tflm_esp32-2.0.0.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/libraries/ESP32/tflm_esp32-2.0.0.zip -------------------------------------------------------------------------------- /18_LSTM/libraries/Python/python-eloquent-tensorflow-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/18_LSTM/libraries/Python/python-eloquent-tensorflow-main.zip -------------------------------------------------------------------------------- /18_LSTM/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | numpy==1.24.0 3 | seaborn==0.11.2 4 | pandas==1.4.0 5 | matplotlib==3.7.1 6 | tensorflow==2.15.0 7 | jinja2 8 | hexdump 9 | -------------------------------------------------------------------------------- /19_Autoencoder/eloquent_tensorflow/__init__.py: -------------------------------------------------------------------------------- 1 | import hexdump 2 | import numpy as np 3 | import tensorflow as tf 4 | from tempfile import TemporaryDirectory 5 | from jinja2 import Template 6 | from tensorflow.lite.python.convert_phase import ConverterError 7 | import os 8 | 9 | 10 | 11 | def convert_model(model, X: np.ndarray = None, y: np.ndarray = None, model_name: str = 'tfModel') -> str: 12 | """ 13 | Convert TensorFlow model to C header for Arduino 14 | :param model: 15 | :param X: 16 | :param y: 17 | :param model_name: 18 | :return: 19 | """ 20 | input_shape = [d or 1 for d in model.layers[0].input_shape] 21 | num_inputs = np.prod(input_shape[1:]) 22 | num_outputs = model.layers[-1].output_shape[1] 23 | 24 | # give user hint of which layers to include 25 | unique_layers = set([layer.__class__.__name__ for layer in model.layers]) 26 | layer_mapping = { 27 | 'Add': 'Add', 28 | 'AvgPool2D': 'AveragePool2D', 29 | 'Concatenate': 'Concatenation', 30 | 'Conv2D': 'Conv2D', 31 | 'Dense': 'FullyConnected', 32 | 'DepthwiseConv2D': 'DepthwiseConv2D', 33 | 'ELU': 'Elu', 34 | 'LSTM': 'UnidirectionalSequenceLSTM', 35 | 'LeakyReLU': 'LeakyRelu', 36 | 'MaxPool2D': 'MaxPool2D', 37 | 'Maximum': 'Maximum', 38 | 'Minimum': 'Minimum', 39 | 'PReLU': 'Prelu', 40 | 'ReLU': 'Relu', 41 | 'Reshape': 'Reshape', 42 | 'Softmax': 'Softmax' 43 | } 44 | 45 | dependencies = { 46 | 'LSTM': ['Shape', 'Reshape', 'StridedSlice', 'Pack', 'Fill', 'Transpose', 'While', 'Less', 'Add', 'Gather', 'Split', 'Mul', 'Minimum', 'Maximum', 'Relu', 'Tanh', 'Concatenation', 'Slice'] 47 | } 48 | 49 | # detect allowed and not allowed layers 50 | allowed_layers = ['Softmax'] 51 | not_allowed_layers = [] 52 | 53 | for layer in unique_layers: 54 | if layer not in layer_mapping: 55 | not_allowed_layers.append(layer) 56 | continue 57 | 58 | allowed_layers += 
[layer_mapping[layer]] + dependencies.get(layer, []) 59 | 60 | allowed_layers = set(allowed_layers) 61 | not_allowed_layers = set(not_allowed_layers) 62 | 63 | # convert model to bytes 64 | if 'UnidirectionalSequenceLSTM' in allowed_layers: 65 | # see https://github.com/tensorflow/tflite-micro/issues/2006#issuecomment-1567349993 66 | run_model = tf.function(lambda x: model(x)) 67 | concrete_func = run_model.get_concrete_function(tf.TensorSpec(input_shape, model.inputs[0].dtype)) 68 | 69 | with TemporaryDirectory() as model_dir: 70 | model.save(model_dir, save_format='tf', signatures=concrete_func) 71 | converter = tf.lite.TFLiteConverter.from_saved_model(model_dir) 72 | converted = converter.convert() 73 | else: 74 | converter = tf.lite.TFLiteConverter.from_keras_model(model) 75 | 76 | try: 77 | converted = converter.convert() 78 | except ConverterError: 79 | converter.optimizations = [tf.lite.Optimize.DEFAULT] 80 | converter.experimental_new_converter = True 81 | converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] 82 | converted = converter.convert() 83 | 84 | model_bytes = hexdump.dump(converted).split(' ') 85 | bytes_array = ', '.join(['0x%02x' % int(byte, 16) for byte in model_bytes]) 86 | model_size = len(model_bytes) 87 | 88 | # use Jinja to generate clean code 89 | # Get the current working directory 90 | current_directory = os.getcwd() 91 | with open(current_directory + '\\eloquent_tensorflow\\template.jinja') as file: 92 | template = Template(file.read()) 93 | 94 | return template.render(**locals()) 95 | -------------------------------------------------------------------------------- /19_Autoencoder/eloquent_tensorflow/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/eloquent_tensorflow/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /19_Autoencoder/eloquent_tensorflow/template.jinja: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifdef __has_attribute 4 | #define HAVE_ATTRIBUTE(x) __has_attribute(x) 5 | #else 6 | #define HAVE_ATTRIBUTE(x) 0 7 | #endif 8 | #if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) 9 | #define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4))) 10 | #else 11 | #define DATA_ALIGN_ATTRIBUTE 12 | #endif 13 | 14 | // automatically configure network 15 | {% if num_inputs is not none %}#define TF_NUM_INPUTS {{ num_inputs }}{% endif %} 16 | {% if num_outputs is not none %}#define TF_NUM_OUTPUTS {{ num_outputs }}{% endif %} 17 | #define TF_NUM_OPS {{ allowed_layers | length }} 18 | 19 | {% if allowed_layers | length > 0 %}/** 20 | * Call this function to register the ops 21 | * that have been detected 22 | */ 23 | template 24 | void registerNetworkOps(TF& nn) { 25 | {% for layer in allowed_layers %}nn.resolver.Add{{ layer }}(); 26 | {% endfor %} 27 | } 28 | {% endif %} 29 | 30 | {% if not_allowed_layers | length %}// these layers are used in Python 31 | // but are not allowed in Arduino 32 | {% for layer in not_allowed_layers %}// - {{ layer }} 33 | {% endfor %}{% endif %} 34 | 35 | // model data 36 | const unsigned char {{ model_name }}[{{ model_size }}] DATA_ALIGN_ATTRIBUTE = { {{ bytes_array }} }; -------------------------------------------------------------------------------- 
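A minimal usage sketch for the `convert_model()` helper defined in `eloquent_tensorflow/__init__.py` above (hypothetical example: the Keras layers and the output file name are placeholders, not files from this repository). Note that, as written, the helper loads `template.jinja` from a Windows-style path relative to the current working directory, so it is meant to be called from the project folder that contains the `eloquent_tensorflow` package:

```python
import tensorflow as tf
from eloquent_tensorflow import convert_model  # the package bundled alongside the notebook

# stand-in Keras model; replace with the trained autoencoder/encoder/decoder
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(3,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

# convert_model() returns the C header text: the model bytes plus the
# TF_NUM_INPUTS / TF_NUM_OUTPUTS defines and the registerNetworkOps() helper
# rendered from template.jinja
header = convert_model(model, model_name='tfModel')

with open('model.h', 'w') as f:
    f.write(header)
```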
/19_Autoencoder/figures/autoencoder_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/autoencoder_results.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig0.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig1.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig10.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig11.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig12.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig2.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig3.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig4.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig5.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig6.png 
-------------------------------------------------------------------------------- /19_Autoencoder/figures/fig7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig7.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig8.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/fig9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/fig9.png -------------------------------------------------------------------------------- /19_Autoencoder/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/figures/history_traing.png -------------------------------------------------------------------------------- /19_Autoencoder/libraries/ESP32/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/libraries/ESP32/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /19_Autoencoder/libraries/ESP32/tflm_esp32-2.0.0.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/libraries/ESP32/tflm_esp32-2.0.0.zip -------------------------------------------------------------------------------- /19_Autoencoder/libraries/Python/python-eloquent-tensorflow-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/19_Autoencoder/libraries/Python/python-eloquent-tensorflow-main.zip -------------------------------------------------------------------------------- /19_Autoencoder/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.0.2 2 | numpy==1.24.0 3 | seaborn==0.11.2 4 | pandas==1.4.0 5 | matplotlib==3.7.1 6 | tensorflow==2.15.0 -------------------------------------------------------------------------------- /20_Q_Learning/Q_Learning/Q_Learning.ino: -------------------------------------------------------------------------------- 1 | #include // To store the Q-Table in non-volatile memory 2 | #define ALPHA 0.5 // Learning rate 3 | #define GAMMA 0.9 // Discount factor 4 | #define EPSILON 0.5 // Probability of choosing a random action 5 | #define NUM_STATES 4095 // Number of possible states 6 | #define NUM_ACTIONS 5 // Number of possible actions 7 | 8 | float qTable[NUM_STATES][NUM_ACTIONS]; 9 | int currentState = 0; 10 | int action = 0; 11 | float reward = 0; 12 | 13 | void initializeQTable() 14 | { 15 | for (int i = 0; i < NUM_STATES; i++) 16 | { 17 | for (int j = 0; j < NUM_ACTIONS; j++) 18 | { 19 | 
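// start every state-action value at zero; updateQTable() refines these estimates online
// note: the table alone is NUM_STATES * NUM_ACTIONS * sizeof(float) = 4095 * 5 * 4 bytes (~80 KB of RAM)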
qTable[i][j] = 0.0; 20 | } 21 | } 22 | } 23 | 24 | int chooseAction(int state) 25 | { 26 | if (random(0, 100) < (EPSILON * 100)) 27 | { 28 | return random(0, NUM_ACTIONS); // Choose a random action 29 | } 30 | else 31 | { 32 | int maxAction = 0; 33 | float maxValue = qTable[state][0]; 34 | for (int i = 1; i < NUM_ACTIONS; i++) 35 | { 36 | if (qTable[state][i] > maxValue) 37 | { 38 | maxAction = i; 39 | maxValue = qTable[state][i]; 40 | } 41 | } 42 | return maxAction; // Choose the best action 43 | } 44 | } 45 | 46 | void updateQTable(int state, int action, float reward, int nextState) 47 | { 48 | float oldQValue = qTable[state][action]; 49 | float maxNextQValue = qTable[nextState][0]; 50 | for (int i = 1; i < NUM_ACTIONS; i++) 51 | { 52 | if (qTable[nextState][i] > maxNextQValue) 53 | { 54 | maxNextQValue = qTable[nextState][i]; 55 | } 56 | } 57 | qTable[state][action] = oldQValue + ALPHA * (reward + GAMMA * maxNextQValue - oldQValue); 58 | } 59 | 60 | int readSensor() 61 | { 62 | int sensorValue = analogRead(34); // Assuming the MQ-135 is connected to pin 34 63 | // Convert the read value to a discrete state 64 | return map(sensorValue, 0, 4095, 0, NUM_STATES - 1); 65 | } 66 | 67 | void setup() 68 | { 69 | Serial.begin(115200); 70 | initializeQTable(); 71 | pinMode(34, INPUT); 72 | } 73 | 74 | void loop() 75 | { 76 | currentState = readSensor(); 77 | action = chooseAction(currentState); 78 | 79 | // Perform the action (in this example, we won't do anything specific) 80 | delay(1000); // Wait one second between readings 81 | 82 | int nextState = readSensor(); 83 | 84 | // Set the reward (in this example, we'll use a fictitious reward) 85 | reward = random(0, 10) / 10.0; 86 | 87 | updateQTable(currentState, action, reward, nextState); 88 | 89 | Serial.print("Current State: "); 90 | Serial.print(currentState); 91 | Serial.print(" | Action: "); 92 | Serial.print(action); 93 | Serial.print(" | Reward: "); 94 | Serial.print(reward); 95 | Serial.print(" | Next State: "); 96 | Serial.println(nextState); 97 | 98 | delay(1000); 99 | } -------------------------------------------------------------------------------- /20_Q_Learning/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/20_Q_Learning/figures/fig0.png -------------------------------------------------------------------------------- /20_Q_Learning/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/20_Q_Learning/figures/fig1.png -------------------------------------------------------------------------------- /20_Q_Learning/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/20_Q_Learning/figures/fig2.png -------------------------------------------------------------------------------- /21_Huber_Regressor/HuberRegressor/HuberRegressor.h: -------------------------------------------------------------------------------- 1 | double score(double * input) { 2 | return 0.055454859886876616 + input[0] * 0.00900332318983459 + input[1] * 0.18410431672672556 + input[2] * 0.04558472576852831; 3 | } 4 | -------------------------------------------------------------------------------- /21_Huber_Regressor/README.md: 
--------------------------------------------------------------------------------
1 | # TinyML - Huber Regression 2 | 3 | *From mathematical foundations to edge implementation* 4 | 5 | **Social media:** 6 | 7 | 8 | 👨🏽‍💻 Github: [thommaskevin/TinyML](https://github.com/thommaskevin/TinyML) 9 | 10 | 👷🏾 Linkedin: [Thommas Kevin](https://www.linkedin.com/in/thommas-kevin-ab9810166/) 11 | 12 | 📽 Youtube: [Thommas Kevin](https://www.youtube.com/channel/UC7uazGXaMIE6MNkHg4ll9oA) 13 | 14 | :pencil2:CV Lattes CNPq: [Thommas Kevin Sales Flores](http://lattes.cnpq.br/0630479458408181) 15 | 16 | 👨🏻‍🏫 Research group: [Conecta.ai](https://conect2ai.dca.ufrn.br/) 17 | 18 | 19 | 20 | ![Figure 1](./figures/fig0.png) 21 | 22 | 23 | 24 | 25 | ## SUMMARY 26 | 27 | 1 - Introduction 28 | 29 | 2 - Mathematical Foundations 30 | 31 |  2.1 - Huber Loss Function 32 |  2.2 - Behavior of the Loss Function 33 |  2.3 - Why Use the Huber Regressor? 34 |  2.4 - Optimization Problem 35 |  2.5 - Choosing the Parameter $\delta$ 36 |  2.6 - Comparison with OLS and MAE 37 | 38 | 39 | 40 | 41 | 3 - TinyML Implementation 42 | 43 | 44 | ## 1 - Introduction 45 | 46 | Linear regression is widely used to model relationships between variables, but it is very sensitive to outliers - extreme values that can distort the model's fit. The Huber Regressor emerged as a robust alternative for such cases, combining the benefits of linear regression and robust regression and reducing the impact of outliers on the fitted model. 47 | 48 | 49 | The Huber Regressor was introduced by Peter J. Huber in 1964 and uses a hybrid of the quadratic loss of linear regression and the absolute loss of robust regression. It minimizes a loss that is quadratic for small errors and linear for large errors, which helps reduce the impact of outliers. 50 | 51 | 52 | ![Figure 2](./figures/huber.png) 53 | 54 | 55 | 56 | ## 2 - Mathematical Foundations 57 | 58 | The Huber Regressor is a robust regression method designed to minimize the impact of outliers in data by combining the advantages of ordinary least squares (OLS) regression and absolute deviation regression. It is particularly useful when the data is mostly normally distributed but contains some outliers. 59 | 60 | 61 | ###  2.1 - Huber Loss Function 62 | 63 | 64 | The Huber Regressor minimizes the **Huber loss function** instead of the usual squared error used in OLS. The Huber loss is quadratic for small residuals and linear for large residuals, making it a blend of mean squared error (MSE) and mean absolute error (MAE). 65 | 66 | Mathematically, the Huber loss function $L_{\delta}(r_i)$ for a residual $r_i = y_i - \hat{y}_i$ is defined as: 67 | 68 | $L_{\delta}(r_i) = 69 | \begin{cases} 70 | \frac{1}{2} r_i^2 & \text{if } |r_i| \leq \delta \\ 71 | \delta \cdot (|r_i| - \frac{1}{2} \delta) & \text{if } |r_i| > \delta 72 | \end{cases}$ 73 | 74 | Where: 75 | - $r_i$ is the residual, or the difference between the observed value $y_i$ and the predicted value $\hat{y}_i$. 76 | - $\delta$ is a threshold parameter that controls the point where the loss function transitions from quadratic to linear. 77 | 78 | 79 | ###   2.2 - Behavior of the Loss Function 80 | 81 | The Huber loss function exhibits two distinct behaviors depending on the magnitude of the residual $r_i$: 82 | 83 | - **Quadratic (MSE-like) for small residuals**: When $|r_i| \leq \delta$, the loss is quadratic ($\frac{1}{2} r_i^2$).
This makes the model sensitive to small errors, similar to the mean squared error (MSE), ensuring that it fits the data points closely when errors are small. 84 | 85 | - **Linear (MAE-like) for large residuals**: When $|r_i| > \delta$, the loss becomes linear ($\delta \cdot (|r_i| - \frac{1}{2} \delta)$). This limits the influence of outliers by reducing the penalty for large residuals, similar to the mean absolute error (MAE), making the model more robust to extreme values. 86 | 87 | This behavior allows the Huber Regressor to be robust to outliers while still being sensitive to small deviations in the data. 88 | 89 | 90 | ###   2.3 - Why Use the Huber Regressor? 91 | 92 | - **Outlier robustness**: The Huber loss reduces the influence of outliers by switching from quadratic to linear penalties for large residuals. This prevents outliers from dominating the model’s solution as they do in ordinary least squares. 93 | 94 | - **Sensitivity to small errors**: For small residuals, the quadratic penalty ensures that the model still focuses on fitting most data points well. 95 | 96 | - **Smooth transition**: The Huber loss offers a smooth transition between the two types of penalties (quadratic and linear), making it effective in scenarios with both normally distributed data and some outliers. 97 | 98 | 99 | ###   2.4 - Optimization Problem 100 | 101 | Similar to OLS, the Huber Regressor minimizes the sum of the losses for all data points: 102 | 103 | $\text{Minimize:} \quad \sum_{i=1}^{n} L_{\delta}(r_i)$ 104 | 105 | Where $n$ is the number of observations in the dataset. 106 | 107 | This minimization is typically done using iterative methods because the Huber loss function is non-smooth at $|r_i| = \delta$, meaning there is a transition in behavior that complicates analytical solutions. 108 | 109 | 110 | 111 | ###   2.5 - Choosing the Parameter $\delta$ 112 | 113 | The parameter $\delta$ controls the point at which the Huber loss transitions from quadratic to linear. The choice of $\delta$ directly affects the model’s behavior: 114 | 115 | - **Small $\delta$**: The model will be more robust to outliers, but less sensitive to normal data points. 116 | 117 | - **Large $\delta$**: The model behaves more like OLS, being more sensitive to small errors but also more influenced by outliers. 118 | 119 | Typically, the ideal value of $\delta$ is determined using cross-validation. 120 | 121 | 122 | ###   2.6 - Comparison with OLS and MAE 123 | 124 | - **OLS (Ordinary Least Squares)**: Uses a fully quadratic loss, making it very sensitive to outliers, as large errors are penalized more heavily. 125 | 126 | - **MAE (Mean Absolute Error)**: Uses a fully linear loss, making it robust to outliers, but it may underperform in capturing small deviations in the data. 127 | 128 | - **Huber Regressor**: Combines the two approaches, being quadratic for small residuals (like OLS) and linear for large residuals (like MAE), offering a balance between precision and robustness. 129 | 130 | 131 | 132 | 133 | ## 3 - TinyML Implementation 134 | 135 | With this example you can implement the machine learning algorithm in ESP32, Arduino, Arduino Portenta H7 with Vision Shield, Raspberry and other different microcontrollers or IoT devices. 
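Before walking through the notebook, the piecewise loss defined in Section 2.1 can be made concrete with a short NumPy sketch. This block is illustrative only and is not part of the original notebook; the `huber_loss` helper and the δ value below are placeholders chosen for the example:

```python
import numpy as np

def huber_loss(residuals, delta=1.35):
    """Huber loss from Section 2.1: quadratic for |r| <= delta, linear beyond it."""
    r = np.asarray(residuals, dtype=float)
    quadratic = 0.5 * r ** 2                      # MSE-like branch for small residuals
    linear = delta * (np.abs(r) - 0.5 * delta)    # MAE-like branch for large residuals
    return np.where(np.abs(r) <= delta, quadratic, linear)

print(huber_loss([0.5, 10.0]))  # [ 0.125  12.58875]
```

For r = 0.5 the quadratic branch gives 0.125, while for r = 10 the linear branch yields about 12.59 instead of the 50 that the pure quadratic term ½r² would give, which is exactly how the estimator caps the influence of outliers.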
136 | 137 | 138 | ### 3.0 - Install the libraries listed in the requirements.txt file 139 | 140 | 141 | 142 | 143 | ```python 144 | !pip install -r requirements.txt 145 | ``` 146 | 147 | ### 3.1 - Importing libraries 148 | 149 | ```python 150 | from sklearn.model_selection import train_test_split 151 | from sklearn.linear_model import HuberRegressor 152 | from sklearn.metrics import ( 153 | mean_absolute_error, 154 | mean_poisson_deviance, 155 | mean_squared_error, 156 | ) 157 | 158 | import m2cgen as m2c 159 | import numpy as np 160 | import pandas as pd 161 | import seaborn as sns 162 | 163 | from matplotlib import pyplot as plt 164 | 165 | import warnings 166 | warnings.filterwarnings('ignore') 167 | ``` 168 | 169 | 170 | ### 3.2 - Load Dataset 171 | 172 | 173 | The "Vehicle Attributes and Emissions Dataset" contains comprehensive information on various vehicles manufactured in the year 2000. It includes details such as make, model, vehicle class, engine size, cylinder count, transmission type, and fuel type. Additionally, the dataset provides ranges for fuel consumption and CO2 emissions, offering insights into the environmental impact of each vehicle. The dataset encompasses a wide range of vehicle types, from compact to mid-size, and includes both conventional and high-performance models. With this information, analysts and researchers can study trends in vehicle characteristics, fuel efficiency, and emissions . This dataset serves as a valuable resource for understanding the automotive landscape and informing discussions on environmental sustainability and transportation policies. 174 | 175 | 176 | link: https://www.kaggle.com/datasets/krupadharamshi/fuelconsumption/data 177 | 178 | 179 | 180 | ```python 181 | df = pd.read_csv('./data/FuelConsumption.csv') 182 | df.head() 183 | ``` 184 | 185 | ![Figure 3](./figures/fig1.png) 186 | 187 | ```python 188 | df.info() 189 | ``` 190 | 191 | ![Figure 3](./figures/fig2.png) 192 | 193 | 194 | ```python 195 | df.describe() 196 | ``` 197 | 198 | 199 | ![Figure 4](./figures/fig3.png) 200 | 201 | ### 3.3 - Clean Data 202 | 203 | ```python 204 | # 1. Removing rows with missing values 205 | df.dropna(inplace=True) 206 | # 2. 
Removing duplicates if any 207 | df.drop_duplicates(inplace=True) 208 | ``` 209 | 210 | ```python 211 | # Display the dataframe after cleaning 212 | df.describe() 213 | ``` 214 | 215 | 216 | ![Figure 4](./figures/fig4.png) 217 | 218 | ### 3.4 - Exploratory Data Analysis 219 | 220 | 221 | ```python 222 | sns.pairplot(df[['ENGINE SIZE','CYLINDERS','FUEL CONSUMPTION','COEMISSIONS ']]) 223 | plt.savefig('.\\figures\\pairplot.png', dpi=300, bbox_inches='tight') 224 | ``` 225 | 226 | ![Figure 4](./figures/fig5.png) 227 | 228 | 229 | ```python 230 | corr = df[['ENGINE SIZE','CYLINDERS','FUEL CONSUMPTION','COEMISSIONS ']].corr('spearman') 231 | ``` 232 | 233 | ```python 234 | # Adjusting the size of the figure 235 | plt.figure(figsize=(18,10)) 236 | # Your existing code for generating the heatmap 237 | heatmap = sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, cmap='coolwarm') 238 | # Adding values to the heatmap 239 | for i in range(len(corr.columns)): 240 | for j in range(len(corr.columns)): 241 | plt.text(j + 0.5, i + 0.5, f"{corr.iloc[i, j]:.2f}", ha='center', va='center', color='black', fontsize=18) 242 | 243 | plt.xticks(fontsize=20, rotation=45) 244 | plt.yticks(fontsize=20, rotation=0) 245 | cbar = heatmap.collections[0].colorbar 246 | cbar.ax.tick_params(labelsize=20) 247 | 248 | plt.savefig('.\\figures\\heatmap.png', dpi=300, bbox_inches='tight') 249 | 250 | # Display the heatmap 251 | plt.show() 252 | ``` 253 | 254 | 255 | ### 3.5 - Split into training and test data 256 | 257 | ```python 258 | X=df[['ENGINE SIZE','CYLINDERS', 'COEMISSIONS ']] 259 | y=df[['FUEL CONSUMPTION']] 260 | ``` 261 | 262 | 263 | ```python 264 | # Split the data into training and test sets 265 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) 266 | ``` 267 | 268 | 269 | ### 3.6 - Create the regressor model 270 | 271 | 272 | ```python 273 | def score_estimator(y_pred , y_true): 274 | 275 | print( 276 | "MSE: %.3f" 277 | % mean_squared_error( 278 | y_true, y_pred 279 | ) 280 | ) 281 | print( 282 | "MAE: %.3f" 283 | % mean_absolute_error( 284 | y_true, y_pred, 285 | ) 286 | ) 287 | 288 | # Ignore non-positive predictions, as they are invalid for 289 | # the Poisson deviance. 290 | mask = y_pred > 0 291 | if (~mask).any(): 292 | n_masked, n_samples = (~mask).sum(), mask.shape[0] 293 | print( 294 | "WARNING: Estimator yields invalid, non-positive predictions " 295 | f" for {n_masked} samples out of {n_samples}. These predictions " 296 | "are ignored when computing the Poisson deviance." 
297 | ) 298 | 299 | print( 300 | "mean Poisson deviance: %.3f" 301 | % mean_poisson_deviance( 302 | y_true , 303 | y_pred 304 | ) 305 | ) 306 | ``` 307 | 308 | 309 | ```python 310 | model = HuberRegressor(alpha=0.0, epsilon=1) 311 | ``` 312 | 313 | 314 | ### 3.7 - Train the model 315 | 316 | 317 | ```python 318 | model.fit(X_train, y_train) 319 | ``` 320 | 321 | 322 | ### 3.8 - Model evaluation 323 | 324 | ```python 325 | y_train_pred = model.predict(X_train) 326 | y_test_pred = model.predict(X_test) 327 | ``` 328 | 329 | ```python 330 | # Calculate residuals 331 | train_residuals = y_train.values.reshape(1,-1).tolist()[0] - y_train_pred 332 | # Calculate mean and standard deviation of residuals 333 | train_residuals_mean = np.mean(train_residuals) 334 | train_residuals_std = np.std(train_residuals) 335 | # Calculate residuals 336 | test_residuals = y_test.values.reshape(1,-1).tolist()[0] - y_test_pred 337 | # Calculate mean and standard deviation of residuals 338 | test_residuals_mean = np.mean(test_residuals) 339 | test_residuals_std = np.std(test_residuals) 340 | 341 | # Plot residuals 342 | plt.figure(figsize=(10, 5)) 343 | plt.subplot(1, 2, 1) 344 | plt.scatter(y_train_pred, train_residuals, c='blue', marker='o', label=f'Training data') 345 | plt.axhline(y=0, color='r', linestyle='-') 346 | plt.axhline(y=train_residuals_mean, color='k', linestyle='--', label=f'Mean: {train_residuals_mean:.3f}') 347 | plt.axhline(y=train_residuals_mean + 2 * train_residuals_std, color='g', linestyle='--', label=f'+2 Std Dev: {2*train_residuals_std:.2f}') 348 | plt.axhline(y=train_residuals_mean - 2 * train_residuals_std, color='g', linestyle='--', label=f'-2 Std Dev: {-2*train_residuals_std:.2f}') 349 | plt.xlabel('Predicted values') 350 | plt.ylabel('Residuals') 351 | plt.title('Residuals vs Predicted values (Training data)') 352 | plt.legend(loc='upper left') 353 | plt.grid(True) 354 | plt.subplot(1, 2, 2) 355 | plt.scatter(y_test_pred, test_residuals, c='green', marker='s', label=f'Test data') 356 | plt.axhline(y=0, color='r', linestyle='-') 357 | plt.axhline(y=test_residuals_mean, color='k', linestyle='--', label=f'Mean: {test_residuals_mean:.3f}') 358 | plt.axhline(y=test_residuals_mean + 2 * test_residuals_std, color='g', linestyle='--', label=f'+2 Std Dev: {2*test_residuals_std:.2f}') 359 | plt.axhline(y=test_residuals_mean - 2 * test_residuals_std, color='g', linestyle='--', label=f'-2 Std Dev: {-2*test_residuals_std:.2f}') 360 | plt.xlabel('Predicted values') 361 | plt.ylabel('Residuals') 362 | plt.title('Residuals vs Predicted values (Test data)') 363 | plt.legend(loc='upper left') 364 | plt.grid(True) 365 | plt.tight_layout() 366 | plt.show() 367 | 368 | # Check for normality 369 | plt.figure(figsize=(10, 5)) 370 | plt.subplot(1, 2, 1) 371 | plt.hist(train_residuals, bins=20, color='blue', alpha=0.6) 372 | plt.title('Histogram of Residuals (Training data)') 373 | plt.xlabel('Residuals') 374 | plt.ylabel('Frequency') 375 | plt.axvline(x=train_residuals_mean, color='k', linestyle='--', label=f'Mean: {train_residuals_mean:.3f}') 376 | plt.axvline(x=train_residuals_mean + 2 * train_residuals_std, color='g', linestyle='--', label=f'+2 Std Dev: {2*train_residuals_std:.3f}') 377 | plt.axvline(x=train_residuals_mean - 2 * train_residuals_std, color='g', linestyle='--', label=f'-2 Std Dev: {-2*train_residuals_std:.3f}') 378 | plt.legend(loc='upper right') 379 | plt.grid(True) 380 | plt.subplot(1, 2, 2) 381 | plt.hist(test_residuals, bins=20, color='green', alpha=0.6) 382 | plt.title('Histogram of 
### 3.10 - Save the model to a .h file

```python
with open('./HuberRegressor/HuberRegressor.h', 'w') as file:
    file.write(code)
```

### 3.11 - Deploy Model

#### 3.11.1 - Complete Arduino Sketch

```cpp
#include "HuberRegressor.h"

void setup()
{
  Serial.begin(115200);
}

void loop()
{
  double X_1[] = {4.6, 8, 304};
  double result_1 = score(X_1);
  Serial.print("Result of predict with input X1 (real value = 15.4): ");
  Serial.println(result_1);
  delay(2000);

  double X_2[] = {1.5, 4, 216};
  double result_2 = score(X_2);
  Serial.print("Result of predict with input X2 (real value = 11.3): ");
  Serial.println(result_2);
  delay(2000);
}
```

### 3.12 - Result

![Figure 13](./figures/fig13.png)

--------------------------------------------------------------------------------
/21_Huber_Regressor/figures/fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig0.png
--------------------------------------------------------------------------------
/21_Huber_Regressor/figures/fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig1.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig10.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig11.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig12.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig13.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig2.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig3.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig4.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig5.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig6.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig7.png -------------------------------------------------------------------------------- 
/21_Huber_Regressor/figures/fig8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig8.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/fig9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/fig9.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/heatmap.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/huber.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/huber.png -------------------------------------------------------------------------------- /21_Huber_Regressor/figures/pairplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/21_Huber_Regressor/figures/pairplot.png -------------------------------------------------------------------------------- /21_Huber_Regressor/requirements.txt: -------------------------------------------------------------------------------- 1 | antlr4-python3-runtime==4.7 2 | jupyter==1.0.0 3 | numpy==1.16.4 4 | pandas==0.23.4 5 | scikit-learn==0.21.2 6 | scipy==1.3.0 7 | tensorflow==1.15.4 8 | requests 9 | bokeh==2.1.1 10 | onnx==1.8.0 11 | tqdm==4.56.0 -------------------------------------------------------------------------------- /22_QAT/QAT/QAT.ino: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/QAT/QAT.ino -------------------------------------------------------------------------------- /22_QAT/QAT/model.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/QAT/model.h -------------------------------------------------------------------------------- /22_QAT/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig0.png -------------------------------------------------------------------------------- /22_QAT/figures/fig00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig00.png -------------------------------------------------------------------------------- /22_QAT/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig1.png 
-------------------------------------------------------------------------------- /22_QAT/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig10.png -------------------------------------------------------------------------------- /22_QAT/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig11.png -------------------------------------------------------------------------------- /22_QAT/figures/fig12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig12.png -------------------------------------------------------------------------------- /22_QAT/figures/fig13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig13.png -------------------------------------------------------------------------------- /22_QAT/figures/fig14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig14.png -------------------------------------------------------------------------------- /22_QAT/figures/fig15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig15.png -------------------------------------------------------------------------------- /22_QAT/figures/fig16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig16.png -------------------------------------------------------------------------------- /22_QAT/figures/fig17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig17.png -------------------------------------------------------------------------------- /22_QAT/figures/fig18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig18.png -------------------------------------------------------------------------------- /22_QAT/figures/fig19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig19.png -------------------------------------------------------------------------------- /22_QAT/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig2.png -------------------------------------------------------------------------------- /22_QAT/figures/fig20.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig20.png -------------------------------------------------------------------------------- /22_QAT/figures/fig21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig21.png -------------------------------------------------------------------------------- /22_QAT/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig3.png -------------------------------------------------------------------------------- /22_QAT/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig4.png -------------------------------------------------------------------------------- /22_QAT/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig5.png -------------------------------------------------------------------------------- /22_QAT/figures/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig6.png -------------------------------------------------------------------------------- /22_QAT/figures/fig7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig7.png -------------------------------------------------------------------------------- /22_QAT/figures/fig8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig8.png -------------------------------------------------------------------------------- /22_QAT/figures/fig9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/fig9.png -------------------------------------------------------------------------------- /22_QAT/figures/heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/heatmap.png -------------------------------------------------------------------------------- /22_QAT/figures/hist_testing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/hist_testing.png -------------------------------------------------------------------------------- /22_QAT/figures/hist_testing_q.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/hist_testing_q.png -------------------------------------------------------------------------------- /22_QAT/figures/hist_training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/hist_training.png -------------------------------------------------------------------------------- /22_QAT/figures/hist_training_q.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/hist_training_q.png -------------------------------------------------------------------------------- /22_QAT/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/history_traing.png -------------------------------------------------------------------------------- /22_QAT/figures/history_traing_q.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/history_traing_q.png -------------------------------------------------------------------------------- /22_QAT/figures/pairplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/figures/pairplot.png -------------------------------------------------------------------------------- /22_QAT/library/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/library/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /22_QAT/models/model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/models/model.keras -------------------------------------------------------------------------------- /22_QAT/models/model_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/models/model_quant_float32.tflite -------------------------------------------------------------------------------- /22_QAT/models/model_quant_int8.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/models/model_quant_int8.tflite -------------------------------------------------------------------------------- /22_QAT/models/q_aware_model_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/models/q_aware_model_quant_float32.tflite -------------------------------------------------------------------------------- 
/22_QAT/models/q_aware_model_quant_int8.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/22_QAT/models/q_aware_model_quant_int8.tflite -------------------------------------------------------------------------------- /22_QAT/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | tensorflow==2.15.0 3 | pandas==1.4.0 4 | numpy==1.24.0 5 | matplotlib==3.7.1 6 | seaborn==0.11.2 7 | keras==2.15.0 8 | tensorflow-model-optimization==0.7.5 9 | -------------------------------------------------------------------------------- /23_PTP/ArduinoCode/CNN_Pruning.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | //#include "model_pruned_L1.h" 5 | //#include "model_pruned_interative.h" 6 | //#include "model_original.h" 7 | #include "model_random.h" 8 | 9 | 10 | #define N_INPUTS 64 11 | #define N_OUTPUTS 10 12 | // in future projects you may need to tweak this value: it's a trial and error process 13 | #define TENSOR_ARENA_SIZE 36*1024 14 | 15 | Eloquent::TinyML::TensorFlow::TensorFlow tf; 16 | 17 | 18 | float start_time = -1; 19 | float end_time = -1; 20 | float width_time = -1; 21 | 22 | float input[64] = {0.00000000000f, 0.12500000000f, 0.00000000000f, 0.50000000000f, 0.56250000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.81250000000f, 0.31250000000f, 0.87500000000f, 0.50000000000f, 0.43750000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.75000000000f, 0.31250000000f, 0.12500000000f, 0.00000000000f, 0.56250000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.43750000000f, 0.31250000000f, 0.00000000000f, 0.00000000000f, 0.18750000000f, 0.31250000000f, 0.00000000000f, 0.00000000000f, 0.18750000000f, 0.62500000000f, 0.00000000000f, 0.00000000000f, 0.12500000000f, 0.62500000000f, 0.00000000000f, 0.00000000000f, 0.06250000000f, 0.81250000000f, 0.00000000000f, 0.00000000000f, 0.06250000000f, 0.75000000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.31250000000f, 0.81250000000f, 0.31250000000f, 0.56250000000f, 0.81250000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.00000000000f, 0.56250000000f, 1.00000000000f, 1.00000000000f, 0.43750000000f, 0.00000000000f}; 23 | 24 | float y_pred[10] = {0}; 25 | 26 | void setup() { 27 | Serial.begin(9600); 28 | delay(4000); 29 | tf.begin(model); 30 | 31 | // check if model loaded fine 32 | if (!tf.isOk()) { 33 | Serial.print("ERROR: "); 34 | Serial.println(tf.getErrorMessage()); 35 | 36 | while (true) delay(1000); 37 | } 38 | } 39 | 40 | void loop() { 41 | 42 | 43 | 44 | start_time = millis() ; 45 | //start_time = micros(); 46 | tf.predict(input, y_pred); 47 | end_time = millis(); 48 | //end_time = micros(); 49 | for (int i = 0; i < 10; i++) { 50 | Serial.print(y_pred[i]); 51 | Serial.print(i == 9 ? 
'\n' : ','); 52 | } 53 | Serial.print("Predicted class is: "); 54 | Serial.println(tf.probaToClass(y_pred)); 55 | // or you can skip the predict() method and call directly predictClass() 56 | Serial.print("Sanity check: "); 57 | Serial.println(tf.predictClass(input)); 58 | Serial.print(" - Time (ms): "); 59 | width_time = end_time - start_time; 60 | Serial.println(width_time); 61 | delay(2000); 62 | 63 | } -------------------------------------------------------------------------------- /23_PTP/figures/acc_quantized_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/acc_quantized_model.png -------------------------------------------------------------------------------- /23_PTP/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig0.png -------------------------------------------------------------------------------- /23_PTP/figures/fig01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig01.png -------------------------------------------------------------------------------- /23_PTP/figures/fig02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig02.png -------------------------------------------------------------------------------- /23_PTP/figures/fig03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig03.png -------------------------------------------------------------------------------- /23_PTP/figures/fig04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig04.png -------------------------------------------------------------------------------- /23_PTP/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig1.png -------------------------------------------------------------------------------- /23_PTP/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig2.png -------------------------------------------------------------------------------- /23_PTP/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig3.png -------------------------------------------------------------------------------- /23_PTP/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig4.png 
-------------------------------------------------------------------------------- /23_PTP/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/fig5.png -------------------------------------------------------------------------------- /23_PTP/figures/history_pruning_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/history_pruning_traing.png -------------------------------------------------------------------------------- /23_PTP/figures/history_traing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/history_traing.png -------------------------------------------------------------------------------- /23_PTP/figures/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/model.png -------------------------------------------------------------------------------- /23_PTP/figures/model_size_pruned_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/model_size_pruned_comparison.png -------------------------------------------------------------------------------- /23_PTP/figures/size_PTQ_comparative.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/size_PTQ_comparative.png -------------------------------------------------------------------------------- /23_PTP/figures/time_acc_pruning_comparative.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/figures/time_acc_pruning_comparative.png -------------------------------------------------------------------------------- /23_PTP/library/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/library/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /23_PTP/models/model_original.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/model_original.keras -------------------------------------------------------------------------------- /23_PTP/models/model_original_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/model_original_quant_float32.tflite -------------------------------------------------------------------------------- /23_PTP/models/original_model.keras: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/original_model.keras -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_L1.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_L1.keras -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_L1_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_L1_quant_float32.tflite -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_interative.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_interative.keras -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_interative_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_interative_quant_float32.tflite -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_magnitude.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_magnitude.keras -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_magnitude_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_magnitude_quant_float32.tflite -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_random.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_random.keras -------------------------------------------------------------------------------- /23_PTP/models/pruned_model_random_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/23_PTP/models/pruned_model_random_quant_float32.tflite -------------------------------------------------------------------------------- /23_PTP/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | tensorflow==2.15.0 3 | pandas==1.4.0 4 | numpy==1.24.0 5 | matplotlib==3.7.1 6 | seaborn==0.11.2 7 | keras==2.15.0 8 | tensorflow-model-optimization==0.7.5 9 | -------------------------------------------------------------------------------- 
/24_Knowledge_Distillation/ArduinoCode/CNN_Knowledge_Distillation.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "teacher_model_quant_float32.h" 5 | // #include "student_model_quant_float32.h" 6 | 7 | #define N_INPUTS 784 8 | #define N_OUTPUTS 10 9 | // in future projects you may need to tweak this value: it's a trial and error process 10 | #define TENSOR_ARENA_SIZE 90 * 1024 11 | 12 | Eloquent::TinyML::TensorFlow::TensorFlow tf; 13 | 14 | float start_time = -1; 15 | float end_time = -1; 16 | float width_time = -1; 17 | 18 | float input[784] = {...}; 19 | 20 | float y_pred[10] = {5}; 21 | 22 | void setup() 23 | { 24 | Serial.begin(9600); 25 | delay(4000); 26 | tf.begin(model); 27 | 28 | // check if model loaded fine 29 | if (!tf.isOk()) 30 | { 31 | Serial.print("ERROR: "); 32 | Serial.println(tf.getErrorMessage()); 33 | 34 | while (true) 35 | delay(1000); 36 | } 37 | } 38 | 39 | void loop() 40 | { 41 | 42 | start_time = millis(); 43 | // start_time = micros(); 44 | tf.predict(input, y_pred); 45 | end_time = millis(); 46 | // end_time = micros(); 47 | for (int i = 0; i < 10; i++) 48 | { 49 | Serial.print(y_pred[i]); 50 | Serial.print(i == 9 ? '\n' : ','); 51 | } 52 | Serial.print("Predicted class is: "); 53 | Serial.println(tf.probaToClass(y_pred)); 54 | // or you can skip the predict() method and call directly predictClass() 55 | Serial.print("Sanity check: "); 56 | Serial.println(tf.predictClass(input)); 57 | Serial.print(" - Time (ms): "); 58 | width_time = end_time - start_time; 59 | Serial.println(width_time); 60 | delay(2000); 61 | } -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/acc_quantized_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/acc_quantized_model.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig0.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig00.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig1.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig10.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig11.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig11.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig2.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig3.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig4.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig5.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig6.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig7.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig8.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/fig9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/fig9.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/history_teacher_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/history_teacher_model.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/figures/size_KD_comparative.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/figures/size_KD_comparative.png -------------------------------------------------------------------------------- /24_Knowledge_Distillation/library/EloquentTinyML-main.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/library/EloquentTinyML-main.zip -------------------------------------------------------------------------------- /24_Knowledge_Distillation/models/student_model_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/models/student_model_quant_float32.tflite -------------------------------------------------------------------------------- /24_Knowledge_Distillation/models/teacher_model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/models/teacher_model.keras -------------------------------------------------------------------------------- /24_Knowledge_Distillation/models/teacher_model_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/24_Knowledge_Distillation/models/teacher_model_quant_float32.tflite -------------------------------------------------------------------------------- /24_Knowledge_Distillation/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | tensorflow==2.15.0 3 | pandas==1.4.0 4 | numpy==1.24.0 5 | matplotlib==3.7.1 6 | seaborn==0.11.2 7 | keras==2.15.0 8 | -------------------------------------------------------------------------------- /25_MicromobileNet/ArduinoCode/MobileNet.ino: -------------------------------------------------------------------------------- 1 | #include "sample_image.h" 2 | 3 | // #include "model_pico.h" 4 | // #include "model_nano.h" 5 | // #include "model_micro.h" 6 | // #include "model_milli.h" 7 | #include "model.h" 8 | 9 | // PicoMobileNet net; 10 | // NanoMobileNet net; 11 | // MicroMobileNet net; 12 | // MilliMobileNet net; 13 | MobileNet net; 14 | 15 | void setup() 16 | { 17 | Serial.begin(9600); 18 | } 19 | 20 | void loop() 21 | { 22 | size_t start = micros(); 23 | net.predict(sample_image); 24 | 25 | Serial.print("Predicted output = "); 26 | Serial.print(net.output); 27 | Serial.println(" (Real value: 0) "); 28 | Serial.print("It took "); 29 | Serial.print(micros() - start); 30 | Serial.println(" us to inference."); 31 | delay(2000); 32 | } -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig00.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig01.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig01.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig02.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig03.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig04.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig05.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig06.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig07.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig08.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig08.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig09.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig09.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig10.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig11.png 
-------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig12.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig13.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig14.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig15.png -------------------------------------------------------------------------------- /25_MicromobileNet/figures/fig16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/figures/fig16.png -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | from micromobilenet.architectures import * -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/BaseMobileNet.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os.path 3 | from typing import List, Generator, Iterable 4 | 5 | import numpy as np 6 | from cached_property import cached_property 7 | from keras import Sequential 8 | from keras.optimizers import Adam 9 | from keras.callbacks import ModelCheckpoint 10 | from keras.layers import Input, Conv2D, Reshape, Softmax, ZeroPadding2D, ReLU, DepthwiseConv2D 11 | from micromobilenet.convert.MobileNetConverter import MobileNetConverter 12 | from micromobilenet.architectures.Config import Config 13 | 14 | 15 | class BaseMobileNet: 16 | """ 17 | Base class for BaseMobileNet architectures 18 | """ 19 | def __init__(self, num_classes: int): 20 | """ 21 | 22 | """ 23 | self.num_classes = num_classes 24 | self.history = None 25 | self.layers = [] 26 | self.config = Config() 27 | self.model = None 28 | self.i = 1 29 | 30 | def __repr__(self): 31 | """ 32 | 33 | :return: 34 | """ 35 | self.model.summary() 36 | return str(self.model) 37 | 38 | @property 39 | def weights_file(self) -> str: 40 | 
""" 41 | Get path to weights file 42 | :return: 43 | """ 44 | return f"{self.config.checkpoint_path}.weights.h5" if self.config.checkpoint_path != "" else "" 45 | 46 | @cached_property 47 | def convert(self) -> MobileNetConverter: 48 | """ 49 | Get instance of C++ converter 50 | :return: 51 | """ 52 | return MobileNetConverter(self) 53 | 54 | def build(self): 55 | """ 56 | Generate model 57 | :return: 58 | """ 59 | self.i = 1 60 | self.model = Sequential() 61 | self.add(Input(shape=(96, 96, 1), name="input")) 62 | 63 | # add middle layers 64 | for layers in self.make_layers(): 65 | if isinstance(layers, Iterable): 66 | layers = list(layers) 67 | 68 | if not isinstance(layers, List): 69 | layers = [layers] 70 | 71 | for l in layers: 72 | self.add(l) 73 | 74 | # head 75 | self.add(Conv2D(self.num_classes, (1, 1), padding="same", name="conv2d_last")) 76 | self.add(Reshape((self.num_classes,), name="reshape")) 77 | self.add(Softmax(name="softmax")) 78 | 79 | def add(self, layer): 80 | """ 81 | Add layer 82 | :param layer: 83 | :return: 84 | """ 85 | self.model.add(layer) 86 | self.layers.append(layer) 87 | 88 | def load_weights(self, abort_on_fail: bool = True): 89 | """ 90 | Load checkpoint 91 | :return: 92 | """ 93 | assert self.config.checkpoint_path != "", "you must set net.config.checkpoint_path!" 94 | 95 | if os.path.isfile(self.weights_file): 96 | self.model.load_weights(self.weights_file) 97 | else: 98 | logging.warning(f"Cannot load weight file {self.weights_file}") 99 | 100 | if abort_on_fail: 101 | raise FileNotFoundError(self.weights_file) 102 | 103 | return self 104 | 105 | def compile(self): 106 | """ 107 | Compile model 108 | :return: 109 | """ 110 | if self.model is None: 111 | self.build() 112 | 113 | self.model.compile( 114 | optimizer=Adam(learning_rate=self.config.learning_rate), 115 | loss=self.config.loss, 116 | metrics=self.config.metrics, 117 | ) 118 | 119 | return self 120 | 121 | def fit(self, train_x: np.ndarray, train_y: np.ndarray, val_x: np.ndarray, val_y: np.ndarray, epochs: int = 100): 122 | """ 123 | Fit model 124 | :param train_x: 125 | :param train_y: 126 | :param val_x: 127 | :param val_y: 128 | :return: 129 | """ 130 | callbacks = [] 131 | 132 | if self.weights_file != "": 133 | callbacks.append(ModelCheckpoint( 134 | self.weights_file, 135 | monitor=f"val_{self.config.metrics[0]}", 136 | verbose=1, 137 | save_best_only=True, 138 | save_weights_only=True, 139 | initial_value_threshold=self.config.checkpoint_min_accuracy 140 | )) 141 | 142 | self.history = self.model.fit( 143 | train_x, 144 | train_y, 145 | validation_data=(val_x, val_y), 146 | batch_size=self.config.batch_size, 147 | epochs=epochs, 148 | verbose=self.config.verbosity, 149 | callbacks=callbacks 150 | ) 151 | 152 | return self 153 | 154 | def predict(self, xs: np.ndarray) -> np.ndarray: 155 | """ 156 | Predict 157 | :param xs: 158 | :return: 159 | """ 160 | return self.model.predict(xs) 161 | 162 | def make_depthwise(self, filters: int, stride: int = 1, padding: str = "same") -> Generator: 163 | """ 164 | Generate depthwise + pointwise layers 165 | :param padding: 166 | :param filters: 167 | :param stride: 168 | :return: 169 | """ 170 | i = self.i 171 | self.i += 1 172 | 173 | if padding == "same": 174 | yield ZeroPadding2D(name=f"hidden_{i}__padding") 175 | 176 | yield DepthwiseConv2D((3, 3), padding="valid", strides=(stride, stride), use_bias=False, name=f"hidden_{i}__dw") 177 | yield ReLU(6., name=f"hidden_{i}__relu_1") 178 | yield Conv2D(filters, (1, 1), padding="same", strides=(1, 1), 
use_bias=False, name=f"hidden_{i}__pw") 179 | yield ReLU(6., name=f"hidden_{i}__relu_2") 180 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/Config.py: -------------------------------------------------------------------------------- 1 | class Config: 2 | """ 3 | BaseMobileNet config object 4 | """ 5 | def __init__(self): 6 | """ 7 | 8 | """ 9 | self.learning_rate = 0.001 10 | self.loss = "sparse_categorical_crossentropy" 11 | self.metrics = ["sparse_categorical_accuracy"] 12 | self.checkpoint_min_accuracy = 0.7 13 | self.batch_size = 32 14 | self.verbosity = 1 15 | self.checkpoint_path = "" 16 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/MicroMobileNet.py: -------------------------------------------------------------------------------- 1 | from keras.layers import Conv2D, MaxPool2D, Dropout 2 | from micromobilenet.architectures.BaseMobileNet import BaseMobileNet 3 | 4 | 5 | class MicroMobileNet(BaseMobileNet): 6 | def make_layers(self): 7 | yield Conv2D(3, (3, 3), padding="valid", use_bias=False, strides=(2, 2), name="conv2d_0") 8 | yield self.make_depthwise(filters=6) 9 | yield self.make_depthwise(filters=12, stride=2) 10 | yield self.make_depthwise(filters=12) 11 | yield self.make_depthwise(filters=24, stride=2) 12 | yield self.make_depthwise(filters=24) 13 | yield self.make_depthwise(filters=24, stride=2) 14 | yield self.make_depthwise(filters=24) 15 | yield self.make_depthwise(filters=24, stride=2) 16 | yield MaxPool2D((3, 3), name="maxpool_last") 17 | yield Dropout(0.1, name="dropout") 18 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/MilliMobileNet.py: -------------------------------------------------------------------------------- 1 | from keras.layers import Conv2D, MaxPool2D, Dropout 2 | from micromobilenet.architectures.BaseMobileNet import BaseMobileNet 3 | 4 | 5 | class MilliMobileNet(BaseMobileNet): 6 | def make_layers(self): 7 | """ 8 | 9 | """ 10 | yield Conv2D(3, (3, 3), padding="valid", use_bias=False, strides=(2, 2), name="conv2d_0") 11 | yield self.make_depthwise(filters=6) 12 | yield self.make_depthwise(filters=12, stride=2) 13 | yield self.make_depthwise(filters=12) 14 | yield self.make_depthwise(filters=24, stride=2) 15 | yield self.make_depthwise(filters=24) 16 | yield self.make_depthwise(filters=48, stride=2) 17 | yield self.make_depthwise(filters=48) 18 | yield self.make_depthwise(filters=48, stride=2) 19 | yield self.make_depthwise(filters=48) 20 | yield MaxPool2D((3, 3), name="maxpool_last") 21 | yield Dropout(0.1, name="dropout") 22 | 23 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/MobileNet.py: -------------------------------------------------------------------------------- 1 | from keras.layers import Conv2D, MaxPool2D, Dropout 2 | from micromobilenet.architectures.BaseMobileNet import BaseMobileNet 3 | 4 | 5 | class MobileNet(BaseMobileNet): 6 | def make_layers(self): 7 | """ 8 | 9 | """ 10 | yield Conv2D(3, (3, 3), padding="valid", use_bias=False, strides=(2, 2), name="conv2d_0") 11 | yield self.make_depthwise(filters=6) 12 | yield self.make_depthwise(filters=12, stride=2) 13 | yield self.make_depthwise(filters=12) 14 | yield self.make_depthwise(filters=24, stride=2) 15 | yield self.make_depthwise(filters=24) 16 | yield 
self.make_depthwise(filters=48, stride=2) 17 | yield self.make_depthwise(filters=48) 18 | yield self.make_depthwise(filters=48) 19 | yield self.make_depthwise(filters=48) 20 | yield self.make_depthwise(filters=48) 21 | yield self.make_depthwise(filters=96, stride=2) 22 | yield self.make_depthwise(filters=96) 23 | yield MaxPool2D((3, 3), name="maxpool_last") 24 | yield Dropout(0.1, name="dropout") 25 | 26 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/NanoMobileNet.py: -------------------------------------------------------------------------------- 1 | from keras.layers import Conv2D, MaxPool2D, Dropout 2 | from micromobilenet.architectures.BaseMobileNet import BaseMobileNet 3 | 4 | 5 | class NanoMobileNet(BaseMobileNet): 6 | def make_layers(self): 7 | yield Conv2D(3, (3, 3), padding="valid", use_bias=False, strides=(2, 2), name="conv2d_0") 8 | yield self.make_depthwise(filters=6, stride=2) 9 | yield self.make_depthwise(filters=12, stride=2) 10 | yield self.make_depthwise(filters=24, stride=2) 11 | yield self.make_depthwise(filters=24, stride=2) 12 | yield MaxPool2D((3, 3), name="maxpool_last") 13 | yield Dropout(0.1, name="dropout") 14 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/PicoMobileNet.py: -------------------------------------------------------------------------------- 1 | from keras.layers import Conv2D, MaxPool2D, Dropout 2 | from micromobilenet.architectures.BaseMobileNet import BaseMobileNet 3 | 4 | 5 | class PicoMobileNet(BaseMobileNet): 6 | def make_layers(self): 7 | yield Conv2D(3, (3, 3), padding="valid", use_bias=False, strides=(3, 3), name="conv2d_0") 8 | yield self.make_depthwise(filters=6, stride=2) 9 | yield self.make_depthwise(filters=12, stride=2) 10 | yield self.make_depthwise(filters=24, stride=2) 11 | yield MaxPool2D((4, 4), name="maxpool_last") 12 | yield Dropout(0.1, name="dropout") 13 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__init__.py: -------------------------------------------------------------------------------- 1 | from micromobilenet.architectures.PicoMobileNet import PicoMobileNet 2 | from micromobilenet.architectures.NanoMobileNet import NanoMobileNet 3 | from micromobilenet.architectures.MicroMobileNet import MicroMobileNet 4 | from micromobilenet.architectures.MobileNet import MobileNet 5 | from micromobilenet.architectures.MilliMobileNet import MilliMobileNet 6 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/BaseMobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/BaseMobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/Config.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/Config.cpython-39.pyc -------------------------------------------------------------------------------- 
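The five architecture classes above (Pico, Nano, Micro, Milli and the full MobileNet) differ only in the layer stack yielded by make_layers(); training and conversion go through the shared BaseMobileNet API shown earlier (config, compile, fit, predict and convert.to_cpp). The following is a minimal usage sketch, not taken from the repository: the constructor call is an assumption (num_classes is referenced in build(), but the __init__ signature is not reproduced in this listing), and the checkpoint path and data are placeholders.

import numpy as np
from micromobilenet.architectures import MicroMobileNet

# Assumed constructor: num_classes is used in build(), but __init__ is not shown in this listing
net = MicroMobileNet(num_classes=3)

# Fields defined in architectures/Config.py
net.config.learning_rate = 0.001
net.config.batch_size = 32
net.config.checkpoint_path = "micro_checkpoint"  # weights are saved to micro_checkpoint.weights.h5

# Placeholder data: 96x96 grayscale images in [0, 1], integer class labels
train_x = np.random.rand(64, 96, 96, 1).astype("float32")
train_y = np.random.randint(0, 3, size=64)
val_x, val_y = train_x[:16], train_y[:16]

net.compile()                                      # builds the Keras model if needed
net.fit(train_x, train_y, val_x, val_y, epochs=5)
probas = net.predict(val_x)                        # softmax outputs, shape (16, 3)

# Export the trained network as a self-contained C++ class
with open("MicroMobileNet.h", "w") as f:
    f.write(net.convert.to_cpp(classname="MicroMobileNet"))

The generated header is the "compiled" class rendered from convert/templates/BaseMobileNet.jinja further below, with the trained weights embedded as constant float arrays.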
/25_MicromobileNet/micromobilenet/architectures/__pycache__/MicroMobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/MicroMobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/MilliMobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/MilliMobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/MobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/MobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/NanoMobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/NanoMobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/PicoMobileNet.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/PicoMobileNet.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/architectures/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/architectures/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/Environment.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from jinja2 import Environment as Base 4 | from os.path import normpath, join, dirname 5 | from math import ceil, floor 6 | import numpy as np 7 | 8 | 9 | class Environment(Base): 10 | """ 11 | Override default Environment 12 | """ 13 | def __init__(self, *args, **kwargs): 14 | """ 15 | 16 | :param args: 17 | :param kwargs: 18 | """ 19 | filters = kwargs.pop("filters", {}) 20 | globals = kwargs.pop("globals", {}) 21 | kwargs.setdefault("extensions", []) 22 | 23 | super().__init__(*args, **kwargs) 24 | self._add_filters() 25 | self._add_globals() 26 | self.filters.update(filters) 27 | self.globals.update(globals) 28 | 29 | def join_path(self, template: str, parent: str) -> str: 30 | """ 31 | 32 | :param template: 33 | :param parent: 34 | :return: 35 | """ 36 | return 
normpath(join(dirname(parent), template)) 37 | 38 | def _add_filters(self): 39 | """ 40 | Add language-agnostic filters 41 | :return: 42 | """ 43 | def to_array(arr) -> str: 44 | values = ", ".join("%.11f" % x for x in arr.flatten()) 45 | return f"{{{values}}}" 46 | 47 | def to_weights_shape(weights: np.ndarray) -> str: 48 | h, w, c, d = weights.shape 49 | 50 | if d == 1: 51 | # depthwise kernel 52 | return f"[{c}][{h * w}]" 53 | 54 | return f"[{d}][{h * w * c}]" 55 | 56 | def to_weights_array(weights: np.ndarray) -> str: 57 | h, w, c, d = weights.shape 58 | 59 | if d == 1: 60 | # depthwise kernel 61 | values = ",\n".join(to_array(weights[:, :, i]) for i in range(c)) 62 | else: 63 | values = ",\n".join(to_array(weights[:, :, :, i]) for i in range(d)) 64 | 65 | return f"{{{values}}}" 66 | 67 | self.filters.update({ 68 | "ceil": ceil, 69 | "floor": floor, 70 | "to_array": to_array, 71 | "to_weights_shape": to_weights_shape, 72 | "to_weights_array": to_weights_array 73 | }) 74 | 75 | def _add_globals(self): 76 | """ 77 | Add language-agnostic globals 78 | :return: 79 | """ 80 | self.globals.update({ 81 | "np": np, 82 | "len": len, 83 | "zip": zip, 84 | "int": int, 85 | "ceil": ceil, 86 | "eps": 0.0001, 87 | "floor": floor, 88 | "range": range, 89 | "sorted": sorted, 90 | "enumerate": enumerate, 91 | "isinstance": isinstance 92 | }) 93 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/LayerData.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class LayerData: 5 | """ 6 | Wrap layer to get its data 7 | """ 8 | def __init__(self, layer): 9 | """ 10 | 11 | :param layer: 12 | """ 13 | self.layer = layer 14 | 15 | def __repr__(self): 16 | """ 17 | Proxy 18 | :return: 19 | """ 20 | return repr(self.layer) 21 | 22 | def __getattr__(self, item): 23 | """ 24 | Proxy 25 | :param item: 26 | :return: 27 | """ 28 | return getattr(self.layer, item) 29 | 30 | @property 31 | def io(self): 32 | return getattr(self.layer, "_io", None) 33 | 34 | @property 35 | def input_shape(self): 36 | return self.layer.input.shape[1:] 37 | 38 | @property 39 | def output_shape(self): 40 | return self.layer.output.shape[1:] 41 | 42 | @property 43 | def weights(self): 44 | return self.io["weights"] if self.io is not None else np.asarray(self.layer.weights[0]) 45 | 46 | @property 47 | def bias(self): 48 | return self.io["bias"] if self.io is not None else self.layer.bias.numpy() 49 | 50 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/Loader.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Tuple 2 | from os.path import sep 3 | from jinja2 import FileSystemLoader as Base 4 | 5 | 6 | class Loader(Base): 7 | """ 8 | Override default FileSystemLoader 9 | """ 10 | def get_source(self, environment: "Environment", template: str) -> Tuple[str, str, Callable[[], bool]]: 11 | """ 12 | 13 | :param environment: 14 | :param template: 15 | :return: 16 | """ 17 | # normalize path separator for Windows and Unix 18 | template = template.replace(sep, "/") 19 | 20 | if not template.endswith(".jinja"): 21 | template = f"{template}.jinja" 22 | 23 | return super().get_source(environment, template) -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/MobileNetConverter.py: 
-------------------------------------------------------------------------------- 1 | from itertools import groupby 2 | from os.path import join, dirname, realpath 3 | from typing import Dict 4 | 5 | import numpy as np 6 | 7 | from micromobilenet.convert.Environment import Environment 8 | from micromobilenet.convert.Loader import Loader 9 | from micromobilenet.convert.LayerData import LayerData 10 | 11 | 12 | class MobileNetConverter: 13 | """ 14 | Convert BaseMobileNet to C++ 15 | """ 16 | def __init__(self, net: "BaseMobileNet"): 17 | """ 18 | 19 | :param net: 20 | """ 21 | self.net = net 22 | 23 | def to_cpp(self, classname: str = None) -> str: 24 | """ 25 | Convert to C++ 26 | :param classname: 27 | :return: 28 | """ 29 | root = join(dirname(realpath(__file__)), "templates") 30 | loader = Loader(root) 31 | env = Environment(loader=loader) 32 | template = env.get_template("BaseMobileNet") 33 | data = self.get_data() 34 | 35 | if classname is not None: 36 | data.update(classname=classname) 37 | 38 | # render template 39 | output = template.render(data) 40 | 41 | return output 42 | 43 | def get_data(self) -> Dict: 44 | """ 45 | Get data for code generation 46 | :return: 47 | """ 48 | model = self.net.model 49 | classname = self.net.__class__.__name__ 50 | layers = [LayerData(l) for l in self.net.layers] 51 | inputs = layers[0] 52 | conv_0 = LayerData(model.get_layer("conv2d_0")) 53 | maxpool = LayerData(model.get_layer("maxpool_last")) 54 | conv_last = LayerData(model.get_layer("conv2d_last")) 55 | softmax = LayerData(model.get_layer("softmax")) 56 | 57 | # group hidden layers into chunks 58 | hidden_layers = [l for l in layers if l.name.startswith("hidden_")] 59 | hidden_layers = [list(ll) for _, ll in groupby(hidden_layers, key=lambda l: l.name.split("__")[0])] 60 | hidden_layers = [{l.name.split("__")[1]: l for l in chunk} for chunk in hidden_layers] 61 | 62 | num_inputs = np.product(inputs.shape[1:]) 63 | num_outputs = softmax.output_shape[-1] 64 | output_sizes = [np.product(l.output_shape) for l in layers[1:]] 65 | arena_size = max(output_sizes) 66 | 67 | return locals() -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__init__.py -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__pycache__/Environment.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__pycache__/Environment.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__pycache__/LayerData.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__pycache__/LayerData.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__pycache__/Loader.cpython-39.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__pycache__/Loader.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__pycache__/MobileNetConverter.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__pycache__/MobileNetConverter.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/micromobilenet/convert/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/BaseMobileNet.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * "Compiled" implementation of modified MobileNet 3 | */ 4 | class {{ classname }} { 5 | public: 6 | const uint16_t numInputs = {{ num_inputs }}; 7 | const uint16_t numOutputs = {{ num_outputs }}; 8 | float outputs[{{ num_outputs }}]; 9 | float arena[{{ arena_size * 2 }}]; 10 | uint16_t output; 11 | float proba; 12 | 13 | /** 14 | * 15 | */ 16 | {{ classname }}() : output(0), proba(0) { 17 | for (uint16_t i = 0; i < numOutputs; i++) 18 | outputs[i] = 0; 19 | } 20 | 21 | /** 22 | * 23 | * @param input 24 | */ 25 | uint16_t predict(float *input) { 26 | float *ping = arena; 27 | float *pong = arena + {{ arena_size }}; 28 | 29 | // conv2d (0) 30 | for (int16_t d = 0; d < {{ conv_0.output_shape[2] }}; d++) 31 | this->conv2d_3x3x1(input, ping + {{ conv_0.output_shape[0] }} * {{ conv_0.output_shape[1] }} * d, conv2d_0_weights[d], {{ conv_0.input_shape[0] }}, {{ conv_0.strides[0] }}); 32 | 33 | {% for i, hidden in enumerate(hidden_layers) %} 34 | {% if 'padding' in hidden %} 35 | // padding ({{ i + 1 }}) 36 | for (int16_t d = 0; d < {{ hidden['padding'].input_shape[2] }}; d++) 37 | this->pad(ping + {{ hidden['padding'].input_shape[0] }} * {{ hidden['padding'].input_shape[1] }} * d, pong + {{ hidden['padding'].output_shape[0] }} * {{ hidden['padding'].output_shape[1] }} * d, {{ hidden['padding'].input_shape[0] }}); 38 | 39 | memcpy(ping, pong, sizeof(float) * {{ hidden['padding'].output_shape[0] }} * {{ hidden['padding'].output_shape[1] }} * {{ hidden['padding'].output_shape[2] }}); 40 | {% endif %} 41 | 42 | // depthwise ({{ i + 1 }}) 43 | for (int16_t d = 0; d < {{ hidden['dw'].input_shape[2] }}; d++) 44 | this->depthwise_conv(ping + {{ hidden['dw'].input_shape[0] }} * {{ hidden['dw'].input_shape[1] }} * d, pong + {{ hidden['pw'].input_shape[0] }} * {{ hidden['pw'].input_shape[1] }} * d, depthwise_{{ i + 1 }}_weights[d], {{ hidden['dw'].input_shape[0] }}, {{ hidden['dw'].strides[0] }}); 45 | 46 | // pointwise ({{ i + 1 }}) 47 | for (int16_t d = 0; d < {{ hidden['pw'].output_shape[2] }}; d++) 48 | this->pointwise_conv(pong, ping + {{ hidden['pw'].input_shape[0] }} * {{ hidden['pw'].input_shape[1] }} * d, pointwise_{{ i + 1 }}_weights[d], {{ hidden['dw'].output_shape[0] }}, {{ hidden['dw'].output_shape[2] }}); 49 | {% endfor %} 50 | 51 | this->maxpool(ping, 
pong, {{ maxpool.input_shape[0] }}, {{ maxpool.input_shape[-1] }}); 52 | 53 | for (uint16_t d = 0; d < numOutputs; d++) 54 | this->dot(pong, ping + d, conv2d_last_weights[d], conv2d_last_bias[d], {{ conv_last.input_shape[-1] }}); 55 | 56 | this->softmax(ping, outputs, numOutputs); 57 | 58 | return this->argmax(); 59 | } 60 | 61 | {% include './ops/argmax' %} 62 | 63 | protected: 64 | const float conv2d_0_weights{{ conv_0.weights | to_weights_shape }} = {{ conv_0.weights | to_weights_array }}; 65 | {% for i, hidden in enumerate(hidden_layers) %} 66 | const float depthwise_{{ i + 1 }}_weights{{ hidden['dw'].weights | to_weights_shape }} = {{ hidden['dw'].weights | to_weights_array }}; 67 | const float pointwise_{{ i + 1 }}_weights{{ hidden['pw'].weights | to_weights_shape }} = {{ hidden['pw'].weights | to_weights_array }}; 68 | {% endfor %} 69 | const float conv2d_last_weights{{ conv_last.weights | to_weights_shape }} = {{ conv_last.weights | to_weights_array }}; 70 | const float conv2d_last_bias[{{ conv_last.bias | length }}] = {{ conv_last.bias | to_array }}; 71 | 72 | {% include './ops/mult3x3' %} 73 | {% include './ops/pad' %} 74 | {% include './ops/conv3x3x1' %} 75 | {% include './ops/depthwise_conv' %} 76 | {% include './ops/pointwise_conv' %} 77 | {% include './ops/maxpool' %} 78 | {% include './ops/dot' %} 79 | {% include './ops/softmax' %} 80 | }; -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/argmax.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Get index of max output 3 | */ 4 | uint16_t argmax() { 5 | this->output = 0; 6 | this->proba = outputs[0]; 7 | 8 | for (uint16_t i = 1; i < numOutputs; i++) { 9 | if (outputs[i] > this->proba) { 10 | this->proba = outputs[i]; 11 | this->output = i; 12 | } 13 | } 14 | 15 | return this->output; 16 | } 17 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/conv3x3x1.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Depthwise 3x3 convolution without ReLU 3 | * 4 | * @param input 5 | * @param output 6 | * @param kernel 7 | * @param width 8 | * @param stride 9 | */ 10 | void conv2d_3x3x1(float *input, float *output, const float *kernel, const uint16_t width, uint8_t stride) { 11 | uint16_t o = 0; 12 | 13 | for (uint16_t y = 0; y <= width - 3; y += stride) { 14 | const uint16_t offset = y * width; 15 | float *i = input + offset; 16 | 17 | for (uint16_t x = 0; x <= width - 3; x += stride) { 18 | output[o++] = this->mult3x3(i + x, kernel, width); 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/depthwise_conv.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Depthwise 3x3 convolution with ReLU 3 | * 4 | * @param inputs 5 | * @param outputs 6 | * @param kernel 7 | * @param width 8 | * @param stride 9 | */ 10 | void depthwise_conv(float *inputs, float *outputs, const float *kernel, const uint16_t width, uint8_t stride) { 11 | uint16_t o = 0; 12 | 13 | for (uint16_t y = 0; y <= width - 3; y += stride) { 14 | const uint16_t offset = y * width; 15 | float *i = inputs + offset; 16 | 17 | for (uint16_t x = 0; x <= width - 3; x += stride) { 18 | float val = this->mult3x3(i + x, kernel, width); 19 | 20 | if (val < 0) 
val = 0; 21 | else if (val > 6) val = 6; 22 | 23 | outputs[o++] = val; 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/dot.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Dot product with bias 3 | * 4 | * @param inputs 5 | * @param outputs 6 | * @param kernel 7 | * @param bias 8 | * @param length 9 | */ 10 | void dot(float *inputs, float *outputs, const float *weights, const float bias, const uint16_t length) { 11 | float sum = 0; 12 | 13 | for (uint16_t i = 0; i < length; i++) 14 | sum += inputs[i] * weights[i]; 15 | 16 | outputs[0] = sum + bias; 17 | } 18 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/maxpool.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * (Global) MaxPooling 3 | * 4 | * @param inputs 5 | * @param outputs 6 | * @param width 7 | * @param channels 8 | */ 9 | void maxpool(float *inputs, float *outputs, const uint16_t width, const uint16_t channels) { 10 | const uint16_t size = width * width; 11 | 12 | for (uint16_t c = 0; c < channels; c++) { 13 | const uint16_t offset = size * c; 14 | float *in = inputs + offset; 15 | float greatest = in[0]; 16 | 17 | for (uint16_t j = 1; j < size; j++) 18 | if (in[j] > greatest) 19 | greatest = in[j]; 20 | 21 | outputs[c] = greatest; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/mult3x3.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Multiply 3x3 kernel on single 3x3 image patch 3 | * 4 | * @param inputs 5 | * @param kernel 6 | * @param width 7 | */ 8 | inline float mult3x3(float *inputs, const float kernel[9], const uint16_t width) { 9 | const float *i1 = inputs; 10 | const float *i2 = inputs + width; 11 | const float *i3 = inputs + width + width; 12 | 13 | return i1[0] * kernel[0] + 14 | i1[1] * kernel[1] + 15 | i1[2] * kernel[2] + 16 | i2[0] * kernel[3] + 17 | i2[1] * kernel[4] + 18 | i2[2] * kernel[5] + 19 | i3[0] * kernel[6] + 20 | i3[1] * kernel[7] + 21 | i3[2] * kernel[8]; 22 | } 23 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/pad.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Zero padding 2D 3 | * 4 | * @param inputs 5 | * @param outputs 6 | * @param width 7 | */ 8 | void pad(float *inputs, float *outputs, uint16_t width) { 9 | const uint16_t paddedWidth = width + 2; 10 | uint16_t i = 0; 11 | uint16_t o = 0; 12 | 13 | // first row of zeros 14 | for (uint16_t x = 0; x < paddedWidth; x++) 15 | outputs[o++] = 0; 16 | 17 | for (uint16_t y = 0; y < width; y++) { 18 | outputs[o++] = 0; 19 | 20 | for (uint16_t x = 0; x < width; x++) 21 | outputs[o++] = inputs[i++]; 22 | 23 | outputs[o++] = 0; 24 | } 25 | 26 | // last row of zeros 27 | for (uint16_t x = 0; x < paddedWidth; x++) 28 | outputs[o++] = 0; 29 | } 30 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/pointwise_conv.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Pointwise 1x1 convolution with ReLU 3 | * 4 | * @param inputs 
5 | * @param outputs 6 | * @param kernel 7 | * @param width 8 | * @param channels 9 | */ 10 | void pointwise_conv(float *inputs, float *outputs, const float *kernel, const uint16_t width, const uint16_t channels) { 11 | const uint16_t size = width * width; 12 | uint16_t o = 0; 13 | 14 | for (uint16_t y = 0; y < width; y += 1) { 15 | const uint16_t offset = y * width; 16 | for (uint16_t x = 0; x < width; x += 1) { 17 | float val = 0; 18 | 19 | for (uint16_t c = 0; c < channels; c++) 20 | val += inputs[(offset + x) + size * c] * kernel[c]; 21 | 22 | if (val < 0) val = 0; 23 | else if (val > 6) val = 6; 24 | 25 | outputs[o++] = val; 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/ops/softmax.jinja: -------------------------------------------------------------------------------- 1 | /** 2 | * Softmax activation 3 | * 4 | * @param inputs 5 | * @param outputs 6 | * @param numOutputs 7 | */ 8 | void softmax(float *inputs, float *outputs, uint16_t numOutputs) { 9 | float sum = 0; 10 | 11 | for (uint16_t i = 0; i < numOutputs; i++) { 12 | const float e = exp(inputs[i]); 13 | outputs[i] = e; 14 | sum += e; 15 | } 16 | 17 | for (uint16_t i = 0; i < numOutputs; i++) 18 | outputs[i] /= sum; 19 | } 20 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/convert/templates/predict_file.jinja: -------------------------------------------------------------------------------- 1 | #define NUM_INPUTS 9216 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "MobileNet.h" 8 | 9 | using namespace std; 10 | 11 | 12 | int main() { 13 | MobileNet net; 14 | unsigned char buffer[NUM_INPUTS]; 15 | float im[NUM_INPUTS]; 16 | FILE *file = fopen("X.bin", "rb"); 17 | 18 | while (fread(buffer, NUM_INPUTS, 1, file)) { 19 | for (int i = 0; i < NUM_INPUTS; i++) 20 | im[i] = buffer[i] / 255.0f; 21 | 22 | cout << net.predict(im) << endl; 23 | } 24 | 25 | fclose(file); 26 | return 0; 27 | } -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/converters.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def convert_xs(xs: np.ndarray, ys: np.ndarray) -> str: 5 | """ 6 | Convert one sample for each y 7 | :param ys: 8 | :param xs: 9 | :return: 10 | """ 11 | samples = [] 12 | ys = ys.argmax(axis=1) 13 | 14 | for y in range(ys.max()): 15 | sample = xs[ys == y][-1].flatten() 16 | data = ", ".join("%.4f" % xi for xi in sample) 17 | samples.append(f"float x{y}[{len(sample)}] = {{ {data} }};") 18 | 19 | return "\n".join(samples) -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/load.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import numpy as np 4 | from os import listdir 5 | from glob import glob 6 | from PIL import Image 7 | 8 | 9 | def load_folder(folder: str): 10 | """ 11 | Load images from folder as [0, 1] floats 12 | :param folder: 13 | :return: 14 | """ 15 | for filename in sorted(glob(f"{folder}/*.jpg") + glob(f"{folder}/*.jpeg")): 16 | yield np.asarray(Image.open(filename).convert("L"), dtype=float) / 255. 
17 | 18 | 19 | def load_split(root: str, split_name: str): 20 | """ 21 | Load images from train/val/test folder 22 | :param root: 23 | :param split_name: 24 | :return: 25 | """ 26 | X = [] 27 | Y = [] 28 | folders = listdir(f"{root}/{split_name}") 29 | folders = [f"{root}/{split_name}/{f}" for f in folders if os.path.isdir(f"{root}/{split_name}/{f}")] 30 | 31 | for k, folder in enumerate(sorted(folders)): 32 | folder_x = list(load_folder(folder)) 33 | X += folder_x 34 | Y += [k] * len(folder_x) 35 | 36 | # shuffle inputs 37 | shuffle_mask = np.random.permutation(len(X)) 38 | X = np.asarray(X)[shuffle_mask] 39 | Y = np.asarray(Y)[shuffle_mask] 40 | 41 | return X, Y 42 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os.path 3 | import warnings 4 | from tempfile import gettempdir 5 | from subprocess import check_output, check_call 6 | 7 | import numpy as np 8 | 9 | 10 | class Runner: 11 | """ 12 | Run C++ MobileNet 13 | """ 14 | def __init__(self, net): 15 | """ 16 | 17 | :param net: 18 | """ 19 | self.net = net 20 | 21 | def predict(self, X: np.ndarray) -> np.ndarray: 22 | """ 23 | Predict samples 24 | :param X: 25 | :return: 26 | """ 27 | root = os.path.abspath(gettempdir()) 28 | logging.warning(f"setting CWD={root}") 29 | 30 | # save input to binary file 31 | with open(os.path.join(root, "X.bin"), "wb") as file: 32 | file.write(X.flatten().astype(np.uint8).tobytes("C")) 33 | 34 | # save net to file 35 | with open(os.path.join(root, "MobileNet.h"), "w") as file: 36 | file.write(self.net.convert.to_cpp(classname="MobileNet")) 37 | 38 | # create C++ main file 39 | src = os.path.join(os.path.dirname(__file__), "convert", "templates", "predict_file.jinja") 40 | dest = os.path.join(root, "mobilenet_test.cpp") 41 | 42 | with open(src) as fin, open(dest, "w") as fout: 43 | fout.write(fin.read()) 44 | 45 | # compile (disable compilation warnings) 46 | with warnings.catch_warnings(): 47 | warnings.simplefilter("ignore") 48 | 49 | if check_call(["g++", "mobilenet_test.cpp", "-o", "mobilenet_test"], cwd=root) == 0: 50 | output = check_output(["./mobilenet_test"], cwd=root).decode() 51 | return np.asarray([int(x) for x in output.split("\n") if x.strip()]) 52 | -------------------------------------------------------------------------------- /25_MicromobileNet/micromobilenet/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | import numpy as np 4 | 5 | global_vars = {} 6 | 7 | 8 | def update_globals(**kwargs): 9 | global global_vars 10 | 11 | global_vars.update(**kwargs) 12 | 13 | 14 | def get_globals(): 15 | global global_vars 16 | 17 | return global_vars 18 | 19 | 20 | def parse_npy(x: str): 21 | """ 22 | Parse Numpy output as array 23 | :param x: 24 | :return: 25 | """ 26 | x = re.sub(r"(\d)\s+([-0-9])", lambda m: f"{m.group(1)}, {m.group(2)}", x) 27 | x = re.sub(r"\]\s+\[", "],\n[", x) 28 | x = json.loads(x) 29 | 30 | return np.asarray(x) 31 | -------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV1_0.1_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV1_0.1_96x96_greyscale_weights.h5 
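The helpers just above tie the Python and C++ sides together: load.py reads 96x96 grayscale images as [0, 1] floats, and runner.py compiles the generated C++ class with g++ and runs it on a binary dump of the inputs. The cross-check below is only a sketch: it assumes a dataset laid out as dataset/<split>/<class>/*.jpg (the layout load_split expects), a trained net object like the one sketched earlier, and g++ available on the PATH.

import numpy as np
from micromobilenet.load import load_split
from micromobilenet.runner import Runner

# "dataset" is a placeholder root containing train/ and test/ class sub-folders
train_x, train_y = load_split("dataset", "train")   # floats in [0, 1], shape (N, 96, 96)
test_x, test_y = load_split("dataset", "test")

# Keras predictions (add the channel axis expected by the (96, 96, 1) input)
keras_pred = net.predict(test_x[..., None]).argmax(axis=1)

# C++ predictions: Runner writes raw bytes to X.bin and the predict_file.jinja
# harness divides by 255, so pass pixel values back in the 0-255 range
cpp_pred = Runner(net).predict(test_x * 255)

print("keras accuracy:", (keras_pred == test_y).mean())
print("c++ accuracy:  ", (cpp_pred == test_y).mean())
print("agreement:     ", (keras_pred == cpp_pred).mean())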
-------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV1_0.25_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV1_0.25_96x96_greyscale_weights.h5 -------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV1_0.2_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV1_0.2_96x96_greyscale_weights.h5 -------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV2_0.05_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV2_0.05_96x96_greyscale_weights.h5 -------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV2_0.1_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV2_0.1_96x96_greyscale_weights.h5 -------------------------------------------------------------------------------- /25_MicromobileNet/models/mobilenetV2_0.35_96x96_greyscale_weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/mobilenetV2_0.35_96x96_greyscale_weights.h5 -------------------------------------------------------------------------------- /25_MicromobileNet/models/model_TL_quant_float32.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/model_TL_quant_float32.tflite -------------------------------------------------------------------------------- /25_MicromobileNet/models/model_TL_quant_int8.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/model_TL_quant_int8.h -------------------------------------------------------------------------------- /25_MicromobileNet/models/model_stripped .keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/25_MicromobileNet/models/model_stripped .keras -------------------------------------------------------------------------------- /25_MicromobileNet/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | tensorflow==2.15.0 3 | pandas==1.4.0 4 | numpy==1.24.0 5 | matplotlib==3.7.1 6 | seaborn==0.11.2 7 | keras==2.15.0 8 | -------------------------------------------------------------------------------- /26_MutinominalNB/ArduinoCode/MultinomialNB.h: 
-------------------------------------------------------------------------------- 1 | const char* vocabulary[] = {"1000","500","75","90","about","access","account","act","afternoon","airport","and","any","approved","are","at","available","back","bank","be","been","before","best","big","biggest","bit","book","bring","business","busy","but","buy","by","call","can","card","catch","chance","chat","cheap","check","claim","class","click","clicking","coffee","compromised","concert","congratulations","consultation","coupon","courses","credit","customers","dating","day","days","deal","details","did","dinner","discounts","do","doing","done","download","earn","easy","event","excited","exclusive","experience","experts","family","far","few","files","final","finish","finished","flights","for","fortune","forward","found","free","from","fun","get","gift","gifts","go","going","good","got","grab","great","groceries","gym","had","has","have","heading","hear","hello","help","helping","here","hi","high","holidays","home","hope","how","huge","hurry","if","in","installment","interest","invitation","ipad","iphone","it","job","jobs","join","just","know","laptop","last","later","let","limit","limited","ll","loans","looking","low","lunch","luxury","major","make","match","me","meet","meeting","message","millionaire","miss","moment","monday","money","month","most","movie","my","need","netflix","new","next","night","no","now","of","off","offer","on","one","online","our","out","over","own","package","park","party","pay","perfect","pick","plan","plans","popular","pre","premium","presentation","prize","program","project","promised","proven","quick","re","reactivate","reading","really","receive","recommendations","recommended","report","required","reschedule","restaurant","rich","ride","sale","saturday","save","savings","saw","schedule","see","seeing","selected","send","session","shop","shortly","should","sign","signing","simple","site","sleep","so","software","some","something","special","start","still","suspended","system","task","tasks","thanks","that","the","then","things","this","tickets","tier","time","to","today","together","tomorrow","tonight","top","town","trial","trick","trip","try","unlock","up","vacation","ve","verify","waiting","walk","want","was","watch","watches","way","we","week","weekend","well","went","what","when","win","with","won","workout","year","you","your"}; 2 | 3 | const float log_probs_spam[] = 
{-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.266138468183801,-6.364750756851911,-4.97845639573202,-4.75531284441781,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-4.97845639573202,-6.364750756851911,-4.75531284441781,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.266138468183801,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-4.167526179515692,-5.266138468183801,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-3.96685548405354,-6.364750756851911,-5.266138468183801,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-4.97845639573202,-6.364750756851911,-6.364750756851911,-5.671603576291965,-4.97845639573202,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.266138468183801,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.266138468183801,-5.671603576291965,-5.671603576291965,-5.671603576291965,-4.97845639573202,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.266138468183801,-5.266138468183801,-4.75531284441781,-6.364750756851911,-6.364750756851911,-5.266138468183801,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-4.97845639573202,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-4.75531284441781,-4.2853092151720755,-6.364750756851911,-6.364750756851911,-4.572991287623855,-6.364750756851911,-5.266138468183801,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-4.97845639573202,-4.97845639573202,-4.75531284441781,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.266138468183801,-4.97845639573202,-4.572991287623855,-6.364750756851911,-5.266138468183801,-5.266138468183801,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.266138468183801,-6.364750756851911,-6.364750756851911,-5.266138468183801,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-4.978456395732
02,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.671603576291965,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-5.266138468183801,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.671603576291965,-6.364750756851911,-5.671603576291965,-4.97845639573202,-3.5315374127956947,-6.364750756851911,-5.671603576291965,-4.97845639573202,-6.364750756851911,-6.364750756851911,-5.266138468183801,-3.799801399390374,-5.671603576291965,-5.671603576291965,-4.418840607796597,-5.266138468183801,-6.364750756851911,-5.671603576291965,-6.364750756851911,-6.364750756851911,-5.266138468183801,-5.671603576291965,-6.364750756851911,-4.2853092151720755,-6.364750756851911,-6.364750756851911,-6.364750756851911,-6.364750756851911,-5.671603576291965,-4.572991287623855,-5.671603576291965,-5.671603576291965,-6.364750756851911,-5.671603576291965,-4.572991287623855,-5.671603576291965,-4.75531284441781,-5.266138468183801,-5.671603576291965,-5.671603576291965,-5.671603576291965,-6.364750756851911,-4.75531284441781,-6.364750756851911,-5.671603576291965,-6.364750756851911,-3.4743789989557463,-4.97845639573202}; 4 | 5 | const float log_probs_ham[] = 
{-5.183654458227896,-6.282266746896006,-5.589119566336061,-5.183654458227896,-6.282266746896006,-5.183654458227896,-5.589119566336061,-5.183654458227896,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.183654458227896,-6.282266746896006,-5.589119566336061,-5.589119566336061,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.183654458227896,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-4.8959723857761155,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.183654458227896,-6.282266746896006,-4.672828834461906,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.183654458227896,-5.589119566336061,-5.589119566336061,-5.589119566336061,-5.183654458227896,-5.589119566336061,-5.589119566336061,-5.589119566336061,-5.589119566336061,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-5.589119566336061,-5.589119566336061,-6.282266746896006,-4.8959723857761155,-5.183654458227896,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.183654458227896,-6.282266746896006,-6.282266746896006,-6.282266746896006,-3.8843714740976356,-6.282266746896006,-6.282266746896006,-6.282266746896006,-3.44905340283979,-5.589119566336061,-6.282266746896006,-4.20282520521617,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-4.490507277667952,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-5.589119566336061,-5.183654458227896,-5.589119566336061,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.183654458227896,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.183654458227896,-6.282266746896006,-5.183654458227896,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-4.8959723857761155,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-4.8959723857761155,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.183654458227896,-4.085042169559786,-4.8959723857761155,-5.183654458227896,-4.490507277667952,-5.589119566336061,-5.183654458227896,-4.8959723857761155,-4.8959723857761155,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.589119566336061,-5.589119566336061,-6.282266746896006,-5.589119566336061,-5.18365445822
7896,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-5.183654458227896,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.183654458227896,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.589119566336061,-6.282266746896006,-6.282266746896006,-5.183654458227896,-5.183654458227896,-6.282266746896006,-5.589119566336061,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-4.8959723857761155,-6.282266746896006,-6.282266746896006,-4.8959723857761155,-5.589119566336061,-5.589119566336061,-5.183654458227896,-3.7173173894344695,-4.336356597840693,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-6.282266746896006,-5.183654458227896,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.589119566336061,-4.8959723857761155,-5.183654458227896,-5.589119566336061,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.183654458227896,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-6.282266746896006,-5.183654458227896,-4.8959723857761155,-5.589119566336061,-6.282266746896006,-5.589119566336061,-4.490507277667952,-4.20282520521617}; 6 | 7 | -------------------------------------------------------------------------------- /26_MutinominalNB/ArduinoCode/MultinominalNB.ino: -------------------------------------------------------------------------------- 1 | #include "MultinomialNB.h" 2 | 3 | float start_time = -1; 4 | float end_time = -1; 5 | float width_time = -1; 6 | int len_vocabulary = 280; 7 | 8 | // Function to get the index of the word in the vocabulary 9 | int getWordIndex(const char *word) { 10 | for (int i = 0; i < len_vocabulary; i++) { 11 | if (strcmp(word, vocabulary[i]) == 0) { 12 | return i; 13 | } 14 | } 15 | return -1; // Word not found 16 | } 17 | 18 | void setup() { 19 | // Start serial communication at 9600 baud 20 | Serial.begin(9600); 21 | while (!Serial) 22 | ; // Wait for connection to the serial port 23 | Serial.println("Enter a message for classification (type 'exit' to stop):"); 24 | } 25 | 26 | void loop() { 27 | static String input = ""; // String to store user input 28 | static bool processingInput = false; // Flag to control input processing 29 | 30 | // Check if input is available in the serial buffer 31 | if (Serial.available() > 0) { 32 | char c = Serial.read(); 33 | if (c == '\n') { 34 | // Process input when a new line is detected 35 | processInput(input); 36 | input = ""; // Clear input for the next message 37 | processingInput = false; // Reset flag 38 | } else { 39 | // Add character to input 40 | input += c; 41 | processingInput = true; 42 | } 43 | } 44 | } 45 | 46 | void processInput(String input) { 47 | // Print the original input message 48 | Serial.print("Input: "); 49 | Serial.println(input); 50 | 51 | // Initialize scores 52 | float spam_score = 0.0; 53 | float ham_score = 0.0; 54 | 55 | // Create a mutable copy of the input to use 
with strtok 56 | char input_copy[100]; 57 | input.toCharArray(input_copy, sizeof(input_copy)); 58 | 59 | start_time = micros(); 60 | 61 | // Tokenize the input string 62 | char *token = strtok(input_copy, " "); 63 | while (token != NULL) { 64 | int index = getWordIndex(token); 65 | if (index != -1) { 66 | // If the word is in the vocabulary, update the scores 67 | spam_score += abs(log_probs_spam[index]); 68 | ham_score += abs(log_probs_ham[index]); 69 | } 70 | token = strtok(NULL, " "); // Get the next token 71 | } 72 | end_time = micros(); 73 | 74 | width_time = end_time - start_time; 75 | 76 | // Classify the input based on the scores 77 | if (spam_score > ham_score) { 78 | Serial.print("Classification: "); 79 | Serial.println("Spam"); 80 | Serial.print("Inference time: "); 81 | Serial.print(width_time); 82 | Serial.println(" us "); 83 | } else { 84 | Serial.print("Classification: "); 85 | Serial.println("Ham"); 86 | Serial.print("Inference time: "); 87 | Serial.print(width_time); 88 | Serial.println(" us "); 89 | } 90 | 91 | // Prompt for the next input 92 | Serial.println("Enter another message for classification (or type 'exit' to stop):"); 93 | } 94 | -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig00.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig01.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig02.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig03.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig04.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig05.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig06.png 
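The exported arrays in ArduinoCode/MultinomialNB.h (a 280-word vocabulary plus one log probability per word and class) correspond to what scikit-learn's CountVectorizer and MultinomialNB expose as the fitted vocabulary and feature_log_prob_. The training notebook is not part of this listing, so the export sketch below is only an assumed reconstruction: the toy corpus, the labels and the c_array helper are placeholders, not the repository's actual pipeline.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Placeholder corpus; the real dataset behind the 280-word vocabulary is not shown here
texts = ["win a free iphone now", "are we still on for lunch tomorrow"]
labels = ["spam", "ham"]

vectorizer = CountVectorizer(lowercase=True)
X = vectorizer.fit_transform(texts)
clf = MultinomialNB().fit(X, labels)

vocab = vectorizer.get_feature_names_out()
log_probs = {c: clf.feature_log_prob_[i] for i, c in enumerate(clf.classes_)}

def c_array(decl, values, fmt):
    # Illustrative helper: renders a C array initializer like the ones in MultinomialNB.h
    return f"const {decl} = {{" + ",".join(fmt % v for v in values) + "};\n\n"

with open("MultinomialNB.h", "w") as f:
    f.write(c_array("char* vocabulary[]", vocab, '"%s"'))
    f.write(c_array("float log_probs_spam[]", log_probs["spam"], "%.15f"))
    f.write(c_array("float log_probs_ham[]", log_probs["ham"], "%.15f"))

On the device side, MultinominalNB.ino above then tokenizes each serial message, looks each token up in vocabulary, and accumulates one score per class before printing the classification and the measured inference time.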
-------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig07.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig08.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig08.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig09.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig09.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig10.png -------------------------------------------------------------------------------- /26_MutinominalNB/figures/fig11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thommaskevin/TinyML/cc0b870e1e86ce2ac56ab9b0df58193c16b12c89/26_MutinominalNB/figures/fig11.png -------------------------------------------------------------------------------- /26_MutinominalNB/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==1.3.2 2 | pandas==1.4.0 3 | numpy==1.24.0 4 | matplotlib==3.7.1 5 | nltk==3.8.1 6 | wordcloud==1.9.4 7 | keras==2.15.0 8 | 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TinyML 2 | 3 | ## Repository 4 | 5 | - 01 - [Decision Tree](https://github.com/thommaskevin/TinyML/tree/main/01_decision_tree) 6 | 7 | - 02 - [Random Forest](https://github.com/thommaskevin/TinyML/tree/main/02_random_forest) 8 | 9 | - 03 - [XGBoost Classifier](https://github.com/thommaskevin/TinyML/tree/main/03_XGBoost) 10 | 11 | - 04 - [GaussianNB](https://github.com/thommaskevin/TinyML/tree/main/04_GaussianNB) 12 | 13 | - 05 - [Support Vector Machines](https://github.com/thommaskevin/TinyML/tree/main/05_support_vector_machine) 14 | 15 | - 06 - [SEFR](https://github.com/thommaskevin/TinyML/tree/main/06_SEFR) 16 | 17 | - 07 - [Principal Component Analysis](https://github.com/thommaskevin/TinyML/tree/main/07_principal_components_analysis) 18 | 19 | - 08 - [Binomial Logistic Regression](https://github.com/thommaskevin/TinyML/tree/main/08_Logistic_Regressor) 20 | 21 | - 09 - [K-means](https://github.com/thommaskevin/TinyML/tree/main/09_K_Means) 22 | 23 | - 10 - [Linear Regression](https://github.com/thommaskevin/TinyML/tree/main/10_Linear_Regressor) 24 | 25 | - 11 - [Gaussian Mixture Model](https://github.com/thommaskevin/TinyML/tree/main/11_GMM) 26 | 27 | - 12 - [Artificial Neural Networks (MLP)](https://github.com/thommaskevin/TinyML/tree/main/12_MLP) 28 | 29 | - 13 - [Convolutional Neural Networks (CNN)](https://github.com/thommaskevin/TinyML/tree/main/13_CNN) 30 
| 31 | - 14 - [XGBoost Regression](https://github.com/thommaskevin/TinyML/tree/main/14_XGBRegression) 32 | 33 | - 15 - [Poisson Regression](https://github.com/thommaskevin/TinyML/tree/main/15_Poisson_Regressor) 34 | 35 | - 16 - [K-Nearest Neighbors (KNN-Classifier)](https://github.com/thommaskevin/TinyML/tree/main/16_KNN) 36 | 37 | - 17 - [ElasticNet (ELNET)](https://github.com/thommaskevin/TinyML/tree/main/17_ElasticNet) 38 | 39 | - 18 - [Long Short-Term Memory (LSTM)](https://github.com/thommaskevin/TinyML/tree/main/18_LSTM) 40 | 41 | - 19 - [AutoEncoder](https://github.com/thommaskevin/TinyML/tree/main/19_Autoencoder) 42 | 43 | - 20 - [Q-Learning](https://github.com/thommaskevin/TinyML/tree/main/20_Q_Learning) 44 | 45 | - 21 - [Huber Regression](https://github.com/thommaskevin/TinyML/tree/main/21_Huber_Regressor) 46 | 47 | - 22 - [Quantization Aware Training](https://github.com/thommaskevin/TinyML/tree/main/22_QAT) 48 | 49 | - 23 - [Post-Training Pruning](https://github.com/thommaskevin/TinyML/tree/main/23_PTP) 50 | 51 | - 24 - [Knowledge Distillation](https://github.com/thommaskevin/TinyML/tree/main/24_Knowledge_Distillation) 52 | 53 | - 25 - [MicroMobileNet](https://github.com/thommaskevin/TinyML/tree/main/25_MicromobileNet) 54 | 55 | - 26 - [MultinominalNB (Text Classifier)](https://github.com/thommaskevin/TinyML/tree/main/26_MutinominalNB) 56 | 57 | ## Medium - TinyML's Weekly Open Paper 58 | 59 | - 01 - [Decision Tree](https://medium.com/@thommaskevin/tinyml-%C3%A1rvore-de-decis%C3%A3o-aa1414562d97) 60 | 61 | - 02 - [Random Forest](https://medium.com/@thommaskevin/tinyml-random-forest-classifier-and-regressor-b351aa0980e8) 62 | 63 | - 03 - [XGBoost Classifier](https://medium.com/@thommaskevin/tinyml-xgboost-classifier-795202285779) 64 | 65 | - 04 - [GaussianNB](https://medium.com/@thommaskevin/tinyml-gaussian-naive-bayes-classifier-31f8d241c67c) 66 | 67 | - 05 - [Support Vector Machines](https://medium.com/@thommaskevin/tinyml-support-vector-machines-classifier-c391b54f3ab8) 68 | 69 | - 06 - [SEFR](https://medium.com/@thommaskevin/tinyml-similarity-ensemble-fusion-for-retrieval-sefr-379b647faba3) 70 | 71 | - 07 - [Principal Component Analysis](https://medium.com/@thommaskevin/tinyml-principal-component-analysis-pca-5379d0874592) 72 | 73 | - 08 - [Binomial Logistic Regression](https://medium.com/@thommaskevin/tinyml-binomial-logistic-regression-0fdbf00e6765) 74 | 75 | - 09 - [K-means](https://medium.com/@thommaskevin/tinyml-k-means-10e72828d492) 76 | 77 | - 10 - [Linear Regression](https://medium.com/@thommaskevin/tinyml-linear-regression-0b715844db01) 78 | 79 | - 11 - [Gaussian Mixture Model](https://medium.com/p/9730693fb8a4) 80 | 81 | - 12 - [Artificial Neural Networks (MLP)](https://medium.com/@thommaskevin/tinyml-neural-networks-mlp-62b2cfcc09c2) 82 | 83 | - 13 - [Convolutional Neural Networks (CNN)](https://medium.com/@thommaskevin/tinyml-convolutional-neural-networks-cnn-3601b32c35f4) 84 | 85 | - 14 - [XGBoost Regression](https://medium.com/@thommaskevin/tinyml-xgboost-regression-d2b513a0d237) 86 | 87 | - 15 - [Poisson Regression](https://medium.com/@thommaskevin/tinyml-poisson-regression-5174d88479f5) 88 | 89 | - 16 - [K-Nearest Neighbors (KNN-Classifier)](https://medium.com/@thommaskevin/tinyml-k-nearest-neighbors-knn-classifier-6008f8e51189) 90 | 91 | - 17 - [ElasticNet (ELNET)](https://medium.com/@thommaskevin/tinyml-elastic-net-elnet-e8eee849225a) 92 | 93 | --------------------------------------------------------------------------------