├── .github
│   └── workflows
│       └── compile-sketches.yml
├── FruitToEmoji
│   ├── FruitToEmoji.ipynb
│   ├── README.md
│   ├── SampleData
│   │   ├── apple.csv
│   │   ├── banana.csv
│   │   └── orange.csv
│   └── sketches
│       ├── object_color_capture
│       │   └── object_color_capture.ino
│       └── object_color_classify
│           └── object_color_classify.ino
├── GestureToEmoji
│   ├── ArduinoSketches
│   │   ├── Emoji_Button
│   │   │   └── Emoji_Button.ino
│   │   ├── HardwareTest
│   │   │   └── HardwareTest.ino
│   │   ├── IMU_Capture
│   │   │   └── IMU_Capture.ino
│   │   └── IMU_Classifier
│   │       ├── IMU_Classifier.ino
│   │       └── model.h
│   ├── README.md
│   ├── arduino_tinyml_workshop.ipynb
│   ├── exercises
│   │   ├── exercise1.md
│   │   ├── exercise2.md
│   │   ├── exercise3.md
│   │   ├── exercise4.md
│   │   ├── exercise5.md
│   │   ├── exercise6.md
│   │   ├── exercise7.md
│   │   └── exercise8.md
│   └── images
│       ├── AddZipLibrary.png
│       ├── AddZipLibrary_2.png
│       ├── ArduinoIDE.png
│       ├── Arduino_logo_R_highquality.png
│       ├── BoardManager-Menu.png
│       ├── BoardsManager.png
│       ├── IDE.png
│       ├── InstallBoardDefinitions.png
│       ├── JustDownload.png
│       ├── ManageLibraries.png
│       ├── NANO-33-BLE-Sense-with-headers.svg
│       ├── accelerometer-example-serial-plotter.png
│       ├── arduino-classifier.png
│       ├── ble-sense.jpg
│       ├── colab-3rd-party-cookie-exception.png
│       ├── colab-error.png
│       ├── colab.png
│       ├── download-repo.png
│       ├── library-arduinolsm9ds1.png
│       ├── lsm9ds1-examples.png
│       ├── serial-monitor-imu.png
│       └── serial-plotter-imu.png
└── README.md
/.github/workflows/compile-sketches.yml:
--------------------------------------------------------------------------------
1 | name: Compile Sketches
2 | 
3 | on:
4 |   - push
5 |   - pull_request
6 | 
7 | jobs:
8 |   compile-sketches:
9 |     runs-on: ubuntu-latest
10 | 
11 |     steps:
12 |       - name: Checkout repository
13 |         uses: actions/checkout@v2
14 | 
15 |       # See: https://github.com/arduino/compile-sketches#readme
16 |       - name: Compile sketches
17 |         uses: arduino/compile-sketches@v1
18 |         with:
19 |           fqbn: arduino:mbed:nano33ble
20 |           sketch-paths: |
21 |             - FruitToEmoji/sketches/object_color_capture
22 |             - GestureToEmoji/ArduinoSketches/HardwareTest
23 |             - GestureToEmoji/ArduinoSketches/IMU_Capture
24 |             - GestureToEmoji/ArduinoSketches/IMU_Classifier
25 |           libraries: |
26 |             - name: Arduino_TensorFlowLite
27 |             - name: Arduino_APDS9960
28 |             - name: Arduino_LSM9DS1
29 |
--------------------------------------------------------------------------------
/FruitToEmoji/FruitToEmoji.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "FruitToEmoji-GIT.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": [],
9 | "toc_visible": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {
20 | "id": "f92-4Hjy7kA8",
21 | "colab_type": "text"
22 | },
23 | "source": [
24 | "
\n",
25 | "# Tiny ML on Arduino\n",
26 | "## Classify objects by color tutorial\n",
27 | "\n",
28 | " \n",
29 | "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {
35 | "id": "uvDA8AK7QOq-",
36 | "colab_type": "text"
37 | },
38 | "source": [
39 | "## Setup Python Environment \n",
40 | "\n",
41 | "The next cell sets up the dependencies in required for the notebook, run it."
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "metadata": {
47 | "id": "Y2gs-PL4xDkZ",
48 | "colab_type": "code",
49 | "colab": {}
50 | },
51 | "source": [
52 | "# Setup environment\n",
53 | "!apt-get -qq install xxd\n",
54 | "!pip install pandas numpy matplotlib\n",
55 | "%tensorflow_version 2.x\n",
56 | "!pip install tensorflow"
57 | ],
58 | "execution_count": 0,
59 | "outputs": []
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {
64 | "id": "9lwkeshJk7dg",
65 | "colab_type": "text"
66 | },
67 | "source": [
68 | "# Upload Data\n",
69 | "\n",
70 | "1. Open the panel on the left side of Colab by clicking on the __>__\n",
71 | "1. Select the Files tab\n",
72 | "1. Drag `csv` files from your computer to the tab to upload them into colab."
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {
78 | "id": "kSxUeYPNQbOg",
79 | "colab_type": "text"
80 | },
81 | "source": [
82 | "# Train Neural Network\n",
83 | "\n",
84 | "\n",
85 | "\n"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {
91 | "id": "Gxk414PU3oy3",
92 | "colab_type": "text"
93 | },
94 | "source": [
95 | "## Parse and prepare the data\n",
96 | "\n",
97 | "The next cell parses the csv files and transforms them to a format that will be used to train the full connected neural network.\n",
98 | "\n"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "metadata": {
104 | "id": "AGChd1FAk5_j",
105 | "colab_type": "code",
106 | "colab": {}
107 | },
108 | "source": [
109 | "import matplotlib.pyplot as plt\n",
110 | "import numpy as np\n",
111 | "import pandas as pd\n",
112 | "import tensorflow as tf\n",
113 | "import os\n",
114 | "import fileinput\n",
115 | "\n",
116 | "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
117 | "\n",
118 | "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
119 | "# the same random numbers each time the notebook is run\n",
120 | "SEED = 1337\n",
121 | "np.random.seed(SEED)\n",
122 | "tf.random.set_seed(SEED)\n",
123 | "\n",
124 | "CLASSES = [];\n",
125 | "\n",
126 | "for file in os.listdir(\"/content/\"):\n",
127 | " if file.endswith(\".csv\"):\n",
128 | " CLASSES.append(os.path.splitext(file)[0])\n",
129 | "\n",
130 | "CLASSES.sort()\n",
131 | "\n",
132 | "SAMPLES_WINDOW_LEN = 1\n",
133 | "NUM_CLASSES = len(CLASSES)\n",
134 | "\n",
135 | "# create a one-hot encoded matrix that is used in the output\n",
136 | "ONE_HOT_ENCODED_CLASSES = np.eye(NUM_CLASSES)\n",
137 | "\n",
138 | "inputs = []\n",
139 | "outputs = []\n",
140 | "\n",
141 | "# read each csv file and push an input and output\n",
142 | "for class_index in range(NUM_CLASSES):\n",
143 | " objectClass = CLASSES[class_index]\n",
144 | " df = pd.read_csv(\"/content/\" + objectClass + \".csv\")\n",
145 | " columns = list(df)\n",
146 | " # get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n",
147 | " df = df.dropna()\n",
148 | " df = df.reset_index(drop=True)\n",
149 | " \n",
150 | " # calculate the number of objectClass recordings in the file\n",
151 | " num_recordings = int(df.shape[0] / SAMPLES_WINDOW_LEN)\n",
152 | " print(f\"\\u001b[32;4m{objectClass}\\u001b[0m class will be output \\u001b[32m{class_index}\\u001b[0m of the classifier\")\n",
153 | " print(f\"{num_recordings} samples captured for training with inputs {list(df)} \\n\")\n",
154 | "\n",
155 | " # graphing\n",
156 | " plt.rcParams[\"figure.figsize\"] = (10,1)\n",
157 | " pixels = np.array([df['Red'],df['Green'],df['Blue']],float)\n",
158 | " pixels = np.transpose(pixels)\n",
159 | " for i in range(num_recordings):\n",
160 | " plt.axvline(x=i, linewidth=8, color=tuple(pixels[i]/np.max(pixels[i], axis=0)))\n",
161 | " plt.show()\n",
162 | " \n",
163 | " #tensors\n",
164 | " output = ONE_HOT_ENCODED_CLASSES[class_index]\n",
165 | " for i in range(num_recordings):\n",
166 | " tensor = []\n",
167 | " row = []\n",
168 | " for c in columns:\n",
169 | " row.append(df[c][i])\n",
170 | " tensor += row\n",
171 | " inputs.append(tensor)\n",
172 | " outputs.append(output)\n",
173 | "\n",
174 | "# convert the list to numpy array\n",
175 | "inputs = np.array(inputs)\n",
176 | "outputs = np.array(outputs)\n",
177 | "\n",
178 | "print(\"Data set parsing and preparation complete.\")\n",
179 | "\n",
180 | "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
181 | "# https://stackoverflow.com/a/37710486/2020087\n",
182 | "num_inputs = len(inputs)\n",
183 | "randomize = np.arange(num_inputs)\n",
184 | "np.random.shuffle(randomize)\n",
185 | "\n",
186 | "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
187 | "inputs = inputs[randomize]\n",
188 | "outputs = outputs[randomize]\n",
189 | "\n",
190 | "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
191 | "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
192 | "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
193 | "\n",
194 | "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
195 | "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
196 | "\n",
197 | "print(\"Data set randomization and splitting complete.\")\n"
198 | ],
199 | "execution_count": 0,
200 | "outputs": []
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {
205 | "colab_type": "text",
206 | "id": "v8qlSAX1b6Yv"
207 | },
208 | "source": [
209 | "## Build & Train the Model\n",
210 | "\n",
211 | "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "metadata": {
217 | "id": "kGNFa-lX24Qo",
218 | "colab_type": "code",
219 | "colab": {}
220 | },
221 | "source": [
222 | "# build the model and train it\n",
223 | "model = tf.keras.Sequential()\n",
224 | "model.add(tf.keras.layers.Dense(8, activation='relu')) # relu is used for performance\n",
225 | "model.add(tf.keras.layers.Dense(5, activation='relu'))\n",
226 | "model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')) # softmax is used, because we only expect one class to occur per input\n",
227 | "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
228 | "history = model.fit(inputs_train, outputs_train, epochs=400, batch_size=4, validation_data=(inputs_validate, outputs_validate))\n",
229 | "\n"
230 | ],
231 | "execution_count": 0,
232 | "outputs": []
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {
237 | "id": "guMjtfa42ahM",
238 | "colab_type": "text"
239 | },
240 | "source": [
241 | "### Run with Test Data\n",
242 | "Put our test data into the model and plot the predictions\n"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "metadata": {
248 | "id": "V3Y0CCWJz2EK",
249 | "colab_type": "code",
250 | "colab": {}
251 | },
252 | "source": [
253 | "# use the model to predict the test inputs\n",
254 | "predictions = model.predict(inputs_test)\n",
255 | "\n",
256 | "# print the predictions and the expected ouputs\n",
257 | "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
258 | "print(\"actual =\\n\", outputs_test)\n",
259 | "\n",
260 | "# Plot the predictions along with to the test data\n",
261 | "plt.clf()\n",
262 | "plt.title('Training data predicted vs actual values')\n",
263 | "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
264 | "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
265 | "plt.show()"
266 | ],
267 | "execution_count": 0,
268 | "outputs": []
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {
273 | "id": "j7DO6xxXVCym",
274 | "colab_type": "text"
275 | },
276 | "source": [
277 | "# Convert the Trained Model to Tensor Flow Lite\n",
278 | "\n",
279 | "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out."
280 | ]
281 | },
282 | {
283 | "cell_type": "code",
284 | "metadata": {
285 | "id": "0Xn1-Rn9Cp_8",
286 | "colab_type": "code",
287 | "colab": {}
288 | },
289 | "source": [
290 | "# Convert the model to the TensorFlow Lite format without quantization\n",
291 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
292 | "tflite_model = converter.convert()\n",
293 | "\n",
294 | "# Save the model to disk\n",
295 | "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
296 | " \n",
297 | "import os\n",
298 | "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
299 | "print(\"Model is %d bytes\" % basic_model_size)\n",
300 | " \n",
301 | " "
302 | ],
303 | "execution_count": 0,
304 | "outputs": []
305 | },
306 | {
307 | "cell_type": "markdown",
308 | "metadata": {
309 | "id": "ykccQn7SXrUX",
310 | "colab_type": "text"
311 | },
312 | "source": [
313 | "## Encode the Model in an Arduino Header File \n",
314 | "\n",
315 | "The next cell creates a constant byte array that contains the TFlite model. Import it as a tab with the sketch below."
316 | ]
317 | },
318 | {
319 | "cell_type": "code",
320 | "metadata": {
321 | "id": "9J33uwpNtAku",
322 | "colab_type": "code",
323 | "colab": {}
324 | },
325 | "source": [
326 | "!echo \"const unsigned char model[] = {\" > /content/model.h\n",
327 | "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
328 | "!echo \"};\" >> /content/model.h\n",
329 | "\n",
330 | "import os\n",
331 | "model_h_size = os.path.getsize(\"model.h\")\n",
332 | "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
333 | "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
334 | ],
335 | "execution_count": 0,
336 | "outputs": []
337 | },
338 | {
339 | "cell_type": "markdown",
340 | "metadata": {
341 | "id": "1eSkHZaLzMId",
342 | "colab_type": "text"
343 | },
344 | "source": [
345 | "# Realtime Classification of Sensor Data on Arduino\n",
346 | "\n",
347 | "Now it's time to switch back to the tutorial instructions and run our new model on the [Arduino Nano 33 BLE Sense](https://www.arduino.cc/en/Guide/NANO33BLE)"
348 | ]
349 | }
350 | ]
351 | }
--------------------------------------------------------------------------------
/FruitToEmoji/README.md:
--------------------------------------------------------------------------------
1 | # FruitToEmoji
2 |
3 | Classifies fruit using the RGB color and proximity sensors of the Arduino Nano 33 BLE Sense, with a TensorFlow Lite Micro model trained on data captured from the same hardware.
4 |
5 |
--------------------------------------------------------------------------------
/FruitToEmoji/SampleData/apple.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.522,0.261,0.217
3 | 0.522,0.261,0.217
4 | 0.550,0.250,0.200
5 | 0.545,0.227,0.227
6 | 0.556,0.222,0.222
7 | 0.545,0.227,0.227
8 | 0.524,0.238,0.238
9 | 0.500,0.273,0.227
10 | 0.500,0.292,0.208
11 | 0.526,0.263,0.211
12 | 0.545,0.227,0.227
13 | 0.556,0.222,0.222
14 | 0.550,0.250,0.200
15 | 0.529,0.235,0.235
16 | 0.579,0.211,0.211
17 | 0.550,0.250,0.200
18 | 0.556,0.222,0.222
19 | 0.500,0.273,0.227
20 | 0.524,0.286,0.190
21 | 0.550,0.250,0.200
22 | 0.588,0.235,0.176
23 | 0.550,0.250,0.200
24 | 0.556,0.222,0.222
25 | 0.550,0.250,0.200
26 | 0.588,0.235,0.176
27 | 0.550,0.250,0.200
28 | 0.562,0.250,0.187
29 | 0.550,0.250,0.200
30 | 0.588,0.235,0.176
31 | 0.588,0.235,0.176
32 | 0.579,0.211,0.211
33 | 0.588,0.235,0.176
34 | 0.550,0.250,0.200
35 | 0.562,0.250,0.187
36 | 0.550,0.250,0.200
37 | 0.562,0.250,0.187
38 | 0.588,0.235,0.176
39 | 0.556,0.222,0.222
40 | 0.562,0.250,0.187
41 | 0.556,0.222,0.222
42 | 0.600,0.200,0.200
43 | 0.588,0.235,0.176
44 | 0.562,0.250,0.187
45 | 0.588,0.235,0.176
46 | 0.550,0.250,0.200
47 | 0.562,0.250,0.187
48 | 0.588,0.235,0.176
49 | 0.571,0.214,0.214
50 | 0.600,0.200,0.200
51 | 0.571,0.214,0.214
52 | 0.600,0.200,0.200
53 | 0.588,0.235,0.176
54 | 0.600,0.200,0.200
55 | 0.579,0.211,0.211
56 | 0.600,0.200,0.200
57 | 0.562,0.250,0.187
58 | 0.588,0.235,0.176
59 | 0.600,0.200,0.200
60 | 0.562,0.250,0.187
61 | 0.562,0.250,0.187
62 | 0.571,0.238,0.190
63 | 0.588,0.235,0.176
64 | 0.571,0.238,0.190
65 | 0.562,0.250,0.187
66 | 0.636,0.182,0.182
67 | 0.611,0.222,0.167
68 | 0.571,0.214,0.214
69 | 0.583,0.250,0.167
70 | 0.500,0.250,0.250
71 | 0.562,0.250,0.187
72 | 0.500,0.269,0.231
73 |
--------------------------------------------------------------------------------
/FruitToEmoji/SampleData/banana.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.471,0.353,0.176
3 | 0.484,0.339,0.177
4 | 0.472,0.340,0.189
5 | 0.463,0.352,0.185
6 | 0.450,0.350,0.200
7 | 0.433,0.367,0.200
8 | 0.433,0.367,0.200
9 | 0.452,0.355,0.194
10 | 0.472,0.361,0.167
11 | 0.467,0.356,0.178
12 | 0.491,0.340,0.170
13 | 0.489,0.340,0.170
14 | 0.500,0.333,0.167
15 | 0.488,0.349,0.163
16 | 0.515,0.333,0.152
17 | 0.515,0.333,0.152
18 | 0.500,0.333,0.167
19 | 0.500,0.333,0.167
20 | 0.500,0.350,0.150
21 | 0.474,0.368,0.158
22 | 0.450,0.350,0.200
23 | 0.389,0.389,0.222
24 | 0.437,0.375,0.187
25 | 0.437,0.375,0.187
26 | 0.421,0.368,0.211
27 | 0.409,0.364,0.227
28 | 0.391,0.391,0.217
29 | 0.387,0.387,0.226
30 | 0.387,0.387,0.226
31 | 0.414,0.379,0.207
32 | 0.414,0.379,0.207
33 | 0.393,0.393,0.214
34 | 0.406,0.375,0.219
35 | 0.414,0.379,0.207
36 | 0.419,0.387,0.194
37 | 0.423,0.385,0.192
38 | 0.417,0.375,0.208
39 | 0.391,0.391,0.217
40 | 0.391,0.391,0.217
41 | 0.391,0.391,0.217
42 | 0.400,0.400,0.200
43 | 0.400,0.400,0.200
44 | 0.421,0.368,0.211
45 | 0.429,0.381,0.190
46 | 0.440,0.360,0.200
47 | 0.429,0.381,0.190
48 | 0.429,0.381,0.190
49 | 0.450,0.350,0.200
50 | 0.444,0.389,0.167
51 | 0.450,0.350,0.200
52 | 0.437,0.375,0.187
53 | 0.471,0.353,0.176
54 | 0.437,0.375,0.187
55 | 0.444,0.389,0.167
56 | 0.471,0.353,0.176
57 | 0.450,0.350,0.200
58 | 0.429,0.357,0.214
59 | 0.423,0.385,0.192
60 | 0.419,0.387,0.194
61 | 0.406,0.375,0.219
62 | 0.406,0.375,0.219
63 | 0.389,0.389,0.222
64 | 0.419,0.387,0.194
65 | 0.394,0.394,0.212
66 | 0.393,0.393,0.214
67 | 0.412,0.382,0.206
68 | 0.389,0.389,0.222
69 | 0.410,0.385,0.205
70 | 0.394,0.394,0.212
71 | 0.400,0.400,0.200
72 | 0.400,0.400,0.200
73 | 0.400,0.400,0.200
74 | 0.400,0.400,0.200
75 | 0.391,0.391,0.217
76 | 0.400,0.400,0.200
77 | 0.375,0.417,0.208
78 | 0.393,0.393,0.214
79 | 0.385,0.385,0.231
80 | 0.370,0.407,0.222
81 | 0.370,0.407,0.222
82 | 0.391,0.391,0.217
83 | 0.375,0.417,0.208
84 | 0.381,0.381,0.238
85 | 0.400,0.400,0.200
86 | 0.409,0.409,0.182
87 | 0.400,0.400,0.200
88 | 0.400,0.400,0.200
89 | 0.400,0.400,0.200
90 | 0.407,0.407,0.185
91 | 0.400,0.400,0.200
92 | 0.391,0.391,0.217
93 | 0.375,0.406,0.219
94 | 0.400,0.400,0.200
95 | 0.409,0.364,0.227
96 |
--------------------------------------------------------------------------------
/FruitToEmoji/SampleData/orange.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.540,0.300,0.160
3 | 0.558,0.288,0.154
4 | 0.600,0.286,0.114
5 | 0.571,0.286,0.143
6 | 0.571,0.286,0.143
7 | 0.615,0.269,0.115
8 | 0.591,0.273,0.136
9 | 0.600,0.300,0.100
10 | 0.625,0.250,0.125
11 | 0.625,0.250,0.125
12 | 0.600,0.267,0.133
13 | 0.591,0.273,0.136
14 | 0.609,0.261,0.130
15 | 0.615,0.269,0.115
16 | 0.591,0.273,0.136
17 | 0.600,0.280,0.120
18 | 0.579,0.263,0.158
19 | 0.636,0.273,0.091
20 | 0.600,0.267,0.133
21 | 0.600,0.267,0.133
22 | 0.591,0.273,0.136
23 | 0.591,0.273,0.136
24 | 0.609,0.261,0.130
25 | 0.583,0.292,0.125
26 | 0.607,0.286,0.107
27 | 0.600,0.280,0.120
28 | 0.619,0.286,0.095
29 | 0.571,0.286,0.143
30 | 0.588,0.294,0.118
31 | 0.611,0.278,0.111
32 | 0.625,0.250,0.125
33 | 0.611,0.278,0.111
34 | 0.600,0.267,0.133
35 | 0.600,0.267,0.133
36 | 0.611,0.278,0.111
37 | 0.615,0.269,0.115
38 | 0.615,0.269,0.115
39 | 0.600,0.267,0.133
40 | 0.615,0.269,0.115
41 | 0.594,0.281,0.125
42 | 0.594,0.281,0.125
43 | 0.594,0.281,0.125
44 | 0.630,0.259,0.111
45 | 0.609,0.261,0.130
46 | 0.591,0.273,0.136
47 | 0.615,0.231,0.154
48 | 0.600,0.267,0.133
49 | 0.600,0.250,0.150
50 | 0.571,0.286,0.143
51 | 0.609,0.261,0.130
52 | 0.611,0.278,0.111
53 | 0.609,0.261,0.130
54 | 0.591,0.273,0.136
55 | 0.615,0.269,0.115
56 | 0.591,0.273,0.136
57 | 0.600,0.250,0.150
58 | 0.600,0.267,0.133
59 | 0.611,0.278,0.111
60 | 0.588,0.294,0.118
61 | 0.609,0.261,0.130
62 | 0.615,0.269,0.115
63 | 0.594,0.281,0.125
64 | 0.594,0.281,0.125
65 | 0.581,0.290,0.129
66 | 0.594,0.281,0.125
67 | 0.583,0.292,0.125
68 | 0.615,0.269,0.115
69 | 0.609,0.261,0.130
70 | 0.600,0.280,0.120
71 | 0.586,0.276,0.138
72 | 0.600,0.280,0.120
73 | 0.606,0.273,0.121
74 | 0.600,0.267,0.133
75 | 0.595,0.270,0.135
76 | 0.571,0.286,0.143
77 | 0.583,0.278,0.139
78 | 0.579,0.289,0.132
79 | 0.571,0.286,0.143
80 | 0.564,0.282,0.154
81 | 0.559,0.294,0.147
82 | 0.579,0.263,0.158
83 | 0.571,0.286,0.143
84 | 0.571,0.286,0.143
85 | 0.590,0.282,0.128
86 | 0.588,0.294,0.118
87 | 0.591,0.273,0.136
88 | 0.590,0.282,0.128
89 | 0.605,0.279,0.116
90 | 0.595,0.286,0.119
91 | 0.583,0.292,0.125
92 | 0.518,0.304,0.179
93 |
--------------------------------------------------------------------------------
/FruitToEmoji/sketches/object_color_capture/object_color_capture.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Object color sampler
3 | --------------------
4 | Samples the color of objects and outputs CSV logfile to serial console
5 |
6 | Hardware: Arduino Nano 33 BLE Sense board.
7 |
8 | Usage: Place the object of interest close to the color sensor
9 |
10 | This example code is in the public domain.
11 | */
12 |
13 | #include <Arduino_APDS9960.h>
14 |
15 | void setup() {
16 |
17 | Serial.begin(9600);
18 | while (!Serial) {};
19 |
20 | if (!APDS.begin()) {
21 | Serial.println("Error initializing APDS9960 sensor.");
22 | }
23 |
24 | // print the header
25 | Serial.println("Red,Green,Blue");
26 | }
27 |
28 | void loop() {
29 | int r, g, b, c, p;
30 | float sum;
31 |
32 | // wait for proximity and color sensor data
33 | while (!APDS.colorAvailable() || !APDS.proximityAvailable()) {}
34 |
35 | // read the color and proximity data
36 | APDS.readColor(r, g, b, c);
37 | sum = r + g + b;
38 | p = APDS.readProximity();
39 |
40 | // if the object is close and well enough illuminated
41 | if (p == 0 && c > 10 && sum > 0) {
42 |
43 | float redRatio = r / sum;
44 | float greenRatio = g / sum;
45 | float blueRatio = b / sum;
46 |
47 | // print the data in CSV format
48 | Serial.print(redRatio, 3);
49 | Serial.print(',');
50 | Serial.print(greenRatio, 3);
51 | Serial.print(',');
52 | Serial.print(blueRatio, 3);
53 | Serial.println();
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/FruitToEmoji/sketches/object_color_classify/object_color_classify.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Object classifier by color
3 | --------------------------
4 |
5 | Uses RGB color sensor input to Neural Network to classify objects
6 | Outputs object class to serial using unicode emojis
7 |
8 | Note: The direct use of C/C++ pointers, namespaces, and dynamic memory is generally
9 | discouraged in Arduino examples, and in the future the TensorFlowLite library
10 | might change to make the sketch simpler.
11 |
12 | Hardware: Arduino Nano 33 BLE Sense board.
13 |
14 | Created by Don Coleman, Sandeep Mistry
15 | Adapted by Dominic Pajak
16 |
17 | This example code is in the public domain.
18 | */
19 |
20 | // Arduino_TensorFlowLite - Version: 0.alpha.precompiled
21 | #include <TensorFlowLite.h>
22 | 
23 | #include <tensorflow/lite/micro/all_ops_resolver.h>
24 | #include <tensorflow/lite/micro/micro_error_reporter.h>
25 | #include <tensorflow/lite/micro/micro_interpreter.h>
26 | #include <tensorflow/lite/schema/schema_generated.h>
27 | #include <tensorflow/lite/version.h>
28 | #include <Arduino_APDS9960.h>
29 | #include "model.h"
30 |
31 | // global variables used for TensorFlow Lite (Micro)
32 | tflite::MicroErrorReporter tflErrorReporter;
33 |
34 | // pull in all the TFLM ops, you can remove this line and
35 | // only pull in the TFLM ops you need, if you would like to reduce
36 | // the compiled size of the sketch.
37 | tflite::AllOpsResolver tflOpsResolver;
38 |
39 | const tflite::Model* tflModel = nullptr;
40 | tflite::MicroInterpreter* tflInterpreter = nullptr;
41 | TfLiteTensor* tflInputTensor = nullptr;
42 | TfLiteTensor* tflOutputTensor = nullptr;
43 |
44 | // Create a static memory buffer for TFLM, the size may need to
45 | // be adjusted based on the model you are using
46 | constexpr int tensorArenaSize = 8 * 1024;
47 | byte tensorArena[tensorArenaSize];
48 |
49 | // array to map object class index to a name
50 | const char* CLASSES[] = {
51 | "Apple", // u8"\U0001F34E", // Apple
52 | "Banana", // u8"\U0001F34C", // Banana
53 | "Orange" // u8"\U0001F34A" // Orange
54 | };
55 |
56 | #define NUM_CLASSES (sizeof(CLASSES) / sizeof(CLASSES[0]))
57 |
58 | void setup() {
59 | Serial.begin(9600);
60 | while (!Serial) {};
61 |
62 | Serial.println("Object classification using RGB color sensor");
63 | Serial.println("--------------------------------------------");
64 | Serial.println("Arduino Nano 33 BLE Sense running TensorFlow Lite Micro");
65 | Serial.println("");
66 |
67 | if (!APDS.begin()) {
68 | Serial.println("Error initializing APDS9960 sensor.");
69 | }
70 |
71 | // get the TFL representation of the model byte array
72 | tflModel = tflite::GetModel(model);
73 | if (tflModel->version() != TFLITE_SCHEMA_VERSION) {
74 | Serial.println("Model schema mismatch!");
75 | while (1);
76 | }
77 |
78 | // Create an interpreter to run the model
79 | tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize, &tflErrorReporter);
80 |
81 | // Allocate memory for the model's input and output tensors
82 | tflInterpreter->AllocateTensors();
83 |
84 | // Get pointers for the model's input and output tensors
85 | tflInputTensor = tflInterpreter->input(0);
86 | tflOutputTensor = tflInterpreter->output(0);
87 | }
88 |
89 | void loop() {
90 | int r, g, b, p, c;
91 | float sum;
92 |
93 | // check if both color and proximity data is available to sample
94 | while (!APDS.colorAvailable() || !APDS.proximityAvailable()) {}
95 |
96 | // read the color and proximity sensor
97 | APDS.readColor(r, g, b, c);
98 | p = APDS.readProximity();
99 | sum = r + g + b;
100 |
101 | // check if there's an object close and well illuminated enough
102 | if (p == 0 && c > 10 && sum > 0) {
103 |
104 | float redRatio = r / sum;
105 | float greenRatio = g / sum;
106 | float blueRatio = b / sum;
107 |
108 | // input sensor data to model
109 | tflInputTensor->data.f[0] = redRatio;
110 | tflInputTensor->data.f[1] = greenRatio;
111 | tflInputTensor->data.f[2] = blueRatio;
112 |
113 | // Run inferencing
114 | TfLiteStatus invokeStatus = tflInterpreter->Invoke();
115 | if (invokeStatus != kTfLiteOk) {
116 | Serial.println("Invoke failed!");
117 | while (1);
118 | return;
119 | }
120 |
121 | // Output results
122 | for (int i = 0; i < NUM_CLASSES; i++) {
123 | Serial.print(CLASSES[i]);
124 | Serial.print(" ");
125 | Serial.print(int(tflOutputTensor->data.f[i] * 100));
126 | Serial.print("%\n");
127 | }
128 | Serial.println();
129 |
130 | // Wait for the object to be moved away
131 | while (!APDS.proximityAvailable() || (APDS.readProximity() == 0)) {}
132 | }
133 |
134 | }
135 |
--------------------------------------------------------------------------------
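Note: the sketch above prints a confidence value for every class. If you only want the single most likely class, a minimal fragment (not part of the original example, reusing the sketch's `tflOutputTensor` and `CLASSES` names) could be added after the output loop inside `loop()`:

```cpp
// Hypothetical addition: pick and print only the highest-scoring class.
int bestIndex = 0;
float bestScore = tflOutputTensor->data.f[0];
for (int i = 1; i < NUM_CLASSES; i++) {
  if (tflOutputTensor->data.f[i] > bestScore) {
    bestScore = tflOutputTensor->data.f[i];
    bestIndex = i;
  }
}
Serial.print("Most likely object: ");
Serial.println(CLASSES[bestIndex]);
```
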
/GestureToEmoji/ArduinoSketches/Emoji_Button/Emoji_Button.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Emoji Button
3 |
4 | This example sends an emoji character over USB HID when the button is pressed.
5 |
6 | Note: Only macOS and Linux are supported at this time, and the use of
7 | #define is generally discouraged in Arduino examples
8 |
9 | The circuit:
10 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
11 | - Button connected to pin 3 and GND.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 |
15 | This example code is in the public domain.
16 | */
17 |
18 | #include <PluggableUSBHID.h>
19 | #include <USBKeyboard.h>
20 |
21 | // Select an OS:
22 | //#define MACOS // You'll need to enable and select the unicode keyboard: System Preferences -> Input Sources -> + -> Others -> Unicode Hex Input
23 | //#define LINUX
24 |
25 | #if !defined(MACOS) && !defined(LINUX)
26 | #error "Please select an OS!"
27 | #endif
28 |
29 | // use table: https://apps.timwhitlock.info/emoji/tables/unicode
30 | const int bicep = 0x1f4aa;
31 | const int punch = 0x1f44a;
32 |
33 | const int buttonPin = 3;
34 |
35 | USBKeyboard keyboard;
36 |
37 | int previousButtonState = HIGH;
38 |
39 | void setup() {
40 | pinMode(buttonPin, INPUT_PULLUP);
41 | }
42 |
43 | void loop() {
44 | int buttonState = digitalRead(buttonPin);
45 |
46 | if (buttonState != previousButtonState) {
47 | if (buttonState == LOW) {
48 | // pressed
49 | sentUtf8(bicep);
50 | } else {
51 | // released
52 | }
53 |
54 | previousButtonState = buttonState;
55 | }
56 | }
57 |
58 | void sentUtf8(unsigned long c) {
59 | String s;
60 |
61 | #if defined(MACOS)
62 | // https://apple.stackexchange.com/questions/183045/how-can-i-type-unicode-characters-without-using-the-mouse
63 |
64 | s = String(utf8ToUtf16(c), HEX);
65 |
66 | for (int i = 0; i < s.length(); i++) {
67 | keyboard.key_code(s[i], KEY_ALT);
68 | }
69 | #elif defined(LINUX)
70 | s = String(c, HEX);
71 |
72 | keyboard.key_code('u', KEY_CTRL | KEY_SHIFT);
73 |
74 | for (int i = 0; i < s.length(); i++) {
75 | keyboard.key_code(s[i]);
76 | }
77 | #endif
78 | keyboard.key_code(' ');
79 | }
80 |
81 | // based on https://stackoverflow.com/a/6240819/2020087
82 | unsigned long utf8ToUtf16(unsigned long in) {
83 | unsigned long result = 0;
84 |
85 | in -= 0x10000;
86 |
87 | result |= (in & 0x3ff);
88 | result |= (in << 6) & 0x03ff0000;
89 | result |= 0xd800dc00;
90 |
91 | return result;
92 | }
93 |
--------------------------------------------------------------------------------
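Note: `utf8ToUtf16()` above converts a Unicode code point above U+FFFF into a UTF-16 surrogate pair and packs both halves into a single 32-bit value, with the high surrogate in the upper 16 bits. An equivalent, more explicit sketch of the same math (hypothetical name, not part of the original sketch) is:

```cpp
// Equivalent form of the surrogate-pair packing used in Emoji_Button.ino.
unsigned long codePointToSurrogatePair(unsigned long codePoint) {
  unsigned long v = codePoint - 0x10000;       // 20-bit offset above the Basic Multilingual Plane
  unsigned long high = 0xD800 + (v >> 10);     // high surrogate from the top 10 bits
  unsigned long low  = 0xDC00 + (v & 0x3FF);   // low surrogate from the bottom 10 bits
  return (high << 16) | low;                   // packed the same way the original function returns it
}
```
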
/GestureToEmoji/ArduinoSketches/HardwareTest/HardwareTest.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Hardware Test
3 |
4 | This example performs a basic hardware test of the board which includes
5 | testing the on-board IMU, LED and external button.
6 |
7 | When the button is pressed the on-board LED will turn on.
8 |
9 | The circuit:
10 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
11 | - Button connected to pin 3 and GND.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 |
15 | This example code is in the public domain.
16 | */
17 |
18 | #include <Arduino_LSM9DS1.h>
19 |
20 | const int buttonPin = 3;
21 | const int ledPin = LED_BUILTIN;
22 |
23 | int buttonState = LOW;
24 | int previousButtonState = HIGH;
25 |
26 | void setup() {
27 | Serial.begin(9600);
28 | //while (!Serial);
29 | Serial.println("Arduino ML Workshop Hardware Test");
30 |
31 | if (!IMU.begin()) {
32 | Serial.println("Failed to initialize IMU!");
33 | while (1);
34 | }
35 |
36 | // initialize the LED pin as an output:
37 | pinMode(ledPin, OUTPUT);
38 | // initialize the push button pin as an input with (internal) pullup:
39 | pinMode(buttonPin, INPUT_PULLUP);
40 | }
41 |
42 | void loop() {
43 | // read the state of the push button pin:
44 | buttonState = digitalRead(buttonPin);
45 |
46 | // HIGH and LOW are opposite because we are using an internal pullup resistor.
47 | // LOW is pressed. HIGH is released.
48 |
49 | if (buttonState == LOW) {
50 | // Button is pressed, turn the LED on
51 | digitalWrite(ledPin, HIGH);
52 | if (buttonState != previousButtonState) {
53 | Serial.println("LED is ON");
54 | }
55 | } else {
56 | // Button is released, turn the LED off
57 | digitalWrite(ledPin, LOW);
58 | if (buttonState != previousButtonState) {
59 | Serial.println("LED is OFF");
60 | }
61 | }
62 |
63 | // save the previous state of the button since we only print
64 | // the LED status when the state changes
65 | previousButtonState = buttonState;
66 | }
67 |
--------------------------------------------------------------------------------
/GestureToEmoji/ArduinoSketches/IMU_Capture/IMU_Capture.ino:
--------------------------------------------------------------------------------
1 | /*
2 | IMU Capture
3 |
4 | This example reads acceleration and gyroscope data from the on-board IMU
5 | and prints it to the Serial Monitor for one second
6 | when significant motion is detected.
7 |
8 | You can also use the Serial Plotter to graph the data.
9 |
10 | The circuit:
11 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 | Modified by Dominic Pajak, Sandeep Mistry
15 |
16 | This example code is in the public domain.
17 | */
18 |
19 | #include <Arduino_LSM9DS1.h>
20 |
21 | const float accelerationThreshold = 2.5; // threshold of significant motion in G's
22 | const int numSamples = 119;
23 |
24 | int samplesRead = numSamples;
25 |
26 | void setup() {
27 | Serial.begin(9600);
28 | while (!Serial);
29 |
30 | if (!IMU.begin()) {
31 | Serial.println("Failed to initialize IMU!");
32 | while (1);
33 | }
34 |
35 | // print the header
36 | Serial.println("aX,aY,aZ,gX,gY,gZ");
37 | }
38 |
39 | void loop() {
40 | float aX, aY, aZ, gX, gY, gZ;
41 |
42 | // wait for significant motion
43 | while (samplesRead == numSamples) {
44 | if (IMU.accelerationAvailable()) {
45 | // read the acceleration data
46 | IMU.readAcceleration(aX, aY, aZ);
47 |
48 | // sum up the absolutes
49 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ);
50 |
51 | // check if it's above the threshold
52 | if (aSum >= accelerationThreshold) {
53 | // reset the sample read count
54 | samplesRead = 0;
55 | break;
56 | }
57 | }
58 | }
59 |
60 | // check if all the required samples have been read since
61 | // the last time the significant motion was detected
62 | while (samplesRead < numSamples) {
63 | // check if both new acceleration and gyroscope data is
64 | // available
65 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) {
66 | // read the acceleration and gyroscope data
67 | IMU.readAcceleration(aX, aY, aZ);
68 | IMU.readGyroscope(gX, gY, gZ);
69 |
70 | samplesRead++;
71 |
72 | // print the data in CSV format
73 | Serial.print(aX, 3);
74 | Serial.print(',');
75 | Serial.print(aY, 3);
76 | Serial.print(',');
77 | Serial.print(aZ, 3);
78 | Serial.print(',');
79 | Serial.print(gX, 3);
80 | Serial.print(',');
81 | Serial.print(gY, 3);
82 | Serial.print(',');
83 | Serial.print(gZ, 3);
84 | Serial.println();
85 |
86 | if (samplesRead == numSamples) {
87 | // add an empty line if it's the last sample
88 | Serial.println();
89 | }
90 | }
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/GestureToEmoji/ArduinoSketches/IMU_Classifier/IMU_Classifier.ino:
--------------------------------------------------------------------------------
1 | /*
2 | IMU Classifier
3 |
4 | This example reads acceleration and gyroscope data from the on-board IMU;
5 | once enough samples are read, it then uses a
6 | TensorFlow Lite (Micro) model to try to classify the movement as a known gesture.
7 |
8 | Note: The direct use of C/C++ pointers, namespaces, and dynamic memory is generally
9 | discouraged in Arduino examples, and in the future the TensorFlowLite library
10 | might change to make the sketch simpler.
11 |
12 | The circuit:
13 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
14 |
15 | Created by Don Coleman, Sandeep Mistry
16 | Modified by Dominic Pajak, Sandeep Mistry
17 |
18 | This example code is in the public domain.
19 | */
20 |
21 | #include <Arduino_LSM9DS1.h>
22 | 
23 | #include <TensorFlowLite.h>
24 | #include <tensorflow/lite/micro/all_ops_resolver.h>
25 | #include <tensorflow/lite/micro/micro_error_reporter.h>
26 | #include <tensorflow/lite/micro/micro_interpreter.h>
27 | #include <tensorflow/lite/schema/schema_generated.h>
28 | #include <tensorflow/lite/version.h>
29 |
30 | #include "model.h"
31 |
32 | const float accelerationThreshold = 2.5; // threshold of significant motion in G's
33 | const int numSamples = 119;
34 |
35 | int samplesRead = numSamples;
36 |
37 | // global variables used for TensorFlow Lite (Micro)
38 | tflite::MicroErrorReporter tflErrorReporter;
39 |
40 | // pull in all the TFLM ops, you can remove this line and
41 | // only pull in the TFLM ops you need, if you would like to reduce
42 | // the compiled size of the sketch.
43 | tflite::AllOpsResolver tflOpsResolver;
44 |
45 | const tflite::Model* tflModel = nullptr;
46 | tflite::MicroInterpreter* tflInterpreter = nullptr;
47 | TfLiteTensor* tflInputTensor = nullptr;
48 | TfLiteTensor* tflOutputTensor = nullptr;
49 |
50 | // Create a static memory buffer for TFLM, the size may need to
51 | // be adjusted based on the model you are using
52 | constexpr int tensorArenaSize = 8 * 1024;
53 | byte tensorArena[tensorArenaSize] __attribute__((aligned(16)));
54 |
55 | // array to map gesture index to a name
56 | const char* GESTURES[] = {
57 | "punch",
58 | "flex"
59 | };
60 |
61 | #define NUM_GESTURES (sizeof(GESTURES) / sizeof(GESTURES[0]))
62 |
63 | void setup() {
64 | Serial.begin(9600);
65 | while (!Serial);
66 |
67 | // initialize the IMU
68 | if (!IMU.begin()) {
69 | Serial.println("Failed to initialize IMU!");
70 | while (1);
71 | }
72 |
73 | // print out the sample rates of the IMU
74 | Serial.print("Accelerometer sample rate = ");
75 | Serial.print(IMU.accelerationSampleRate());
76 | Serial.println(" Hz");
77 | Serial.print("Gyroscope sample rate = ");
78 | Serial.print(IMU.gyroscopeSampleRate());
79 | Serial.println(" Hz");
80 |
81 | Serial.println();
82 |
83 | // get the TFL representation of the model byte array
84 | tflModel = tflite::GetModel(model);
85 | if (tflModel->version() != TFLITE_SCHEMA_VERSION) {
86 | Serial.println("Model schema mismatch!");
87 | while (1);
88 | }
89 |
90 | // Create an interpreter to run the model
91 | tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize, &tflErrorReporter);
92 |
93 | // Allocate memory for the model's input and output tensors
94 | tflInterpreter->AllocateTensors();
95 |
96 | // Get pointers for the model's input and output tensors
97 | tflInputTensor = tflInterpreter->input(0);
98 | tflOutputTensor = tflInterpreter->output(0);
99 | }
100 |
101 | void loop() {
102 | float aX, aY, aZ, gX, gY, gZ;
103 |
104 | // wait for significant motion
105 | while (samplesRead == numSamples) {
106 | if (IMU.accelerationAvailable()) {
107 | // read the acceleration data
108 | IMU.readAcceleration(aX, aY, aZ);
109 |
110 | // sum up the absolutes
111 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ);
112 |
113 | // check if it's above the threshold
114 | if (aSum >= accelerationThreshold) {
115 | // reset the sample read count
116 | samplesRead = 0;
117 | break;
118 | }
119 | }
120 | }
121 |
122 | // check if all the required samples have been read since
123 | // the last time the significant motion was detected
124 | while (samplesRead < numSamples) {
125 | // check if new acceleration AND gyroscope data is available
126 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) {
127 | // read the acceleration and gyroscope data
128 | IMU.readAcceleration(aX, aY, aZ);
129 | IMU.readGyroscope(gX, gY, gZ);
130 |
131 | // normalize the IMU data between 0 to 1 and store in the model's
132 | // input tensor
133 | tflInputTensor->data.f[samplesRead * 6 + 0] = (aX + 4.0) / 8.0;
134 | tflInputTensor->data.f[samplesRead * 6 + 1] = (aY + 4.0) / 8.0;
135 | tflInputTensor->data.f[samplesRead * 6 + 2] = (aZ + 4.0) / 8.0;
136 | tflInputTensor->data.f[samplesRead * 6 + 3] = (gX + 2000.0) / 4000.0;
137 | tflInputTensor->data.f[samplesRead * 6 + 4] = (gY + 2000.0) / 4000.0;
138 | tflInputTensor->data.f[samplesRead * 6 + 5] = (gZ + 2000.0) / 4000.0;
139 |
140 | samplesRead++;
141 |
142 | if (samplesRead == numSamples) {
143 | // Run inferencing
144 | TfLiteStatus invokeStatus = tflInterpreter->Invoke();
145 | if (invokeStatus != kTfLiteOk) {
146 | Serial.println("Invoke failed!");
147 | while (1);
148 | return;
149 | }
150 |
151 | // Loop through the output tensor values from the model
152 | for (int i = 0; i < NUM_GESTURES; i++) {
153 | Serial.print(GESTURES[i]);
154 | Serial.print(": ");
155 | Serial.println(tflOutputTensor->data.f[i], 6);
156 | }
157 | Serial.println();
158 | }
159 | }
160 | }
161 | }
162 |
--------------------------------------------------------------------------------
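Note: `AllocateTensors()` is called in both classifier sketches without checking its return value; if the model does not fit in the 8 KB `tensorArena`, inference fails with no obvious error. A minimal defensive variant (assuming the same TFLM API and names used above) would be:

```cpp
// Hypothetical check in setup(): stop with a hint if the tensor arena is too small.
TfLiteStatus allocateStatus = tflInterpreter->AllocateTensors();
if (allocateStatus != kTfLiteOk) {
  Serial.println("AllocateTensors() failed - try increasing tensorArenaSize");
  while (1);
}
```
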
/GestureToEmoji/ArduinoSketches/IMU_Classifier/model.h:
--------------------------------------------------------------------------------
1 | const unsigned char model[] = {
2 |
3 | };
4 |
--------------------------------------------------------------------------------
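Note: model.h above is an empty placeholder; it is meant to be replaced by the model.h generated in the notebook's "Encode the Model in an Arduino Header File" step. After that step the file has roughly this shape (the byte values below are placeholders; the real ones come from `xxd -i gesture_model.tflite`):

```cpp
// Illustrative only - generated content, do not edit by hand.
const unsigned char model[] = {
  0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33,  // FlatBuffer header; bytes 4-7 spell "TFL3"
  // ... thousands more bytes of the .tflite flatbuffer ...
};
```
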
/GestureToEmoji/README.md:
--------------------------------------------------------------------------------
1 | # Machine Learning on Arduino
2 | ## TensorFlow Lite gesture training tutorial
3 |
4 | In this tutorial we will teach a board to recognise gestures! We'll capture motion data from the [Arduino Nano 33 BLE Sense](https://store.arduino.cc/arduino-nano-33-ble-sense) board, import it into TensorFlow to train a model, and deploy a classifier onto the board using [TensorFlow Lite for microcontrollers](https://www.tensorflow.org/lite/microcontrollers/overview).
5 |
6 | ### Credits
7 |
8 | This tutorial is adapted from the [workshop](https://github.com/sandeepmistry/aimldevfest-workshop-2019) that Sandeep Mistry (Arduino) and Don Coleman (Chariot Solutions) presented at AI/ML Devfest in September 2019.
9 |
10 |
11 |
12 |
13 | ## Exercises
14 |
15 | * [Exercise 1: Development Environment](exercises/exercise1.md)
16 | * [Exercise 2: Connecting the Board](exercises/exercise2.md)
17 | * [Exercise 3: Visualizing the IMU Data](exercises/exercise3.md)
18 | * [Exercise 4: Gather the Training Data](exercises/exercise4.md)
19 | * [Exercise 5: Machine Learning](exercises/exercise5.md)
20 | * [Exercise 6: Classifying IMU Data](exercises/exercise6.md)
21 | * [Exercise 7: Gesture Controlled USB Emoji Keyboard](exercises/exercise7.md)
22 | * [Exercise 8: Next Steps](exercises/exercise8.md)
23 |
24 |
25 |
--------------------------------------------------------------------------------
/GestureToEmoji/arduino_tinyml_workshop.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "arduino_tinyml_workshop.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": [],
9 | "toc_visible": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {
20 | "id": "f92-4Hjy7kA8",
21 | "colab_type": "text"
22 | },
23 | "source": [
24 | "
\n",
25 | "# Tiny ML on Arduino\n",
26 | "## Gesture recognition tutorial\n",
27 | " * Sandeep Mistry - Arduino\n",
28 | " * Don Coleman - Chariot Solutions\n",
29 | "\n",
30 | " \n",
31 | "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {
37 | "id": "uvDA8AK7QOq-",
38 | "colab_type": "text"
39 | },
40 | "source": [
41 | "## Setup Python Environment \n",
42 | "\n",
43 | "The next cell sets up the dependencies in required for the notebook, run it."
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "metadata": {
49 | "id": "Y2gs-PL4xDkZ",
50 | "colab_type": "code",
51 | "colab": {}
52 | },
53 | "source": [
54 | "# Setup environment\n",
55 | "!apt-get -qq install xxd\n",
56 | "!pip install pandas numpy matplotlib\n",
57 | "!pip install tensorflow==2.0.0-rc1"
58 | ],
59 | "execution_count": 0,
60 | "outputs": []
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {
65 | "id": "9lwkeshJk7dg",
66 | "colab_type": "text"
67 | },
68 | "source": [
69 | "# Upload Data\n",
70 | "\n",
71 | "1. Open the panel on the left side of Colab by clicking on the __>__\n",
72 | "1. Select the files tab\n",
73 | "1. Drag `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {
79 | "id": "Eh9yve14gUyD",
80 | "colab_type": "text"
81 | },
82 | "source": [
83 | "# Graph Data (optional)\n",
84 | "\n",
85 | "We'll graph the input files on two separate graphs, acceleration and gyroscope, as each data set has different units and scale."
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "metadata": {
91 | "id": "I65ukChEgyNp",
92 | "colab_type": "code",
93 | "colab": {}
94 | },
95 | "source": [
96 | "import matplotlib.pyplot as plt\n",
97 | "import numpy as np\n",
98 | "import pandas as pd\n",
99 | "\n",
100 | "filename = \"punch.csv\"\n",
101 | "\n",
102 | "df = pd.read_csv(\"/content/\" + filename)\n",
103 | "\n",
104 | "index = range(1, len(df['aX']) + 1)\n",
105 | "\n",
106 | "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
107 | "\n",
108 | "plt.plot(index, df['aX'], 'g.', label='x', linestyle='solid', marker=',')\n",
109 | "plt.plot(index, df['aY'], 'b.', label='y', linestyle='solid', marker=',')\n",
110 | "plt.plot(index, df['aZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
111 | "plt.title(\"Acceleration\")\n",
112 | "plt.xlabel(\"Sample #\")\n",
113 | "plt.ylabel(\"Acceleration (G)\")\n",
114 | "plt.legend()\n",
115 | "plt.show()\n",
116 | "\n",
117 | "plt.plot(index, df['gX'], 'g.', label='x', linestyle='solid', marker=',')\n",
118 | "plt.plot(index, df['gY'], 'b.', label='y', linestyle='solid', marker=',')\n",
119 | "plt.plot(index, df['gZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
120 | "plt.title(\"Gyroscope\")\n",
121 | "plt.xlabel(\"Sample #\")\n",
122 | "plt.ylabel(\"Gyroscope (deg/sec)\")\n",
123 | "plt.legend()\n",
124 | "plt.show()\n"
125 | ],
126 | "execution_count": 0,
127 | "outputs": []
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "metadata": {
132 | "id": "kSxUeYPNQbOg",
133 | "colab_type": "text"
134 | },
135 | "source": [
136 | "# Train Neural Network\n",
137 | "\n",
138 | "\n",
139 | "\n"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {
145 | "id": "Gxk414PU3oy3",
146 | "colab_type": "text"
147 | },
148 | "source": [
149 | "## Parse and prepare the data\n",
150 | "\n",
151 | "The next cell parses the csv files and transforms them to a format that will be used to train the fully connected neural network.\n",
152 | "\n",
153 | "Update the `GESTURES` list with the gesture data you've collected in `.csv` format.\n"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "metadata": {
159 | "id": "AGChd1FAk5_j",
160 | "colab_type": "code",
161 | "colab": {}
162 | },
163 | "source": [
164 | "import matplotlib.pyplot as plt\n",
165 | "import numpy as np\n",
166 | "import pandas as pd\n",
167 | "import tensorflow as tf\n",
168 | "\n",
169 | "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
170 | "\n",
171 | "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
172 | "# the same random numbers each time the notebook is run\n",
173 | "SEED = 1337\n",
174 | "np.random.seed(SEED)\n",
175 | "tf.random.set_seed(SEED)\n",
176 | "\n",
177 | "# the list of gestures that data is available for\n",
178 | "GESTURES = [\n",
179 | " \"punch\",\n",
180 | " \"flex\",\n",
181 | "]\n",
182 | "\n",
183 | "SAMPLES_PER_GESTURE = 119\n",
184 | "\n",
185 | "NUM_GESTURES = len(GESTURES)\n",
186 | "\n",
187 | "# create a one-hot encoded matrix that is used in the output\n",
188 | "ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)\n",
189 | "\n",
190 | "inputs = []\n",
191 | "outputs = []\n",
192 | "\n",
193 | "# read each csv file and push an input and output\n",
194 | "for gesture_index in range(NUM_GESTURES):\n",
195 | " gesture = GESTURES[gesture_index]\n",
196 | " print(f\"Processing index {gesture_index} for gesture '{gesture}'.\")\n",
197 | " \n",
198 | " output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n",
199 | " \n",
200 | " df = pd.read_csv(\"/content/\" + gesture + \".csv\")\n",
201 | " \n",
202 | " # calculate the number of gesture recordings in the file\n",
203 | " num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n",
204 | " \n",
205 | " print(f\"\\tThere are {num_recordings} recordings of the {gesture} gesture.\")\n",
206 | " \n",
207 | " for i in range(num_recordings):\n",
208 | " tensor = []\n",
209 | " for j in range(SAMPLES_PER_GESTURE):\n",
210 | " index = i * SAMPLES_PER_GESTURE + j\n",
211 | " # normalize the input data, between 0 to 1:\n",
212 | " # - acceleration is between: -4 to +4\n",
213 | " # - gyroscope is between: -2000 to +2000\n",
214 | " tensor += [\n",
215 | " (df['aX'][index] + 4) / 8,\n",
216 | " (df['aY'][index] + 4) / 8,\n",
217 | " (df['aZ'][index] + 4) / 8,\n",
218 | " (df['gX'][index] + 2000) / 4000,\n",
219 | " (df['gY'][index] + 2000) / 4000,\n",
220 | " (df['gZ'][index] + 2000) / 4000\n",
221 | " ]\n",
222 | "\n",
223 | " inputs.append(tensor)\n",
224 | " outputs.append(output)\n",
225 | "\n",
226 | "# convert the list to numpy array\n",
227 | "inputs = np.array(inputs)\n",
228 | "outputs = np.array(outputs)\n",
229 | "\n",
230 | "print(\"Data set parsing and preparation complete.\")"
231 | ],
232 | "execution_count": 0,
233 | "outputs": []
234 | },
235 | {
236 | "cell_type": "markdown",
237 | "metadata": {
238 | "id": "d5_61831d5AM",
239 | "colab_type": "text"
240 | },
241 | "source": [
242 | "## Randomize and split the input and output pairs for training\n",
243 | "\n",
244 | "Randomly split input and output pairs into sets of data: 60% for training, 20% for validation, and 20% for testing.\n",
245 | "\n",
246 | " - the training set is used to train the model\n",
247 | " - the validation set is used to measure how well the model is performing during training\n",
248 | " - the testing set is used to test the model after training"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "metadata": {
254 | "id": "QfNEmUZMeIEx",
255 | "colab_type": "code",
256 | "colab": {}
257 | },
258 | "source": [
259 | "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
260 | "# https://stackoverflow.com/a/37710486/2020087\n",
261 | "num_inputs = len(inputs)\n",
262 | "randomize = np.arange(num_inputs)\n",
263 | "np.random.shuffle(randomize)\n",
264 | "\n",
265 | "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
266 | "inputs = inputs[randomize]\n",
267 | "outputs = outputs[randomize]\n",
268 | "\n",
269 | "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
270 | "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
271 | "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
272 | "\n",
273 | "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
274 | "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
275 | "\n",
276 | "print(\"Data set randomization and splitting complete.\")"
277 | ],
278 | "execution_count": 0,
279 | "outputs": []
280 | },
281 | {
282 | "cell_type": "markdown",
283 | "metadata": {
284 | "id": "a9g2n41p24nR",
285 | "colab_type": "text"
286 | },
287 | "source": [
288 | "## Build & Train the Model\n",
289 | "\n",
290 | "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
291 | ]
292 | },
293 | {
294 | "cell_type": "code",
295 | "metadata": {
296 | "id": "kGNFa-lX24Qo",
297 | "colab_type": "code",
298 | "colab": {}
299 | },
300 | "source": [
301 | "# build the model and train it\n",
302 | "model = tf.keras.Sequential()\n",
303 | "model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n",
304 | "model.add(tf.keras.layers.Dense(15, activation='relu'))\n",
305 | "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax')) # softmax is used, because we only expect one gesture to occur per input\n",
306 | "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
307 | "history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n",
308 | "\n"
309 | ],
310 | "execution_count": 0,
311 | "outputs": []
312 | },
313 | {
314 | "cell_type": "markdown",
315 | "metadata": {
316 | "id": "NUDPvaJE1wRE",
317 | "colab_type": "text"
318 | },
319 | "source": [
320 | "## Verify \n",
321 | "\n",
322 | "Graph the models performance vs validation.\n"
323 | ]
324 | },
325 | {
326 | "cell_type": "markdown",
327 | "metadata": {
328 | "id": "kxA0zCOaS35v",
329 | "colab_type": "text"
330 | },
331 | "source": [
332 | "### Graph the loss\n",
333 | "\n",
334 | "Graph the loss to see when the model stops improving."
335 | ]
336 | },
337 | {
338 | "cell_type": "code",
339 | "metadata": {
340 | "id": "bvFNHXoQzmcM",
341 | "colab_type": "code",
342 | "colab": {}
343 | },
344 | "source": [
345 | "# increase the size of the graphs. The default size is (6,4).\n",
346 | "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
347 | "\n",
348 | "# graph the loss, the model above is configure to use \"mean squared error\" as the loss function\n",
349 | "loss = history.history['loss']\n",
350 | "val_loss = history.history['val_loss']\n",
351 | "epochs = range(1, len(loss) + 1)\n",
352 | "plt.plot(epochs, loss, 'g.', label='Training loss')\n",
353 | "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n",
354 | "plt.title('Training and validation loss')\n",
355 | "plt.xlabel('Epochs')\n",
356 | "plt.ylabel('Loss')\n",
357 | "plt.legend()\n",
358 | "plt.show()\n",
359 | "\n",
360 | "print(plt.rcParams[\"figure.figsize\"])"
361 | ],
362 | "execution_count": 0,
363 | "outputs": []
364 | },
365 | {
366 | "cell_type": "markdown",
367 | "metadata": {
368 | "id": "DG3m-VpE1zOd",
369 | "colab_type": "text"
370 | },
371 | "source": [
372 | "### Graph the loss again, skipping a bit of the start\n",
373 | "\n",
374 | "We'll graph the same data as the previous code cell, but start at index 100 so we can further zoom in once the model starts to converge."
375 | ]
376 | },
377 | {
378 | "cell_type": "code",
379 | "metadata": {
380 | "id": "c3xT7ue2zovd",
381 | "colab_type": "code",
382 | "colab": {}
383 | },
384 | "source": [
385 | "# graph the loss again skipping a bit of the start\n",
386 | "SKIP = 100\n",
387 | "plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')\n",
388 | "plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')\n",
389 | "plt.title('Training and validation loss')\n",
390 | "plt.xlabel('Epochs')\n",
391 | "plt.ylabel('Loss')\n",
392 | "plt.legend()\n",
393 | "plt.show()"
394 | ],
395 | "execution_count": 0,
396 | "outputs": []
397 | },
398 | {
399 | "cell_type": "markdown",
400 | "metadata": {
401 | "id": "CRjvkFQy2RgS",
402 | "colab_type": "text"
403 | },
404 | "source": [
405 | "### Graph the mean absolute error\n",
406 | "\n",
407 | "[Mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) is another metric to judge the performance of the model.\n",
408 | "\n"
409 | ]
410 | },
411 | {
412 | "cell_type": "code",
413 | "metadata": {
414 | "id": "mBjCf1-2zx9C",
415 | "colab_type": "code",
416 | "colab": {}
417 | },
418 | "source": [
419 | "# graph of mean absolute error\n",
420 | "mae = history.history['mae']\n",
421 | "val_mae = history.history['val_mae']\n",
422 | "plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')\n",
423 | "plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')\n",
424 | "plt.title('Training and validation mean absolute error')\n",
425 | "plt.xlabel('Epochs')\n",
426 | "plt.ylabel('MAE')\n",
427 | "plt.legend()\n",
428 | "plt.show()\n"
429 | ],
430 | "execution_count": 0,
431 | "outputs": []
432 | },
433 | {
434 | "cell_type": "markdown",
435 | "metadata": {
436 | "id": "guMjtfa42ahM",
437 | "colab_type": "text"
438 | },
439 | "source": [
440 | "### Run with Test Data\n",
441 | "Put our test data into the model and plot the predictions\n"
442 | ]
443 | },
444 | {
445 | "cell_type": "code",
446 | "metadata": {
447 | "id": "V3Y0CCWJz2EK",
448 | "colab_type": "code",
449 | "colab": {}
450 | },
451 | "source": [
452 | "# use the model to predict the test inputs\n",
453 | "predictions = model.predict(inputs_test)\n",
454 | "\n",
455 | "# print the predictions and the expected ouputs\n",
456 | "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
457 | "print(\"actual =\\n\", outputs_test)\n",
458 | "\n",
459 | "# Plot the predictions along with to the test data\n",
460 | "plt.clf()\n",
461 | "plt.title('Training data predicted vs actual values')\n",
462 | "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
463 | "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
464 | "plt.show()"
465 | ],
466 | "execution_count": 0,
467 | "outputs": []
468 | },
469 | {
470 | "cell_type": "markdown",
471 | "metadata": {
472 | "id": "j7DO6xxXVCym",
473 | "colab_type": "text"
474 | },
475 | "source": [
476 | "# Convert the Trained Model to Tensor Flow Lite\n",
477 | "\n",
478 | "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out."
479 | ]
480 | },
481 | {
482 | "cell_type": "code",
483 | "metadata": {
484 | "id": "0Xn1-Rn9Cp_8",
485 | "colab_type": "code",
486 | "colab": {}
487 | },
488 | "source": [
489 | "# Convert the model to the TensorFlow Lite format without quantization\n",
490 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
491 | "tflite_model = converter.convert()\n",
492 | "\n",
493 | "# Save the model to disk\n",
494 | "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
495 | " \n",
496 | "import os\n",
497 | "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
498 | "print(\"Model is %d bytes\" % basic_model_size)\n",
499 | " \n",
500 | " "
501 | ],
502 | "execution_count": 0,
503 | "outputs": []
504 | },
505 | {
506 | "cell_type": "markdown",
507 | "metadata": {
508 | "id": "ykccQn7SXrUX",
509 | "colab_type": "text"
510 | },
511 | "source": [
512 | "## Encode the Model in an Arduino Header File \n",
513 | "\n",
514 | "The next cell creates a constant byte array that contains the TFlite model. Import it as a tab with the sketch below."
515 | ]
516 | },
517 | {
518 | "cell_type": "code",
519 | "metadata": {
520 | "id": "9J33uwpNtAku",
521 | "colab_type": "code",
522 | "colab": {}
523 | },
524 | "source": [
525 | "!echo \"const unsigned char model[] = {\" > /content/model.h\n",
526 | "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
527 | "!echo \"};\" >> /content/model.h\n",
528 | "\n",
529 | "import os\n",
530 | "model_h_size = os.path.getsize(\"model.h\")\n",
531 | "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
532 | "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
533 | ],
534 | "execution_count": 0,
535 | "outputs": []
536 | },
537 | {
538 | "cell_type": "markdown",
539 | "metadata": {
540 | "id": "1eSkHZaLzMId",
541 | "colab_type": "text"
542 | },
543 | "source": [
544 | "# Classifying IMU Data\n",
545 | "\n",
546 | "Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n"
547 | ]
548 | }
549 | ]
550 | }
551 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise1.md:
--------------------------------------------------------------------------------
1 | # Exercise 1: Development Environment
2 |
3 | Set up your computer for Arduino development.
4 |
5 | ## Arduino IDE
6 | Install the Arduino IDE from https://arduino.cc/downloads
7 |
8 | 
9 |
10 | ### Arduino nRF528x Boards Definitions
11 | Use the Arduino Boards Manager to install the Arduino nRF528x Boards (Mbed OS) definitions. Open the Boards Manager using the menu _Tools -> Board: -> Boards Manager..._
12 |
13 | 
14 |
15 | Search for "Nano 33 BLE" and install the Arduino nRF528x Boards (Mbed OS) definitions.
16 |
17 | 
18 |
19 | ### Arduino Libraries
20 | Install the following Arduino libraries using the Library manager:
21 |
22 | * TensorFlow Lite library (search for "Arduino_TensorFlowLite")
23 | * Arduino LSM9DS1 library (search for "Arduino_LSM9DS1")
24 |
25 | Open the library manager using the menu _Tools -> Manage Libraries..._
26 |
27 | 
28 |
29 | Search for "Arduino_TensorFlowLite". Click the row and press the __Install__ button to install TensorFlow Lite for Microcontrollers.
30 |
31 | Search for "Arduino_LSM9DS1". Click the row and press the __Install__ button to install the Arduino LSM9DS1 accelerometer, magnetometer, and gyroscope library.
32 |
33 | 
34 |
35 |
36 | __Linux users__ may need to configure permissions so their user can access the serial port. See the [Getting Started Guide for Linux](https://www.arduino.cc/en/guide/linux) on the Arduino website for more information.
37 |
38 | Next [Exercise 2: Connecting the board](exercise2.md)
39 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise2.md:
--------------------------------------------------------------------------------
1 | # Exercise 2: Connecting the board
2 |
3 |
4 |
5 |
6 | 
7 |
8 |
9 | ## Unboxing and set up
10 |
11 | 1. Remove the Arduino Nano 33 BLE Sense board from the box
12 | 1. Plug the micro USB cable into the board and your computer
13 | 1. Open the Arduino IDE application on your computer
14 | 1. Choose the board `Tools -> Board -> Arduino Nano 33 BLE`
15 | 1. Choose the port `Tools -> Port -> COM5 (Arduino Nano 33 BLE)`. Note that the actual port may be different on your computer.
16 |
17 |
18 | ## LSM9DS1 Examples (Optional)
19 |
20 | You can try the example sketches that came with the LSM9DS1 library.
21 |
22 | 1. Open the Simple Accelerometer sketch using `File -> Examples -> Arduino_LSM9DS1 -> SimpleAccelerometer`
23 | 1. Upload the sketch to the board using the `Sketch -> Upload` menu or the right arrow button on the toolbar.
24 | 1. Open the Serial Monitor `Tools -> Serial Monitor` to view the text output
25 | 1. Open the Serial Plotter `Tools -> Serial Plotter` to view the output on a graph
26 |
27 | 
28 |
29 | 
30 |
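
If you just want to see what the library API boils down to, here is a minimal sketch, assuming the Arduino_LSM9DS1 library from Exercise 1 is installed, that streams the accelerometer readings in a form the Serial Plotter can graph (the bundled SimpleAccelerometer example does essentially the same thing):

```cpp
// Minimal accelerometer stream for the Serial Monitor/Plotter.
#include <Arduino_LSM9DS1.h>

void setup() {
  Serial.begin(9600);
  while (!Serial);                 // wait for the Serial Monitor/Plotter to attach

  if (!IMU.begin()) {
    Serial.println("Failed to initialize IMU!");
    while (1);
  }
}

void loop() {
  float x, y, z;
  if (IMU.accelerationAvailable()) {
    IMU.readAcceleration(x, y, z); // values are in g
    Serial.print(x);
    Serial.print('\t');
    Serial.print(y);
    Serial.print('\t');
    Serial.println(z);             // three tab-separated values plot as three lines
  }
}
```
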
31 | Next [Exercise 3: Visualize the IMU Data](exercise3.md)
32 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise3.md:
--------------------------------------------------------------------------------
1 | # Exercise 3: Visualize the IMU Data
2 |
3 | 1. Open __ArduinoSketches/IMU_Capture/IMU_Capture.ino__ in the Arduino IDE.
4 | 1. Compile the sketch and upload it to the board: `Sketch -> Upload`
5 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
6 | 1. Press the button; IMU data will be captured and output for 1 second
7 | 1. Close the Serial Monitor window
8 | 1. Open the Serial Plotter: `Tools -> Serial Plotter`
9 | 1. Press the button, and perform a gesture
10 | 1. You'll see a graph of the captured data
11 | 1. Repeat capturing various gestures to get a sense of what the training data will look like
12 | 1. Close the Serial Plotter
13 |
14 | 
15 |
16 | 
17 |
18 | Next [Exercise 4: Gather the Training Data](exercise4.md)
19 |
20 |
21 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise4.md:
--------------------------------------------------------------------------------
1 | # Exercise 4: Gather the Training Data
2 |
3 | 1. Press the reset button on the board
4 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
5 | 1. Make a punch gesture with the board in your hand - you should see the sensor data log in the Serial Monitor
6 | 1. Repeat 10 times to gather more data
7 | 1. Copy and paste the data from the serial output into a new text file called `punch.csv` using your favorite text editor (see the sample format below)
8 | 1. Close the Serial Monitor
9 | 1. Press the reset button on the board
10 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
11 | 1. Make a flex gesture with the board in your hand
12 | 1. Repeat 10 times
13 | 1. Copy and paste the serial output into a new text file called `flex.csv` using your favorite text editor
14 |
15 | 
16 |
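
Each gesture file is simply the text you copied from the Serial Monitor: a comma-separated header line followed by one row of accelerometer and gyroscope readings per sample. Roughly, with made-up numbers purely to illustrate the format (the exact header line comes from the IMU_Capture sketch's output):

```
aX,aY,aZ,gX,gY,gZ
0.109,-0.093,1.031,0.244,1.098,-0.183
0.121,-0.087,1.028,0.305,1.160,-0.214
...
```
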
17 | Next [Exercise 5: Machine Learning ](exercise5.md)
18 |
19 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise5.md:
--------------------------------------------------------------------------------
1 | # Exercise 5: Machine Learning
2 |
3 | We're going to use [Google Colab](https://colab.research.google.com) to train our machine learning model. Colab provides a Jupyter notebook environment that runs in a web browser.
4 |
5 | 
6 |
7 | ## 3rd Party Cookies
8 |
9 | Some of you might see an error about 3rd party cookies.
10 |
11 | 
12 |
13 | You can enable 3rd party cookies, or better yet, add an exception for `[*.]googleusercontent.com`.
14 |
15 | 
16 |
17 | ## Open the Notebook
18 |
19 | Open the notebook in Colab.
20 |
21 | https://colab.research.google.com/github/arduino/ArduinoTensorFlowLiteTutorials/blob/master/GestureToEmoji/arduino_tinyml_workshop.ipynb
22 |
23 | Next [Exercise 6: Classifying IMU Data](exercise6.md)
24 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise6.md:
--------------------------------------------------------------------------------
1 | # Exercise 6: Classifying IMU Data
2 |
3 | 1. Open __ArduinoSketches/IMU_Classifier/IMU_Classifier.ino__ in the Arduino IDE.
4 | 1. Switch to the model.h tab
5 | 1. Replace the contents of model.h with the version you downloaded from Colab (its shape is shown below)
6 | 1. Upload the sketch: `Sketch -> Upload`
7 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
8 | 1. Press the button, and perform a gesture
9 | 1. The confidence of each gesture will be printed to the Serial Monitor (0 -> low confidence, 1 -> high confidence)
10 |
11 | 
12 |
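
For reference, the model.h you downloaded is just the TensorFlow Lite model dumped as a C byte array by the Colab notebook. It looks roughly like this (the byte values here are only an illustration; yours will differ):

```cpp
// model.h: the trained TFLite model as a C byte array (example bytes only)
const unsigned char model[] = {
  0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33,
  // ... many thousands of bytes ...
};
```
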
13 | Next [Exercise 7: Gesture Controlled USB Emoji Keyboard](exercise7.md)
14 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise7.md:
--------------------------------------------------------------------------------
1 | # Exercise 7: Gesture Controlled USB Emoji Keyboard
2 |
3 | The Emoji_Button example, __ArduinoSketches/Emoji_Button/Emoji_Button.ino__, shows how to create a USB keyboard that prints an emoji character. (This only works on Linux and macOS, so if you're running Windows, find a friend to work on this exercise.)
4 |
5 | Try combining the Emoji_Button example with the IMU_Classifier sketch to create a gesture-controlled emoji keyboard. 👊
6 |
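
There is no single right answer, but one possible outline is sketched below: in the classifier sketch, once the gesture confidences are computed, call the same keyboard code Emoji_Button uses whenever a confidence is high enough. The function names are placeholders, not part of either sketch; fill their bodies with the relevant code from Emoji_Button.ino.

```cpp
// Hypothetical glue code: sendPunchEmoji()/sendFlexEmoji() are placeholder
// names whose bodies should be the USB keyboard code from Emoji_Button.ino.
const float CONFIDENCE_THRESHOLD = 0.8;   // tune to taste

void sendPunchEmoji() { /* copy the punch emoji keyboard code here */ }
void sendFlexEmoji()  { /* copy the flex emoji keyboard code here */ }

// Call this from the classifier loop once per detected gesture.
void handleGesture(int gestureIndex, float confidence) {
  if (confidence < CONFIDENCE_THRESHOLD) {
    return;                                // ignore low-confidence detections
  }
  if (gestureIndex == 0) {
    sendPunchEmoji();
  } else if (gestureIndex == 1) {
    sendFlexEmoji();
  }
}
```
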
7 | Next [Exercise 8: Next Steps](exercise8.md)
8 |
--------------------------------------------------------------------------------
/GestureToEmoji/exercises/exercise8.md:
--------------------------------------------------------------------------------
1 | # Exercise 8: Next Steps
2 |
3 | Now that you have this working...
4 |
5 | - Load the code to record gestures. Create additional CSV files with more gestures. Retrain the model in Colab. Load the new model back onto the Arduino.
6 |   - Note: you'll need to edit the code to add the names of the new gesture files (see the sketch at the end of this list)
7 |
8 | - Try increasing and decreasing the number of recordings per gesture. How does this impact performance?
9 |
10 | - Try using only the accelerometer or gyroscope data (not both). How does this impact performance?
11 |
12 | - Tweak the model structure and parameters
13 | - Can you get better results?
14 |   - Can you reduce the size and still get "good" results?
15 |
16 | - Grab a [board](https://store.arduino.cc/usa/nano-33-ble-sense) and the [TinyML book](http://shop.oreilly.com/product/0636920254508.do) to continue at home.
17 |
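
For the first bullet, the gesture names live in two places: a list near the top of the Colab notebook and an array in the classifier sketch, and the two must stay in the same order. Here is a sketch of the Arduino side, assuming you recorded a hypothetical `wave.csv` gesture (check IMU_Classifier.ino for the exact array):

```cpp
// In the classifier sketch: the index order here must match the order of the
// gesture list used to train the model in the Colab notebook.
const char* GESTURES[] = {
  "punch",
  "flex",
  "wave"   // newly added gesture (retrain the model with wave.csv first)
};
```
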
18 |
--------------------------------------------------------------------------------
/GestureToEmoji/images/AddZipLibrary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/AddZipLibrary.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/AddZipLibrary_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/AddZipLibrary_2.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/ArduinoIDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/ArduinoIDE.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/Arduino_logo_R_highquality.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/Arduino_logo_R_highquality.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/BoardManager-Menu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/BoardManager-Menu.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/BoardsManager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/BoardsManager.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/IDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/IDE.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/InstallBoardDefinitions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/InstallBoardDefinitions.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/JustDownload.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/JustDownload.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/ManageLibraries.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/ManageLibraries.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/NANO-33-BLE-Sense-with-headers.svg:
--------------------------------------------------------------------------------
1 |
542 |
--------------------------------------------------------------------------------
/GestureToEmoji/images/accelerometer-example-serial-plotter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/accelerometer-example-serial-plotter.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/arduino-classifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/arduino-classifier.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/ble-sense.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/ble-sense.jpg
--------------------------------------------------------------------------------
/GestureToEmoji/images/colab-3rd-party-cookie-exception.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/colab-3rd-party-cookie-exception.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/colab-error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/colab-error.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/colab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/colab.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/download-repo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/download-repo.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/library-arduinolsm9ds1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/library-arduinolsm9ds1.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/lsm9ds1-examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/lsm9ds1-examples.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/serial-monitor-imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/serial-monitor-imu.png
--------------------------------------------------------------------------------
/GestureToEmoji/images/serial-plotter-imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arduino/ArduinoTensorFlowLiteTutorials/5ff400ed5de826261d3e2ee4247cae026321bac2/GestureToEmoji/images/serial-plotter-imu.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Machine Learning on Arduino
2 | ## Arduino TensorFlow Lite Tutorials
3 |
4 | [](https://github.com/arduino/ArduinoTensorFlowLiteTutorials/actions/workflows/compile-sketches.yml)
5 |
6 | This GitHub repository contains tutorials for using TensorFlow Lite on Arduino hardware.
7 |
8 | * [GestureToEmoji](GestureToEmoji/)
9 | * Use the Arduino Nano 33 BLE Sense to convert motion gestures to emojis
10 | * [FruitToEmoji](FruitToEmoji/)
11 | * Use the Arduino Nano 33 BLE Sense to classify fruit using the RGB color and proximity sensors
12 |
--------------------------------------------------------------------------------