├── .gitignore ├── ArduinoSketches ├── Emoji_Keyboard │ ├── Emoji_Keyboard.ino │ └── model.h ├── Emoji_Test │ └── Emoji_Test.ino ├── Hardware_Test │ └── Hardware_Test.ino ├── IMU_Capture │ └── IMU_Capture.ino └── IMU_Classifier │ ├── IMU_Classifier.ino │ └── model.h ├── README.md ├── arduino_tinyml_workshop.ipynb └── exercises ├── exercise1.md ├── exercise10.md ├── exercise2.md ├── exercise3.md ├── exercise4.md ├── exercise5.md ├── exercise6.md ├── exercise7.md ├── exercise8.md ├── exercise9.md ├── images ├── AI-IOT-devfest-AZ-2020-horiz.png ├── ArduinoIDE.png ├── Arduino_logo_R_highquality.png ├── BoardManager-Menu.png ├── BoardsManager.png ├── ChariotSolutions.png ├── JustDownload.png ├── ManageLibraries.png ├── arduino-classifier.png ├── ble-sense.jpg ├── charlie_gerard_street_fighter.gif ├── clone-or-download.png ├── colab-3rd-party-cookie-exception.png ├── colab-error.png ├── colab.png ├── emoji-google-doc.png ├── imu_classifier_emoji_linux.png ├── imu_classifier_emoji_mac.png ├── library-arduinolsm9ds1.png ├── library-tensorflowlite.png ├── macos-unicode-hex-input.png ├── nano-33-ble.jpg ├── nano-33-ble_iso.jpg ├── serial-monitor-imu.png ├── serial-plotter-imu.png ├── web-serial-choose-port.png ├── web-serial-disabled.png ├── web-serial-flag-enabled.png └── web-serial-monitor.png └── intro.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /ArduinoSketches/Emoji_Keyboard/Emoji_Keyboard.ino: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | TODO cleanup code and document!!!! 4 | 5 | IMU Classifier 6 | 7 | This example uses the on-board IMU to start reading acceleration and gyroscope 8 | data from on-board IMU, once enough samples are read, it then uses a 9 | TensorFlow Lite (Micro) model to try to classify the movement as a known gesture. 10 | 11 | Note: The direct use of C/C++ pointers, namespaces, and dynamic memory is generally 12 | discouraged in Arduino examples, and in the future the TensorFlowLite library 13 | might change to make the sketch simpler. 14 | 15 | The circuit: 16 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board. 17 | 18 | Created by Don Coleman, Sandeep Mistry 19 | Modified by Dominic Pajak, Sandeep Mistry 20 | 21 | This example code is in the public domain. 22 | */ 23 | #include 24 | #include 25 | #include 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | 34 | #include "model.h" 35 | 36 | // Select an OS: 37 | #define MACOS // You'll need to enable and select the unicode keyboard: System Preferences -> Keyboard -> Input Sources -> + -> Others -> Unicode Hex Input 38 | //#define LINUX 39 | 40 | #if !defined(MACOS) && !defined(LINUX) 41 | #error "Please select an OS!" 42 | #endif 43 | 44 | // use table: https://apps.timwhitlock.info/emoji/tables/unicode 45 | const int bicep = 0x1f4aa; 46 | const int punch = 0x1f44a; 47 | 48 | const float accelerationThreshold = 2.5; // threshold of significant in G's 49 | const int numSamples = 119; 50 | 51 | int samplesRead = numSamples; 52 | 53 | // global variables used for TensorFlow Lite (Micro) 54 | tflite::MicroErrorReporter tflErrorReporter; 55 | 56 | // pull in all the TFLM ops, you can remove this line and 57 | // only pull in the TFLM ops you need, if would like to reduce 58 | // the compiled size of the sketch. 
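// One possible way to do that, shown here only as a hedged sketch: assuming this
// version of the Arduino_TensorFlowLite library exposes tflite::MicroMutableOpResolver,
// AddBuiltin(), and the tflite::ops::micro::Register_* helpers, you could declare a
// MicroMutableOpResolver instead of AllOpsResolver and, in setup() before the
// interpreter is created, register only the ops the converted model uses, e.g.:
//
//   tflite::MicroMutableOpResolver tflOpsResolver;
//
//   tflOpsResolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED,
//                             tflite::ops::micro::Register_FULLY_CONNECTED());
//   tflOpsResolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
//                             tflite::ops::micro::Register_SOFTMAX());
//
// The exact op list depends on the model you trained; check the converter output if unsure.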
59 | tflite::ops::micro::AllOpsResolver tflOpsResolver; 60 | 61 | const tflite::Model* tflModel = nullptr; 62 | tflite::MicroInterpreter* tflInterpreter = nullptr; 63 | TfLiteTensor* tflInputTensor = nullptr; 64 | TfLiteTensor* tflOutputTensor = nullptr; 65 | 66 | // Create a static memory buffer for TFLM, the size may need to 67 | // be adjusted based on the model you are using 68 | constexpr int tensorArenaSize = 8 * 1024; 69 | byte tensorArena[tensorArenaSize]; 70 | 71 | // array to map gesture index to a name 72 | const char* GESTURES[] = { 73 | "punch", 74 | "flex" 75 | }; 76 | 77 | // array to map gesture index to an emjoi - TODO associative array 78 | const int EMOJIS[] = { 79 | punch, 80 | bicep 81 | }; 82 | 83 | #define NUM_GESTURES (sizeof(GESTURES) / sizeof(GESTURES[0])) 84 | 85 | USBKeyboard keyboard; 86 | 87 | void setup() { 88 | Serial.begin(9600); 89 | //while (!Serial); 90 | 91 | // initialize the IMU 92 | if (!IMU.begin()) { 93 | Serial.println("Failed to initialize IMU!"); 94 | while (1); 95 | } 96 | 97 | // print out the samples rates of the IMUs 98 | Serial.print("Accelerometer sample rate = "); 99 | Serial.print(IMU.accelerationSampleRate()); 100 | Serial.println(" Hz"); 101 | Serial.print("Gyroscope sample rate = "); 102 | Serial.print(IMU.gyroscopeSampleRate()); 103 | Serial.println(" Hz"); 104 | 105 | Serial.println(); 106 | 107 | // get the TFL representation of the model byte array 108 | tflModel = tflite::GetModel(model); 109 | if (tflModel->version() != TFLITE_SCHEMA_VERSION) { 110 | Serial.println("Model schema mismatch!"); 111 | while (1); 112 | } 113 | 114 | // Create an interpreter to run the model 115 | tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize, &tflErrorReporter); 116 | 117 | // Allocate memory for the model's input and output tensors 118 | tflInterpreter->AllocateTensors(); 119 | 120 | // Get pointers for the model's input and output tensors 121 | tflInputTensor = tflInterpreter->input(0); 122 | tflOutputTensor = tflInterpreter->output(0); 123 | } 124 | 125 | void loop() { 126 | float aX, aY, aZ, gX, gY, gZ; 127 | 128 | // wait for significant motion 129 | while (samplesRead == numSamples) { 130 | if (IMU.accelerationAvailable()) { 131 | // read the acceleration data 132 | IMU.readAcceleration(aX, aY, aZ); 133 | 134 | // sum up the absolutes 135 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ); 136 | 137 | // check if it's above the threshold 138 | if (aSum >= accelerationThreshold) { 139 | // reset the sample read count 140 | samplesRead = 0; 141 | break; 142 | } 143 | } 144 | } 145 | 146 | // check if the all the required samples have been read since 147 | // the last time the significant motion was detected 148 | while (samplesRead < numSamples) { 149 | // check if new acceleration AND gyroscope data is available 150 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) { 151 | // read the acceleration and gyroscope data 152 | IMU.readAcceleration(aX, aY, aZ); 153 | IMU.readGyroscope(gX, gY, gZ); 154 | 155 | // normalize the IMU data between 0 to 1 and store in the model's 156 | // input tensor 157 | tflInputTensor->data.f[samplesRead * 6 + 0] = (aX + 4.0) / 8.0; 158 | tflInputTensor->data.f[samplesRead * 6 + 1] = (aY + 4.0) / 8.0; 159 | tflInputTensor->data.f[samplesRead * 6 + 2] = (aZ + 4.0) / 8.0; 160 | tflInputTensor->data.f[samplesRead * 6 + 3] = (gX + 2000.0) / 4000.0; 161 | tflInputTensor->data.f[samplesRead * 6 + 4] = (gY + 2000.0) / 4000.0; 162 | 
tflInputTensor->data.f[samplesRead * 6 + 5] = (gZ + 2000.0) / 4000.0; 163 | 164 | samplesRead++; 165 | 166 | if (samplesRead == numSamples) { 167 | // Run inferencing 168 | TfLiteStatus invokeStatus = tflInterpreter->Invoke(); 169 | if (invokeStatus != kTfLiteOk) { 170 | Serial.println("Invoke failed!"); 171 | while (1); 172 | return; 173 | } 174 | 175 | // Loop through the output tensor values from the model 176 | // for (int i = 0; i < NUM_GESTURES; i++) { 177 | // Serial.print(GESTURES[i]); 178 | // Serial.print(": "); 179 | // Serial.println(tflOutputTensor->data.f[i], 6); 180 | // } 181 | 182 | // Loop through the output tensor values from the model 183 | for (int i = 0; i < NUM_GESTURES; i++) { 184 | if (tflOutputTensor->data.f[i] > 0.75) { 185 | Serial.print(GESTURES[i]); 186 | sendUtf8(EMOJIS[i]); 187 | } 188 | } 189 | 190 | Serial.println(); 191 | } 192 | } 193 | } 194 | } 195 | 196 | void sendUtf8(unsigned long c) { 197 | String s; 198 | 199 | #if defined(MACOS) 200 | // https://apple.stackexchange.com/questions/183045/how-can-i-type-unicode-characters-without-using-the-mouse 201 | 202 | s = String(utf8ToUtf16(c), HEX); 203 | 204 | for (int i = 0; i < s.length(); i++) { 205 | keyboard.key_code(s[i], KEY_ALT); 206 | } 207 | #elif defined(LINUX) 208 | s = String(c, HEX); 209 | 210 | keyboard.key_code('u', KEY_CTRL | KEY_SHIFT); 211 | 212 | for (int i = 0; i < s.length(); i++) { 213 | keyboard.key_code(s[i]); 214 | } 215 | #endif 216 | keyboard.key_code(' '); 217 | } 218 | 219 | // based on https://stackoverflow.com/a/6240819/2020087 220 | unsigned long utf8ToUtf16(unsigned long in) { 221 | unsigned long result; 222 | 223 | in -= 0x10000; 224 | 225 | result |= (in & 0x3ff); 226 | result |= (in << 6) & 0x03ff0000; 227 | result |= 0xd800dc00; 228 | 229 | return result; 230 | } 231 | -------------------------------------------------------------------------------- /ArduinoSketches/Emoji_Keyboard/model.h: -------------------------------------------------------------------------------- 1 | const unsigned char model[] = { 2 | }; 3 | -------------------------------------------------------------------------------- /ArduinoSketches/Emoji_Test/Emoji_Test.ino: -------------------------------------------------------------------------------- 1 | /* 2 | This example sends an emoji character over USB HID when here is significant accelerometer motion 3 | 4 | Note: Only macOS and Linux as supported at this time, and the use of 5 | #define is generally discouraged in Arduino examples 6 | 7 | The circuit: 8 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board. 9 | 10 | Created by Don Coleman, Sandeep Mistry 11 | 12 | This example code is in the public domain. 13 | */ 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | // Select an OS: 20 | // #define MACOS // You'll need to enable and select the unicode keyboard: System Preferences -> Keyboard -> Input Sources -> + -> Others -> Unicode Hex Input 21 | // #define LINUX 22 | 23 | #if !defined(MACOS) && !defined(LINUX) 24 | #error "Please select an OS!" 
25 | #endif 26 | 27 | // http://www.unicode.org/emoji/charts/full-emoji-list.html 28 | const int bicep = 0x1f4aa; 29 | const int punch = 0x1f44a; 30 | 31 | const float accelerationThreshold = 2.5; // threshold of significant in G's 32 | 33 | USBKeyboard keyboard; 34 | 35 | void setup() { 36 | if (!IMU.begin()) { 37 | Serial.println("Failed to initialize IMU!"); 38 | while (1); 39 | } 40 | } 41 | 42 | void loop() { 43 | float aX, aY, aZ; 44 | 45 | if (IMU.accelerationAvailable()) { 46 | // read the acceleration data 47 | IMU.readAcceleration(aX, aY, aZ); 48 | 49 | // sum up the absolute values of the acceleration 50 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ); 51 | 52 | // wait for significant motion 53 | // check if acceleration is above the threshold 54 | if (aSum >= accelerationThreshold) { 55 | // print an emoji 56 | sentUtf8(bicep); 57 | // delay to prevent too many emojis 58 | delay(250); 59 | } 60 | } 61 | 62 | } 63 | 64 | void sentUtf8(unsigned long c) { 65 | String s; 66 | 67 | #if defined(MACOS) 68 | // https://apple.stackexchange.com/questions/183045/how-can-i-type-unicode-characters-without-using-the-mouse 69 | 70 | s = String(utf8ToUtf16(c), HEX); 71 | 72 | for (int i = 0; i < s.length(); i++) { 73 | keyboard.key_code(s[i], KEY_ALT); 74 | } 75 | #elif defined(LINUX) 76 | s = String(c, HEX); 77 | 78 | keyboard.key_code('u', KEY_CTRL | KEY_SHIFT); 79 | 80 | for (int i = 0; i < s.length(); i++) { 81 | keyboard.key_code(s[i]); 82 | } 83 | #endif 84 | keyboard.key_code(' '); 85 | } 86 | 87 | // based on https://stackoverflow.com/a/6240819/2020087 88 | unsigned long utf8ToUtf16(unsigned long in) { 89 | unsigned long result; 90 | 91 | in -= 0x10000; 92 | 93 | result |= (in & 0x3ff); 94 | result |= (in << 6) & 0x03ff0000; 95 | result |= 0xd800dc00; 96 | 97 | return result; 98 | } 99 | -------------------------------------------------------------------------------- /ArduinoSketches/Hardware_Test/Hardware_Test.ino: -------------------------------------------------------------------------------- 1 | /* 2 | Hardware Test 3 | 4 | This sketch makes sure the development environment is set up correctly. 5 | 6 | Load the code, then open the serial terminal. The LED should blink quickly. 7 | 8 | This example code is in the public domain. 9 | */ 10 | 11 | #include 12 | 13 | void setup() { 14 | // initialize digital pin LED_BUILTIN as an output. 15 | pinMode(LED_BUILTIN, OUTPUT); 16 | 17 | // Turn on the LED 18 | digitalWrite(LED_BUILTIN, HIGH); 19 | 20 | Serial.begin(9600); 21 | 22 | // wait forever for the serial monitor to open 23 | while (!Serial); 24 | 25 | Serial.println("Arduino TinyML Workshop Hardware Test"); 26 | 27 | if (!IMU.begin()) { 28 | Serial.println("Failed to initialize IMU!"); 29 | while (1); 30 | } 31 | 32 | Serial.println("Things look good. 
You're ready to go."); 33 | 34 | } 35 | 36 | // the loop function runs over and over again forever 37 | void loop() { 38 | digitalWrite(LED_BUILTIN, HIGH); // turn the LED 39 | delay(250); // wait for 1/4 second 40 | digitalWrite(LED_BUILTIN, LOW); // turn the LED 41 | delay(250); // wait for 1/4 second 42 | } 43 | -------------------------------------------------------------------------------- /ArduinoSketches/IMU_Capture/IMU_Capture.ino: -------------------------------------------------------------------------------- 1 | /* 2 | IMU Capture 3 | 4 | This example uses the on-board IMU to start reading acceleration and gyroscope 5 | data from on-board IMU and prints it to the Serial Monitor for one second 6 | when the significant motion is detected. 7 | 8 | While waiting for significant motion, data is recorded in memory, to avoid 9 | loosing the starting portion of the gesture movement. There will be a delay 10 | between capturing the data and outputting it to the Serial Monitor. 11 | 12 | You can also use the Serial Plotter to graph the data. 13 | 14 | The circuit: 15 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board. 16 | 17 | Created by Don Coleman, Dominic Pajak, Sandeep Mistry 18 | 19 | This example code is in the public domain. 20 | */ 21 | 22 | #include 23 | 24 | const float ACCELERATION_RMS_THRESHOLD = 2.0; // RMS (root mean square) threshold of significant motion in G's 25 | const int NUM_CAPTURED_SAMPLES_PER_GESTURE = 119; 26 | const int NUM_FEATURES_PER_SAMPLE = 6; 27 | const int TOTAL_SAMPLES = NUM_CAPTURED_SAMPLES_PER_GESTURE * NUM_FEATURES_PER_SAMPLE; 28 | const int THRESHOLD_SAMPLE_INDEX = ((NUM_CAPTURED_SAMPLES_PER_GESTURE / 3) * NUM_FEATURES_PER_SAMPLE); // one-third of data comes before threshold 29 | 30 | float samples[TOTAL_SAMPLES]; 31 | 32 | int capturedSamples = 0; 33 | 34 | void setup() { 35 | Serial.begin(9600); 36 | while (!Serial); 37 | 38 | if (!IMU.begin()) { 39 | Serial.println("Failed to initialize IMU!"); 40 | while (1); 41 | } 42 | 43 | // print the header 44 | Serial.println("aX,aY,aZ,gX,gY,gZ"); 45 | } 46 | 47 | void loop() { 48 | float aX, aY, aZ, gX, gY, gZ; 49 | 50 | // wait for threshold trigger, but keep N samples before threshold occurs 51 | while (1) { 52 | // wait for both acceleration and gyroscope data to be available 53 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) { 54 | // read the acceleration and gyroscope data 55 | IMU.readAcceleration(aX, aY, aZ); 56 | IMU.readGyroscope(gX, gY, gZ); 57 | 58 | // shift values over one position (TODO: replace memmove with for loop?) 
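      // For reference, an equivalent plain for loop for the memmove below,
      // which drops the oldest sample and slides the pre-trigger window down
      // by one sample (NUM_FEATURES_PER_SAMPLE floats):
      //
      //   for (int i = 0; i < THRESHOLD_SAMPLE_INDEX; i++) {
      //     samples[i] = samples[i + NUM_FEATURES_PER_SAMPLE];
      //   }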
59 | memmove(samples, samples + NUM_FEATURES_PER_SAMPLE, sizeof(float) * NUM_FEATURES_PER_SAMPLE * 39); 60 | 61 | // insert the new data at the threshold index 62 | samples[THRESHOLD_SAMPLE_INDEX + 0] = aX; 63 | samples[THRESHOLD_SAMPLE_INDEX + 1] = aY; 64 | samples[THRESHOLD_SAMPLE_INDEX + 2] = aZ; 65 | samples[THRESHOLD_SAMPLE_INDEX + 3] = gX; 66 | samples[THRESHOLD_SAMPLE_INDEX + 4] = gY; 67 | samples[THRESHOLD_SAMPLE_INDEX + 5] = gZ; 68 | 69 | // calculate the RMS of the acceleration 70 | float accelerationRMS = sqrt(fabs(aX) + fabs(aY) + fabs(aZ)); 71 | 72 | if (accelerationRMS > ACCELERATION_RMS_THRESHOLD) { 73 | // threshold reached, break the loop 74 | break; 75 | } 76 | } 77 | } 78 | 79 | // use the threshold index as the starting point for the remainder of the data 80 | capturedSamples = THRESHOLD_SAMPLE_INDEX + NUM_FEATURES_PER_SAMPLE; 81 | 82 | // collect the remaining samples 83 | while (capturedSamples < TOTAL_SAMPLES) { 84 | // wait for both acceleration and gyroscope data to be available 85 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) { 86 | // read the acceleration and gyroscope data 87 | IMU.readAcceleration(aX, aY, aZ); 88 | IMU.readGyroscope(gX, gY, gZ); 89 | 90 | // insert the new data 91 | samples[capturedSamples + 0] = aX; 92 | samples[capturedSamples + 1] = aY; 93 | samples[capturedSamples + 2] = aZ; 94 | samples[capturedSamples + 3] = gX; 95 | samples[capturedSamples + 4] = gY; 96 | samples[capturedSamples + 5] = gZ; 97 | 98 | capturedSamples += NUM_FEATURES_PER_SAMPLE; 99 | } 100 | } 101 | 102 | // print the samples 103 | for (int i = 0; i < TOTAL_SAMPLES; i += NUM_FEATURES_PER_SAMPLE) { 104 | // print the data in CSV format 105 | Serial.print(samples[i + 0], 3); 106 | Serial.print(','); 107 | Serial.print(samples[i + 1], 3); 108 | Serial.print(','); 109 | Serial.print(samples[i + 2], 3); 110 | Serial.print(','); 111 | Serial.print(samples[i + 3], 3); 112 | Serial.print(','); 113 | Serial.print(samples[i + 4], 3); 114 | Serial.print(','); 115 | Serial.print(samples[i + 5], 3); 116 | Serial.println(); 117 | 118 | delayMicroseconds(8403); // delay between each line for Serial Plotter, this matches the 119 Hz data rate of IMU 119 | } 120 | 121 | Serial.println(); // empty line 122 | } 123 | -------------------------------------------------------------------------------- /ArduinoSketches/IMU_Classifier/IMU_Classifier.ino: -------------------------------------------------------------------------------- 1 | /* 2 | IMU Classifier 3 | 4 | This example uses the on-board IMU to start reading acceleration and gyroscope 5 | data from on-board IMU, once enough samples are read, it then uses a 6 | TensorFlow Lite (Micro) model to try to classify the movement as a known gesture. 7 | 8 | Note: The direct use of C/C++ pointers, namespaces, and dynamic memory is generally 9 | discouraged in Arduino examples, and in the future the TensorFlowLite library 10 | might change to make the sketch simpler. 11 | 12 | The circuit: 13 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board. 14 | 15 | Created by Don Coleman, Sandeep Mistry 16 | Modified by Dominic Pajak, Sandeep Mistry 17 | 18 | This example code is in the public domain. 
19 | */ 20 | 21 | #include 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | #include "model.h" 31 | 32 | const float ACCELERATION_RMS_THRESHOLD = 2.0; // RMS (root mean square) threshold of significant motion in G's 33 | const int NUM_CAPTURED_SAMPLES_PER_GESTURE = 119; 34 | const int NUM_FEATURES_PER_SAMPLE = 6; 35 | const int TOTAL_SAMPLES = NUM_CAPTURED_SAMPLES_PER_GESTURE * NUM_FEATURES_PER_SAMPLE; 36 | const int THRESHOLD_SAMPLE_INDEX = ((NUM_CAPTURED_SAMPLES_PER_GESTURE / 3) * NUM_FEATURES_PER_SAMPLE); // one-third of data comes before threshold 37 | 38 | int capturedSamples = 0; 39 | 40 | // global variables used for TensorFlow Lite (Micro) 41 | tflite::MicroErrorReporter tflErrorReporter; 42 | 43 | // pull in all the TFLM ops, you can remove this line and 44 | // only pull in the TFLM ops you need, if would like to reduce 45 | // the compiled size of the sketch. 46 | tflite::ops::micro::AllOpsResolver tflOpsResolver; 47 | 48 | const tflite::Model* tflModel = nullptr; 49 | tflite::MicroInterpreter* tflInterpreter = nullptr; 50 | TfLiteTensor* tflInputTensor = nullptr; 51 | TfLiteTensor* tflOutputTensor = nullptr; 52 | 53 | // Create a static memory buffer for TFLM, the size may need to 54 | // be adjusted based on the model you are using 55 | constexpr int tensorArenaSize = 8 * 1024; 56 | byte tensorArena[tensorArenaSize]; 57 | 58 | // array to map gesture index to a name 59 | const char* GESTURES[] = { 60 | "punch", 61 | "flex" 62 | }; 63 | 64 | #define NUM_GESTURES (sizeof(GESTURES) / sizeof(GESTURES[0])) 65 | 66 | void setup() { 67 | Serial.begin(9600); 68 | while (!Serial); 69 | 70 | // initialize the IMU 71 | if (!IMU.begin()) { 72 | Serial.println("Failed to initialize IMU!"); 73 | while (1); 74 | } 75 | 76 | // print out the samples rates of the IMUs 77 | Serial.print("Accelerometer sample rate = "); 78 | Serial.print(IMU.accelerationSampleRate()); 79 | Serial.println(" Hz"); 80 | Serial.print("Gyroscope sample rate = "); 81 | Serial.print(IMU.gyroscopeSampleRate()); 82 | Serial.println(" Hz"); 83 | 84 | Serial.println(); 85 | 86 | // get the TFL representation of the model byte array 87 | tflModel = tflite::GetModel(model); 88 | if (tflModel->version() != TFLITE_SCHEMA_VERSION) { 89 | Serial.println("Model schema mismatch!"); 90 | while (1); 91 | } 92 | 93 | // Create an interpreter to run the model 94 | tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize, &tflErrorReporter); 95 | 96 | // Allocate memory for the model's input and output tensors 97 | tflInterpreter->AllocateTensors(); 98 | 99 | // Get pointers for the model's input and output tensors 100 | tflInputTensor = tflInterpreter->input(0); 101 | tflOutputTensor = tflInterpreter->output(0); 102 | } 103 | 104 | void loop() { 105 | float aX, aY, aZ, gX, gY, gZ; 106 | 107 | // wait for threshold trigger, but keep N samples before threshold occurs 108 | while (1) { 109 | // wait for both acceleration and gyroscope data to be available 110 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) { 111 | // read the acceleration and gyroscope data 112 | IMU.readAcceleration(aX, aY, aZ); 113 | IMU.readGyroscope(gX, gY, gZ); 114 | 115 | // shift values over one position (TODO: replace memmove with for loop?) 
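      // Same rolling pre-trigger buffer as in IMU_Capture, but written directly
      // into the model's input tensor: the memmove below discards the oldest
      // sample and keeps THRESHOLD_SAMPLE_INDEX floats (roughly the first third
      // of the 119-sample window) so the start of the gesture is not lost.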
116 | memmove(tflInputTensor->data.f, tflInputTensor->data.f + NUM_FEATURES_PER_SAMPLE, sizeof(float) * NUM_FEATURES_PER_SAMPLE * 39); 117 | 118 | // insert the new data at the threshold index 119 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 0] = (aX + 4.0) / 8.0; 120 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 1] = (aY + 4.0) / 8.0; 121 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 2] = (aZ + 4.0) / 8.0; 122 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 3] = (gX + 2000.0) / 4000.0; 123 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 4] = (gY + 2000.0) / 4000.0; 124 | tflInputTensor->data.f[THRESHOLD_SAMPLE_INDEX + 5] = (gZ + 2000.0) / 4000.0; 125 | 126 | // calculate the RMS of the acceleration 127 | float accelerationRMS = sqrt(fabs(aX) + fabs(aY) + fabs(aZ)); 128 | 129 | if (accelerationRMS > ACCELERATION_RMS_THRESHOLD) { 130 | // threshold reached, break the loop 131 | break; 132 | } 133 | } 134 | } 135 | 136 | // use the threshold index as the starting point for the remainder of the data 137 | capturedSamples = THRESHOLD_SAMPLE_INDEX + NUM_FEATURES_PER_SAMPLE; 138 | 139 | // collect the remaining samples 140 | while (capturedSamples < TOTAL_SAMPLES) { 141 | // wait for both acceleration and gyroscope data to be available 142 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) { 143 | // read the acceleration and gyroscope data 144 | IMU.readAcceleration(aX, aY, aZ); 145 | IMU.readGyroscope(gX, gY, gZ); 146 | 147 | // insert the new data 148 | tflInputTensor->data.f[capturedSamples + 0] = (aX + 4.0) / 8.0; 149 | tflInputTensor->data.f[capturedSamples + 1] = (aY + 4.0) / 8.0; 150 | tflInputTensor->data.f[capturedSamples + 2] = (aZ + 4.0) / 8.0; 151 | tflInputTensor->data.f[capturedSamples + 3] = (gX + 2000.0) / 4000.0; 152 | tflInputTensor->data.f[capturedSamples + 4] = (gY + 2000.0) / 4000.0; 153 | tflInputTensor->data.f[capturedSamples + 5] = (gZ + 2000.0) / 4000.0; 154 | 155 | capturedSamples += NUM_FEATURES_PER_SAMPLE; 156 | } 157 | } 158 | 159 | // Run inferencing 160 | TfLiteStatus invokeStatus = tflInterpreter->Invoke(); 161 | 162 | if (invokeStatus != kTfLiteOk) { 163 | Serial.println("Invoke failed!"); 164 | while (1); 165 | return; 166 | } 167 | 168 | // Loop through the output tensor values from the model 169 | for (int i = 0; i < NUM_GESTURES; i++) { 170 | Serial.print(GESTURES[i]); 171 | Serial.print(": "); 172 | Serial.println(tflOutputTensor->data.f[i], 6); 173 | } 174 | Serial.println(); 175 | } 176 | -------------------------------------------------------------------------------- /ArduinoSketches/IMU_Classifier/model.h: -------------------------------------------------------------------------------- 1 | const unsigned char model[] = { 2 | 3 | }; 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TinyML Application Development for Everyone 2 | 3 | AIoT Devfest Logo Arduino Nano 33 BLE board 4 | 5 | ## Hands-on workshop at AIoT Devfest January 2020 6 | 7 | In this workshop we will teach an Arduino board to recognize gestures! We will capture motion data from the [Arduino Nano 33 BLE](https://store.arduino.cc/arduino-nano-33-ble) board[1](#note1), import it into TensorFlow to train a model, and deploy a classifier onto the board using [TensorFlow Lite for microcontrollers](https://www.tensorflow.org/lite/microcontrollers/overview). 
8 | 9 | The hardware for this workshop has been provided by [Arduino](https://arduino.cc) 10 | 11 | ## Exercises 12 | 13 | * [Introduction](exercises/intro.md) 14 | * [Exercise 1: Development Environment](exercises/exercise1.md) 15 | * [Exercise 2: Source Code](exercises/exercise2.md) 16 | * [Exercise 3: Hardware](exercises/exercise3.md) 17 | * [Exercise 4: Visualizing the IMU Data](exercises/exercise4.md) 18 | * [Exercise 5: Gather the Training Data](exercises/exercise5.md) 19 | * [Exercise 6: Machine Learning](exercises/exercise6.md) 20 | * [Exercise 7: Classifying IMU Data](exercises/exercise7.md) 21 | * [Exercise 8: Emojis](exercises/exercise8.md) 22 | * [Exercise 9: Gesture Controlled USB Emoji Keyboard](exercises/exercise9.md) 23 | * [Exercise 10: Next Steps](exercises/exercise10.md) 24 | 25 | This workshop material was developed by Sandeep Mistry and Don Coleman. 26 | 27 | Arduino Logo 28 | Chariot Solutions Logo 29 | 30 | Previous versions 31 | * https://github.com/sandeepmistry/aimldevfest-workshop-2019 32 | * https://github.com/arduino/AIoT-Dev-Summit-2019 33 | 34 | 35 | 36 | 1: You can also use the [Arduino Nano 33 BLE Sense](https://store.arduino.cc/arduino-nano-33-ble-sense) for this workshop. 37 | -------------------------------------------------------------------------------- /arduino_tinyml_workshop.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "arduino_tinyml_workshop.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [], 9 | "toc_visible": true 10 | }, 11 | "kernelspec": { 12 | "name": "python3", 13 | "display_name": "Python 3" 14 | } 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "id": "f92-4Hjy7kA8", 21 | "colab_type": "text" 22 | }, 23 | "source": [ 24 | "\n", 25 | "# TinyML on Arduino\n", 26 | "## Gesture recognition tutorial\n", 27 | " * Sandeep Mistry - Arduino\n", 28 | " * Don Coleman - Chariot Solutions\n", 29 | "\n", 30 | " \n", 31 | "https://github.com/don/tinyml-workshop/" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": { 37 | "id": "uvDA8AK7QOq-", 38 | "colab_type": "text" 39 | }, 40 | "source": [ 41 | "## Setup Python Environment \n", 42 | "\n", 43 | "Install up the Python libraries and Linux tools for the code in the notebook." 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "metadata": { 49 | "id": "Y2gs-PL4xDkZ", 50 | "colab_type": "code", 51 | "colab": {} 52 | }, 53 | "source": [ 54 | "# Setup environment\n", 55 | "!apt-get -qq install xxd\n", 56 | "!pip install pandas numpy matplotlib\n", 57 | "%tensorflow_version 2.x\n", 58 | "!pip install tensorflow" 59 | ], 60 | "execution_count": 0, 61 | "outputs": [] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": { 66 | "id": "9lwkeshJk7dg", 67 | "colab_type": "text" 68 | }, 69 | "source": [ 70 | "# Upload Data\n", 71 | "\n", 72 | "1. If necessary, open the panel on the left side of Colab by clicking on the __>__\n", 73 | "1. Select the files tab in the left panel\n", 74 | "1. Drag the `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab." 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": { 80 | "id": "Eh9yve14gUyD", 81 | "colab_type": "text" 82 | }, 83 | "source": [ 84 | "# Graph Data (optional)\n", 85 | "\n", 86 | "Plot the CSV data on two separate graphs, acceleration and gyroscope, because each data set has different units and scale." 
87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "metadata": { 92 | "id": "I65ukChEgyNp", 93 | "colab_type": "code", 94 | "colab": {} 95 | }, 96 | "source": [ 97 | "import matplotlib.pyplot as plt\n", 98 | "import numpy as np\n", 99 | "import pandas as pd\n", 100 | "\n", 101 | "filename = \"punch.csv\"\n", 102 | "\n", 103 | "df = pd.read_csv(\"/content/\" + filename)\n", 104 | "\n", 105 | "index = range(1, len(df['aX']) + 1)\n", 106 | "\n", 107 | "plt.rcParams[\"figure.figsize\"] = (20,10)\n", 108 | "\n", 109 | "plt.plot(index, df['aX'], 'g.', label='x', linestyle='solid', marker=',')\n", 110 | "plt.plot(index, df['aY'], 'b.', label='y', linestyle='solid', marker=',')\n", 111 | "plt.plot(index, df['aZ'], 'r.', label='z', linestyle='solid', marker=',')\n", 112 | "plt.title(\"Acceleration\")\n", 113 | "plt.xlabel(\"Sample #\")\n", 114 | "plt.ylabel(\"Acceleration (G)\")\n", 115 | "plt.legend()\n", 116 | "plt.show()\n", 117 | "\n", 118 | "plt.plot(index, df['gX'], 'g.', label='x', linestyle='solid', marker=',')\n", 119 | "plt.plot(index, df['gY'], 'b.', label='y', linestyle='solid', marker=',')\n", 120 | "plt.plot(index, df['gZ'], 'r.', label='z', linestyle='solid', marker=',')\n", 121 | "plt.title(\"Gyroscope\")\n", 122 | "plt.xlabel(\"Sample #\")\n", 123 | "plt.ylabel(\"Gyroscope (deg/sec)\")\n", 124 | "plt.legend()\n", 125 | "plt.show()\n" 126 | ], 127 | "execution_count": 0, 128 | "outputs": [] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "metadata": { 133 | "id": "kSxUeYPNQbOg", 134 | "colab_type": "text" 135 | }, 136 | "source": [ 137 | "# Train Neural Network\n", 138 | "\n", 139 | "\n", 140 | "\n" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": { 146 | "id": "Gxk414PU3oy3", 147 | "colab_type": "text" 148 | }, 149 | "source": [ 150 | "## Parse and prepare the data\n", 151 | "\n", 152 | "Parse the CSV files and transforms them to a format that can be used to train the fully connected neural network.\n", 153 | "\n", 154 | "If you've recorded additional gestures, update the `GESTURES` list with the names of the additional CSV files.\n" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "metadata": { 160 | "id": "AGChd1FAk5_j", 161 | "colab_type": "code", 162 | "colab": {} 163 | }, 164 | "source": [ 165 | "import matplotlib.pyplot as plt\n", 166 | "import numpy as np\n", 167 | "import pandas as pd\n", 168 | "import tensorflow as tf\n", 169 | "\n", 170 | "print(f\"TensorFlow version = {tf.__version__}\\n\")\n", 171 | "\n", 172 | "# Set a fixed random seed value, for reproducibility, this will allow us to get\n", 173 | "# the same random numbers each time the notebook is run\n", 174 | "SEED = 1337\n", 175 | "np.random.seed(SEED)\n", 176 | "tf.random.set_seed(SEED)\n", 177 | "\n", 178 | "# the list of gestures \n", 179 | "GESTURES = [\n", 180 | " \"punch\",\n", 181 | " \"flex\"\n", 182 | "]\n", 183 | "\n", 184 | "SAMPLES_PER_GESTURE = 119\n", 185 | "\n", 186 | "NUM_GESTURES = len(GESTURES)\n", 187 | "\n", 188 | "# create a one-hot encoded matrix that is used in the output\n", 189 | "ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)\n", 190 | "\n", 191 | "inputs = []\n", 192 | "outputs = []\n", 193 | "\n", 194 | "# read each csv file and push an input and output\n", 195 | "for gesture_index in range(NUM_GESTURES):\n", 196 | " gesture = GESTURES[gesture_index]\n", 197 | " print(f\"Processing index {gesture_index} for gesture '{gesture}'.\")\n", 198 | " \n", 199 | " output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n", 200 | " \n", 201 | " df = 
pd.read_csv(\"/content/\" + gesture + \".csv\")\n", 202 | "\n", 203 | " # get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n", 204 | " df = df.dropna()\n", 205 | " df = df.reset_index(drop=True)\n", 206 | " \n", 207 | " # calculate the number of gesture recordings in the file\n", 208 | " num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n", 209 | " \n", 210 | " print(f\"\\tThere are {num_recordings} recordings of the {gesture} gesture.\")\n", 211 | " \n", 212 | " for i in range(num_recordings):\n", 213 | " tensor = []\n", 214 | " for j in range(SAMPLES_PER_GESTURE):\n", 215 | " index = i * SAMPLES_PER_GESTURE + j\n", 216 | " # normalize the input data, between 0 to 1:\n", 217 | " # - acceleration is between: -4 to +4\n", 218 | " # - gyroscope is between: -2000 to +2000\n", 219 | " tensor += [\n", 220 | " (df['aX'][index] + 4) / 8,\n", 221 | " (df['aY'][index] + 4) / 8,\n", 222 | " (df['aZ'][index] + 4) / 8,\n", 223 | " (df['gX'][index] + 2000) / 4000,\n", 224 | " (df['gY'][index] + 2000) / 4000,\n", 225 | " (df['gZ'][index] + 2000) / 4000\n", 226 | " ]\n", 227 | "\n", 228 | " inputs.append(tensor)\n", 229 | " outputs.append(output)\n", 230 | "\n", 231 | "# convert the list to numpy array\n", 232 | "inputs = np.array(inputs)\n", 233 | "outputs = np.array(outputs)\n", 234 | "\n", 235 | "print(\"Data set parsing and preparation complete.\")" 236 | ], 237 | "execution_count": 0, 238 | "outputs": [] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "metadata": { 243 | "id": "d5_61831d5AM", 244 | "colab_type": "text" 245 | }, 246 | "source": [ 247 | "## Randomize and split the input and output pairs for training\n", 248 | "\n", 249 | "Randomly split input and output pairs into sets of data: 60% for training, 20% for validation, and 20% for testing.\n", 250 | "\n", 251 | " - the training set is used to train the model\n", 252 | " - the validation set is used to measure how well the model is performing during training\n", 253 | " - the testing set is used to test the model after training" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "metadata": { 259 | "id": "QfNEmUZMeIEx", 260 | "colab_type": "code", 261 | "colab": {} 262 | }, 263 | "source": [ 264 | "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n", 265 | "# https://stackoverflow.com/a/37710486/2020087\n", 266 | "num_inputs = len(inputs)\n", 267 | "randomize = np.arange(num_inputs)\n", 268 | "np.random.shuffle(randomize)\n", 269 | "\n", 270 | "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n", 271 | "inputs = inputs[randomize]\n", 272 | "outputs = outputs[randomize]\n", 273 | "\n", 274 | "# Split the recordings (group of samples) into three sets: training, testing and validation\n", 275 | "TRAIN_SPLIT = int(0.6 * num_inputs)\n", 276 | "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n", 277 | "\n", 278 | "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n", 279 | "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n", 280 | "\n", 281 | "print(\"Data set randomization and splitting complete.\")" 282 | ], 283 | "execution_count": 0, 284 | "outputs": [] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "metadata": { 289 | "id": "a9g2n41p24nR", 290 | "colab_type": "text" 291 | }, 292 | "source": [ 293 | "## Build & Train the Model\n", 294 | "\n", 295 | "Build and train a [TensorFlow](https://www.tensorflow.org) model using 
the high-level [Keras](https://www.tensorflow.org/guide/keras) API." 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "metadata": { 301 | "id": "kGNFa-lX24Qo", 302 | "colab_type": "code", 303 | "colab": {} 304 | }, 305 | "source": [ 306 | "# build the model and train it\n", 307 | "model = tf.keras.Sequential()\n", 308 | "model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n", 309 | "model.add(tf.keras.layers.Dense(15, activation='relu'))\n", 310 | "# the final layer is softmax because we only expect one gesture to occur per input\n", 311 | "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax'))\n", 312 | "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n", 313 | "history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n", 314 | "\n" 315 | ], 316 | "execution_count": 0, 317 | "outputs": [] 318 | }, 319 | { 320 | "cell_type": "markdown", 321 | "metadata": { 322 | "id": "NUDPvaJE1wRE", 323 | "colab_type": "text" 324 | }, 325 | "source": [ 326 | "## Verify \n", 327 | "\n", 328 | "Graph the models performance vs validation.\n" 329 | ] 330 | }, 331 | { 332 | "cell_type": "markdown", 333 | "metadata": { 334 | "id": "kxA0zCOaS35v", 335 | "colab_type": "text" 336 | }, 337 | "source": [ 338 | "### Graph the loss\n", 339 | "\n", 340 | "Graph the loss to see when the model stops improving." 341 | ] 342 | }, 343 | { 344 | "cell_type": "code", 345 | "metadata": { 346 | "id": "bvFNHXoQzmcM", 347 | "colab_type": "code", 348 | "colab": {} 349 | }, 350 | "source": [ 351 | "# increase the size of the graphs. The default size is (6,4).\n", 352 | "plt.rcParams[\"figure.figsize\"] = (20,10)\n", 353 | "\n", 354 | "# graph the loss, the model above is configure to use \"mean squared error\" as the loss function\n", 355 | "loss = history.history['loss']\n", 356 | "val_loss = history.history['val_loss']\n", 357 | "epochs = range(1, len(loss) + 1)\n", 358 | "plt.plot(epochs, loss, 'g.', label='Training loss')\n", 359 | "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n", 360 | "plt.title('Training and validation loss')\n", 361 | "plt.xlabel('Epochs')\n", 362 | "plt.ylabel('Loss')\n", 363 | "plt.legend()\n", 364 | "plt.show()\n", 365 | "\n", 366 | "print(plt.rcParams[\"figure.figsize\"])" 367 | ], 368 | "execution_count": 0, 369 | "outputs": [] 370 | }, 371 | { 372 | "cell_type": "markdown", 373 | "metadata": { 374 | "id": "DG3m-VpE1zOd", 375 | "colab_type": "text" 376 | }, 377 | "source": [ 378 | "### Graph the loss again, skipping a bit of the start\n", 379 | "\n", 380 | "We'll graph the same data as the previous code cell, but start at index 100 so we can further zoom in once the model starts to converge." 
381 | ] 382 | }, 383 | { 384 | "cell_type": "code", 385 | "metadata": { 386 | "id": "c3xT7ue2zovd", 387 | "colab_type": "code", 388 | "colab": {} 389 | }, 390 | "source": [ 391 | "# graph the loss again skipping a bit of the start\n", 392 | "SKIP = 100\n", 393 | "plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')\n", 394 | "plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')\n", 395 | "plt.title('Training and validation loss')\n", 396 | "plt.xlabel('Epochs')\n", 397 | "plt.ylabel('Loss')\n", 398 | "plt.legend()\n", 399 | "plt.show()" 400 | ], 401 | "execution_count": 0, 402 | "outputs": [] 403 | }, 404 | { 405 | "cell_type": "markdown", 406 | "metadata": { 407 | "id": "CRjvkFQy2RgS", 408 | "colab_type": "text" 409 | }, 410 | "source": [ 411 | "### Graph the mean absolute error\n", 412 | "\n", 413 | "[Mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) is another metric to judge the performance of the model.\n", 414 | "\n" 415 | ] 416 | }, 417 | { 418 | "cell_type": "code", 419 | "metadata": { 420 | "id": "mBjCf1-2zx9C", 421 | "colab_type": "code", 422 | "colab": {} 423 | }, 424 | "source": [ 425 | "# graph of mean absolute error\n", 426 | "mae = history.history['mae']\n", 427 | "val_mae = history.history['val_mae']\n", 428 | "plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')\n", 429 | "plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')\n", 430 | "plt.title('Training and validation mean absolute error')\n", 431 | "plt.xlabel('Epochs')\n", 432 | "plt.ylabel('MAE')\n", 433 | "plt.legend()\n", 434 | "plt.show()\n" 435 | ], 436 | "execution_count": 0, 437 | "outputs": [] 438 | }, 439 | { 440 | "cell_type": "markdown", 441 | "metadata": { 442 | "id": "guMjtfa42ahM", 443 | "colab_type": "text" 444 | }, 445 | "source": [ 446 | "### Run with Test Data\n", 447 | "Put our test data into the model and compare the predictions vs actual output\n" 448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "metadata": { 453 | "id": "V3Y0CCWJz2EK", 454 | "colab_type": "code", 455 | "colab": {} 456 | }, 457 | "source": [ 458 | "# use the model to predict the test inputs\n", 459 | "predictions = model.predict(inputs_test)\n", 460 | "\n", 461 | "# print the predictions and the expected ouputs\n", 462 | "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n", 463 | "print(\"actual =\\n\", outputs_test)" 464 | ], 465 | "execution_count": 0, 466 | "outputs": [] 467 | }, 468 | { 469 | "cell_type": "markdown", 470 | "metadata": { 471 | "id": "j7DO6xxXVCym", 472 | "colab_type": "text" 473 | }, 474 | "source": [ 475 | "# Convert the Trained Model to Tensor Flow Lite\n", 476 | "\n", 477 | "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out." 
478 | ] 479 | }, 480 | { 481 | "cell_type": "code", 482 | "metadata": { 483 | "id": "0Xn1-Rn9Cp_8", 484 | "colab_type": "code", 485 | "colab": {} 486 | }, 487 | "source": [ 488 | "# Convert the model to the TensorFlow Lite format without quantization\n", 489 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n", 490 | "tflite_model = converter.convert()\n", 491 | "\n", 492 | "# Save the model to disk\n", 493 | "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n", 494 | " \n", 495 | "import os\n", 496 | "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n", 497 | "print(\"Model is %d bytes\" % basic_model_size)\n", 498 | " \n", 499 | " " 500 | ], 501 | "execution_count": 0, 502 | "outputs": [] 503 | }, 504 | { 505 | "cell_type": "markdown", 506 | "metadata": { 507 | "id": "ykccQn7SXrUX", 508 | "colab_type": "text" 509 | }, 510 | "source": [ 511 | "## Encode the Model in an Arduino Header File \n", 512 | "\n", 513 | "The next cell creates a constant byte array that contains the TFlite model. Import the generated model.h file as a tab into your Arduino sketch." 514 | ] 515 | }, 516 | { 517 | "cell_type": "code", 518 | "metadata": { 519 | "id": "9J33uwpNtAku", 520 | "colab_type": "code", 521 | "colab": {} 522 | }, 523 | "source": [ 524 | "!echo \"const unsigned char model[] __attribute__((aligned(4))) = {\" > /content/model.h\n", 525 | "!cat gesture_model.tflite | xxd -i >> /content/model.h\n", 526 | "!echo \"};\" >> /content/model.h\n", 527 | "\n", 528 | "import os\n", 529 | "model_h_size = os.path.getsize(\"model.h\")\n", 530 | "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n", 531 | "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")" 532 | ], 533 | "execution_count": 0, 534 | "outputs": [] 535 | }, 536 | { 537 | "cell_type": "markdown", 538 | "metadata": { 539 | "id": "1eSkHZaLzMId", 540 | "colab_type": "text" 541 | }, 542 | "source": [ 543 | "# Classifying IMU Data\n", 544 | "\n", 545 | "Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n" 546 | ] 547 | } 548 | ] 549 | } -------------------------------------------------------------------------------- /exercises/exercise1.md: -------------------------------------------------------------------------------- 1 | # Exercise 1: Development Environment 2 | 3 | Set up your computer for Arduino development. 4 | 5 | ## Arduino IDE 6 | Install the Arduino IDE from https://arduino.cc/downloads 7 | 8 | ![Arduino IDE Download](images/ArduinoIDE.png) 9 | 10 | ### Arduino nRF528x Boards Definitions 11 | Use the Arduino Boards Manager to install the “Arduino nRF528x Boards (Mbed OS)” definitions. Open the Boards Manager using the menu _Tools -> Board: -> Boards Manager..._ 12 | 13 | ![Arduino Boards Manager](images/BoardManager-Menu.png) 14 | 15 | Search for "Nano 33 BLE" and install the Arduino nRF528x Boards (Mbed OS) definitions. 16 | 17 | ![Arduino nRF528x Board Definitions](images/BoardsManager.png) 18 | 19 | ### Arduino Libraries 20 | Install the following Arduino libraries using the Library manager: 21 | 22 | * TensorFlow Lite library (search for "Arduino_TensorFlowLite") 23 | * Arduino LSM9DS1 library (search for "Arduino_LSM9DS1") 24 | 25 | Open the library manager using the menu _Tools -> Manage Libraries..._ 26 | 27 | ![Arduino Library Manager Menu](images/ManageLibraries.png) 28 | 29 | Search for "Arduino_TensorFlowLite". 
Click the row and press the __Install__ button to install TensorFlow Lite for Microcontrollers 30 | 31 | ![Arduino TensorFlow Lite library](images/library-tensorflowlite.png) 32 | 33 | Search for "Arduino_LSM9DS1". Click the row and press the __Install__ button to install the Arduino LSM9DS1 accelerometer, magnetometer, and gyroscope library. 34 | 35 | ![Arduino LSM9DS1 library](images/library-arduinolsm9ds1.png) 36 | 37 | 38 | __Linux users__ may need to configure permissions so their user can access the serial port. See the [Getting Started Guide for Linux](https://www.arduino.cc/en/guide/linux) on the Arduino website for more information. 39 | 40 | Next [Exercise 2: Source Code](exercise2.md) 41 | -------------------------------------------------------------------------------- /exercises/exercise10.md: -------------------------------------------------------------------------------- 1 | # Exercise 10: Next Steps 2 | 3 | Now that you have things working, here are a few new things to try. 4 | 5 | - Add some more gestures to go with the :punch: and :flex: 6 | 7 | 1. Load the IMU_Capture sketch on your Arduino Nano 33 BLE 8 | 1. Caputure some additional gestures for a new emoji. Perhaps a :smile:, :thumbsup:, :clap:, or :wave: 9 | 1. Load the new CSV files into your Colab model 10 | 1. Put the trained model back on your Nano 33 BLE 11 | 12 | - Try increasing and decreasing the number of recordings per gesture, how does this impact performance? 13 | 14 | - Try to only use the accelerometer or gyroscope data (not both), how does this impact performance? 15 | 16 | - Tweak the model structure and parameters 17 | - Can you get better results? 18 | - Can you reduce the size and still get "good" results 19 | 20 | - Check out the examples in the [TensorFlow Lite Getting Started Guide](https://www.tensorflow.org/lite/microcontrollers/get_started) 21 | 22 | 25 | - Grab the [TinyML book](http://shop.oreilly.com/product/0636920254508.do) to continue at home 26 | -------------------------------------------------------------------------------- /exercises/exercise2.md: -------------------------------------------------------------------------------- 1 | # Exercise 2: Source Code 2 | 3 | The source code and documentation for this workshop available on Github. The code is in the [ArduinoSketches](../ArduinoSketches) folder. 4 | 5 | If you're familiar with git and Github, clone the repository to your computer. If that last sentence didn't make sense to you, we recommend that you download the code. 6 | 7 | 1. Go to https://github.com/don/tinyml-workshop 8 | 1. Click the green **Clone or download** button 9 | 1. Choose Download ZIP 10 | 1. Go to your downloads folder and extract all the files from the zip archive 11 | 12 | ![Sceenshot of Github's clone or download options](images/clone-or-download.png) 13 | 14 | Next [Exercise 3: Hardware](exercise3.md) 15 | -------------------------------------------------------------------------------- /exercises/exercise3.md: -------------------------------------------------------------------------------- 1 | # Exercise 3: Hardware 2 | 3 | ![Picture of Arduino Nano 33 BLE board](images/nano-33-ble.jpg) 4 | 5 | ## Unboxing and set up 6 | 7 | 1. Remove the Arduino Nano 33 BLE board from the box 8 | 1. Plug the micro USB cable into the board and your computer 9 | 1. Open the Arduino IDE application on your computer 10 | 1. Choose the board `Tools -> Board -> Arduino Nano 33 BLE` 11 | 1. 
Choose the port `Tools -> Port -> COM5 (Arduino Nano 33 BLE)` *Note that the actual port may be different on your computer* 12 | 13 | ## Hardware Test 14 | 15 | 1. Plug the micro USB cable into the board and your computer 16 | 1. Open `ArduinoSketches/Hardware_Test/Hardware_Test.ino` in the Arduino IDE 17 | 1. Choose the board `Tools -> Board -> Arduino Nano 33 BLE` 18 | 1. Choose the port `Tools -> Port -> COM5 (Arduino Nano 33 BLE)` *Note that the actual port may be different on your computer* 19 | 1. Upload the code to the board `Sketch -> Upload` 20 | 1. Both LEDs next to the USB port should be on 21 | 1. Open the serial monitor `Tools -> Serial Monitor` to see debug messages 22 | 1. If everything is working properly, the LED will start blinking quickly 23 | 24 | __NOTE:__ The first time you upload a sketch to the Arduino Nano 33 BLE, the USB port name might change. If this happens, you need to re-select the port using the `Tools -> Port` menu. 25 | 26 | Next [Exercise 4: Visualize the IMU Data](exercise4.md) 27 | 28 | 29 | -------------------------------------------------------------------------------- /exercises/exercise4.md: -------------------------------------------------------------------------------- 1 | # Exercise 4: Visualize the IMU Data 2 | 3 | The next step is to use an Arduino program you downloaded in [Exercise 2](exercise2.md) to capture motion data from the IMU. 4 | 5 | 1. Open __tinyml-workshop/ArduinoSketches/IMU_Capture/IMU_Capture.ino__ in the Arduino IDE. 6 | 1. Compile the sketch and upload it to the board: `Sketch -> Upload` 7 | 1. Open the Serial Monitor: `Tools -> Serial Monitor` 8 | 1. Shake the board. The change in acceleration will start recording data from the IMU in the Serial Monitor 9 | 1. Close the Serial Monitor window 10 | 1. Open the Serial Plotter: `Tools -> Serial Plotter` 11 | 1. Shake the board. The change in acceleration will start graphing data in the Serial Plotter 12 | 1. Repeat capturing various gestures to get a sense of what the training data will look like 13 | 1. Close the Serial Plotter 14 | 15 | ![screenshot of serial monitor with IMU data](images/serial-monitor-imu.png) 16 | 17 | ![screenshot of serial plotter with IMU data](images/serial-plotter-imu.png) 18 | 19 | Next [Exercise 5: Gather the Training Data](exercise5.md) 20 | 21 | 22 | -------------------------------------------------------------------------------- /exercises/exercise5.md: -------------------------------------------------------------------------------- 1 | # Exercise 5: Gather the Training Data 2 | 3 | 1. Press the reset button on the board 4 | 1. Open the Serial Monitor: `Tools -> Serial Monitor` 5 | 1. Make a punch gesture with the board in your hand - you should see the sensor data log in the Serial Monitor 6 | 1. Repeat the gesture 10 (or more) times to gather addition data 7 | 1. Copy and paste the data from the serial output to new text file named `punch.csv` 8 | 1. Close the Serial Monitor 9 | 1. Press the reset button on the board 10 | 1. Open the Serial Monitor: `Tools -> Serial Monitor` 11 | 1. Make a flex gesture with the board in your hand 12 | 1. Repeat the flex gesture at least 10 times 13 | 1. Copy and paste the serial output to new text file named `flex.csv` 14 | 15 | ![screenshot of serial monitor with IMU data](images/serial-monitor-imu.png) 16 | 17 | ## Creating CSV Files 18 | 19 | Visual Studio Code, Sublime Text, or Atom will all work great for creating CSV files. 
If you don't have one of these editors installed, try Notepad.exe on Windows or TextEdit on MacOS. Note that TextEdit wants to save data in rich text format default. Be sure to choose _Format -> Make Plain Text_ before choosing _File -> Save_. 20 | 21 | 22 | Next [Exercise 6: Machine Learning ](exercise6.md) 23 | 24 | -------------------------------------------------------------------------------- /exercises/exercise6.md: -------------------------------------------------------------------------------- 1 | # Exercise 6: Machine Learning 2 | 3 | We're going to use [Google Colab](https://colab.research.google.com) to train our machine learning model. Colab provides a Jupyter notebook that allows us to run our machine learning model in a web browser. 4 | 5 | ![Screenshot of Google Colab website](images/colab.png) 6 | 7 | ## 3rd Party Cookies 8 | 9 | Some of you might see an error about 3rd party cookies. 10 | 11 | ![Screenshot of Google Colab error about 3rd party cookies](images/colab-error.png) 12 | 13 | You can enable 3rd party cookies, or better yet, add an exception for `[*.]googleusercontent.com`. 14 | 15 | ![Screenshot adding 3rd party cookie exception for googleusercontent.com](images/colab-3rd-party-cookie-exception.png) 16 | 17 | ## Open the Notebook 18 | 19 | Open the [arduino_tinyml_workshop.ipynb](../arduino_tinyml_workshop.ipynb) notebook in Google Colab and follow the instructions in the notebook to prepare the data and train the model. 20 | 21 | https://colab.research.google.com/github/don/tinyml-workshop/blob/master/arduino_tinyml_workshop.ipynb 22 | 23 | 24 | Next [Exercise 7: Classifying IMU Data](exercise7.md) 25 | -------------------------------------------------------------------------------- /exercises/exercise7.md: -------------------------------------------------------------------------------- 1 | # Exercise 7: Classifying IMU Data 2 | 3 | 1. Open __tinyml-workshop/ArduinoSketches/IMU_Classifier/IMU_Classifier.ino__ in the Arduino IDE. 4 | 1. Switch to the model.h tab 5 | 1. Replace the contents of model.h with the version you downloaded from Colab 6 | 1. Upload the sketch: `Sketch -> Upload` 7 | 1. Open the Serial Monitor: `Tools -> Serial Monitor` 8 | 1. Perform the punch or flex gesture 9 | 1. The confidence of each gesture will be printed to the Serial Monitor (0 -> low confidence, 1 -> high confidence) 10 | 11 | ![screenshot with output of the imu classifier sketch](images/arduino-classifier.png) 12 | 13 | Next [Exercise 8: Emojis](exercise8.md) 14 | -------------------------------------------------------------------------------- /exercises/exercise8.md: -------------------------------------------------------------------------------- 1 | # Exercise 8: Emojis 2 | 3 | ## Print an emoji 4 | 5 | Now that we the code can recognize gestures, let's try printing out some emojis in addition to the text. 6 | 7 | Open __tinyml-workshop/ArduinoSketches/IMU_Classifier/IMU_Classifier.ino__ in the Arduino IDE if it's not already loaded. 8 | 9 | Create a new char* array named `EMOJIS`. Inside the array defines the emojis as a UTF-8 encoded unicode strings. This array must be the same length at the `GESTURE` array. The order of the emojis in the array must match the order of the gestures. 10 | 11 | const char* EMOJIS[] = { 12 | u8"\U0001f44a", // punch 13 | u8"\U0001f4aa" // flex 14 | }; 15 | 16 | If you're recorded additional gestures, you can find the unicode characters on the [Unicode Consortium website](http://www.unicode.org/emoji/charts/full-emoji-list.html). 
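For example, if you had also captured and trained a third gesture, say a hypothetical "wave", both arrays could be extended in matching order (U+1F44B is the waving hand emoji). This is only a sketch; it assumes you actually retrained the model in Colab with data for the new gesture:

    const char* GESTURES[] = {
      "punch",
      "flex",
      "wave"
    };

    const char* EMOJIS[] = {
      u8"\U0001f44a", // punch
      u8"\U0001f4aa", // flex
      u8"\U0001f44b"  // wave
    };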
17 | 18 | In the `loop` function, after the gestures and probabilities are printed, add some code to print the emoji for the gesture if the probability is over 80%. 19 | 20 | for (int i = 0; i < NUM_GESTURES; i++) { 21 | if (tflOutputTensor->data.f[i] > 0.8) { 22 | Serial.println(EMOJIS[i]); 23 | Serial.println(); 24 | } 25 | } 26 | 27 | Compile and deploy the updated code onto your Arduino using `Sketch -> Upload`. 28 | 29 | The Serial Monitor in the Arduino IDE can't display unicode characters, so we need to use a different tool to view the output. For Linux and MacOS users, we recommend using the terminal. For Windows, you'll need Google Chrome. 30 | 31 | ### MacOS 32 | Open new Terminal by pressing the command key ⌘ and the space bar. Type `Terminal.app` into the Spotlight search window that appers and press enter. Once the terminal opens, type `cat /dev/cu.usb` and hit the TAB key. MacOS should autocomplete the name of the port the Arduino is connected to. This will look something like `/dev/cu.usbmodem146101`. The actual port name will likely be different on your computer. If necessary, get the name of the port from the Arduino IDE. 33 | 34 | ![screenshot of macos with emojis](images/imu_classifier_emoji_mac.png) 35 | 36 | ### Linux 37 | 38 | Open a terminal and cat the output from the Arduino device. If necessary, get the name of the port from the Arduino IDE. 39 | 40 | ![screenshot of linux with emojis](images/imu_classifier_emoji_linux.png) 41 | 42 | ### Windows 43 | 44 | The Windows terminal won't display emojis. Instead we created a web page that uses the experimental Web Serial API in [Google Chrome](https://google.com/chrome). 45 | 46 | Using Google Chrome, open [https://serial-monitor.glitch.me](https://serial-monitor.glitch.me). 47 | 48 | ![Screenshot warning Chrome features need to be enabled](images/web-serial-disabled.png) 49 | 50 | You'll likely need to enable the Web Serial API by toggling the #enable-experimental-web-platform-features flag in chrome://flags. 51 | 52 | ![Screenshot of Chrome enable-experimental-web-platform-features flag](images/web-serial-flag-enabled.png) 53 | 54 | After enabling the experimental features, click the Relaunch button to restart Chrome. Open https://serial-monitor.glitch.me and click the `Connect` button. Choose the COM port for your Arduino. 55 | 56 | ![Screenshot of web ui with port selection dialog](images/web-serial-choose-port.png) 57 | 58 | Make a gesture and the results will be displayed in the web page. 59 | 60 | ![Screenshot of Chrome web page with text and emoji output from Arduino](images/web-serial-monitor.png) 61 | 62 | Next [Exercise 9: Gesture Controlled USB Emoji Keyboard](exercise9.md) 63 | -------------------------------------------------------------------------------- /exercises/exercise9.md: -------------------------------------------------------------------------------- 1 | # Exercise 9: Gesture Controlled USB Emoji Keyboard 2 | 3 | Printing emojis is cool, but it would be even better if we could make a gesture controlled keyboard. This exercise only works on Linux and MacOS so, if you're running Windows, try teaming up with a MacOS or Linux person for this part. 4 | 5 | Open __ArduinoSketchs/Emoji_Test/Emoji_Test.ino__, in the Arduino IDE. Note that this code breaks some Arduino conventions by using `#define`. 6 | 7 | ## Linux 8 | 9 | In the code, uncomment the `#define` for LINUX. 10 | 11 | #define LINUX 12 | 13 | 14 | ## MacOS 15 | 16 | In the code, uncomment the `#define` for MACOS. 
17 | 18 | #define MACOS 19 | 20 | For MacOS, you also need to enable the Unicode Hex Input keyboard. Open System Preferences. Choose Keyboard. Select the Input Sources tab. Press the plus button. Search for 'other'. Choose 'Unicode Hex Input'. Press the Add button. 21 | 22 | ![](images/macos-unicode-hex-input.png) 23 | 24 | ## Running the Test 25 | 26 | Compile and upload the code onto your Arduino using __Sketch -> Upload__. Open a new [Google Doc](https://doc.new) or another editor that supports emojis. Use your mouse to set focus in the editor. Shake the Arduino to trigger the motion detection, and a bicep emoji 💪 should appear. 27 | 28 | ## Building a Keyboard 29 | 30 | Now you are ready to build a keyboard. Create a new sketch in the Arduino IDE. Combine the code from `IMU_Classifier` and `Emoji_Test` to create an emoji keyboard. When a gesture is recognized, print the corresponding emoji using the USB keyboard. 31 | 32 | ![screenshot of google doc with emojis from our new gesture keyboard](images/emoji-google-doc.png) 33 | 34 | ### Note 35 | 36 | __NOTE:__ Once you load code that runs the USB Keyboard, the Arduino IDE might not be able to see the serial port when you want to load new code. Double-click the reset button on the Nano 33 BLE before you run __Sketch -> Upload__. 37 | 38 | Next [Exercise 10: Next Steps](exercise10.md) -------------------------------------------------------------------------------- /exercises/images/AI-IOT-devfest-AZ-2020-horiz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/AI-IOT-devfest-AZ-2020-horiz.png -------------------------------------------------------------------------------- /exercises/images/ArduinoIDE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/ArduinoIDE.png -------------------------------------------------------------------------------- /exercises/images/Arduino_logo_R_highquality.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/Arduino_logo_R_highquality.png -------------------------------------------------------------------------------- /exercises/images/BoardManager-Menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/BoardManager-Menu.png -------------------------------------------------------------------------------- /exercises/images/BoardsManager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/BoardsManager.png -------------------------------------------------------------------------------- /exercises/images/ChariotSolutions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/ChariotSolutions.png -------------------------------------------------------------------------------- /exercises/images/JustDownload.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/JustDownload.png -------------------------------------------------------------------------------- /exercises/images/ManageLibraries.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/ManageLibraries.png -------------------------------------------------------------------------------- /exercises/images/arduino-classifier.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/arduino-classifier.png -------------------------------------------------------------------------------- /exercises/images/ble-sense.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/ble-sense.jpg -------------------------------------------------------------------------------- /exercises/images/charlie_gerard_street_fighter.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/charlie_gerard_street_fighter.gif -------------------------------------------------------------------------------- /exercises/images/clone-or-download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/clone-or-download.png -------------------------------------------------------------------------------- /exercises/images/colab-3rd-party-cookie-exception.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/colab-3rd-party-cookie-exception.png -------------------------------------------------------------------------------- /exercises/images/colab-error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/colab-error.png -------------------------------------------------------------------------------- /exercises/images/colab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/colab.png -------------------------------------------------------------------------------- /exercises/images/emoji-google-doc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/emoji-google-doc.png -------------------------------------------------------------------------------- /exercises/images/imu_classifier_emoji_linux.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/imu_classifier_emoji_linux.png -------------------------------------------------------------------------------- /exercises/images/imu_classifier_emoji_mac.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/imu_classifier_emoji_mac.png -------------------------------------------------------------------------------- /exercises/images/library-arduinolsm9ds1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/library-arduinolsm9ds1.png -------------------------------------------------------------------------------- /exercises/images/library-tensorflowlite.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/library-tensorflowlite.png -------------------------------------------------------------------------------- /exercises/images/macos-unicode-hex-input.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/macos-unicode-hex-input.png -------------------------------------------------------------------------------- /exercises/images/nano-33-ble.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/nano-33-ble.jpg -------------------------------------------------------------------------------- /exercises/images/nano-33-ble_iso.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/nano-33-ble_iso.jpg -------------------------------------------------------------------------------- /exercises/images/serial-monitor-imu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/serial-monitor-imu.png -------------------------------------------------------------------------------- /exercises/images/serial-plotter-imu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/serial-plotter-imu.png -------------------------------------------------------------------------------- /exercises/images/web-serial-choose-port.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/web-serial-choose-port.png -------------------------------------------------------------------------------- /exercises/images/web-serial-disabled.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/web-serial-disabled.png -------------------------------------------------------------------------------- /exercises/images/web-serial-flag-enabled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/web-serial-flag-enabled.png -------------------------------------------------------------------------------- /exercises/images/web-serial-monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/don/tinyml-workshop/7ab9bb877699826ff18d49fbd7f7d50885e6199f/exercises/images/web-serial-monitor.png -------------------------------------------------------------------------------- /exercises/intro.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ## TL;DR 4 | 5 | We'll be covering the full embedded machine learning (ML) lifecycle: data capture, model training, and deploying the model to a microcontroller. [Skip to Exercise 1: Development Environment](exercise1.md). 6 | 7 | 8 | ## Inspiration 9 | 10 | This project was inspired by [Charlie Gerard's street fighter project](https://dev.to/devdevcharlie/play-street-fighter-with-body-movements-using-arduino-and-tensorflow-js-4kbi). 11 | 12 | ![Charlie Gerard's motion controlled Street Fighter demo](images/charlie_gerard_street_fighter.gif) 13 | 14 | Instead of using JavaScript with TensorFlow.js and Johnny Five (which is great), we’ll develop the model in TensorFlow and run it on a microcontroller (MCU) with TensorFlow Lite. 15 | 16 | JSConf talk link: https://www.youtube.com/watch?v=rwFiFWI23Rw 17 | 18 | ## Goal 19 | 20 | Build a motion / gesture based emoji keyboard! 21 | 22 | Detect punching vs flexing ... 23 | 24 | ## AI / ML overview 25 | 26 | A quick overview; we can't teach you everything in 2 hours ... 27 | 28 | ### What is Machine Learning (ML)? 29 | 30 | “A field of study that gives computers the ability to learn without being explicitly programmed.” 31 | - Attributed to Arthur Samuel 32 | 33 | Excerpt from "Grokking Deep Learning" by Andrew Trask 34 | 35 | ... 36 | 37 | Machine Learning =~ Monkey see, monkey do 38 | 39 | 🐵 40 | 41 | 42 | Programming: 43 | 44 | ``` 45 | +----------+ 46 | Algorithm -> | | 47 | | | -> *Answers* 48 | Input -> | | 49 | +----------+ 50 | ``` 51 | 52 | Machine Learning: 53 | 54 | ``` 55 | +----------+ 56 | *Answers* -> | | 57 | | | -> Algorithm 58 | Input -> | | 59 | +----------+ 60 | ``` 61 | 62 | Supervised Learning: 63 | 64 | ``` 65 | +---------------------+ 66 | What you know -> | Supervised Learning | -> What you want to know 67 | +---------------------+ 68 | ``` 69 | 70 | ## What we'll be using 71 | 72 | * Python 73 | * Jupyter Notebooks / Google Colab 74 | * TensorFlow 75 | * NumPy 76 | * Pandas 77 | * matplotlib 78 | 79 | ## What's TinyML 80 | 81 | Running ML models on microcontrollers. Low power, below 1 mW. 82 | 83 | ## Arduino Nano 33 BLE board 84 | 85 | Based on the Nordic nRF52840 86 | * Arm Cortex-M4F running at 64 MHz 87 | * 256 kB RAM 88 | * 1 MB Flash 89 | * Bluetooth 5 radio 90 | 91 | Onboard Sensors 92 | * IMU (measures motion: acceleration, gyroscope) 93 | 94 | This workshop also works on the Arduino Nano 33 BLE Sense board. The Sense version of the board adds additional sensors: temperature, pressure, humidity, light, color, PDM microphone.
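If you want a quick feel for the onboard IMU before the exercises, here is a minimal sketch (a simplified illustration, not one of the workshop sketches; it assumes the same ArduinoLSM9DS1 library the workshop code uses) that prints raw accelerometer readings over serial:

```
#include <Arduino_LSM9DS1.h>

void setup() {
  Serial.begin(9600);
  while (!Serial);

  // initialize the onboard IMU
  if (!IMU.begin()) {
    Serial.println("Failed to initialize IMU!");
    while (1);
  }
}

void loop() {
  float x, y, z;

  // print one acceleration sample (in G's) whenever new data is ready
  if (IMU.accelerationAvailable()) {
    IMU.readAcceleration(x, y, z);
    Serial.print(x);
    Serial.print(',');
    Serial.print(y);
    Serial.print(',');
    Serial.println(z);
  }
}
```

Because the values are printed as comma-separated numbers, you can also open the Arduino Serial Plotter and watch the acceleration change as you move the board.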
95 | 96 | ## What is an IMU? 97 | 98 | IMU - Inertial Measurement Unit 99 | 100 | This board uses the ST Micro LSM9DS1. 101 | 102 | * Gyroscope - measures angular velocity -- that is "how fast, and along which axis, am I rotating?" 103 | * Accelerometer - measures acceleration, which indicates how fast velocity is changing -- "how fast am I speeding up or slowing down?" 104 | * Magnetometer - measures the strength and direction of magnetic fields 105 | 106 | We're only using the accelerometer and gyroscope for this project. 107 | 108 | ## Workshop 109 | 110 | * Record IMU data 111 | * Use data to train ML model 112 | * Convert model to run on microcontroller 113 | * Run the model on the Arduino 114 | 115 | ## Why is ML / TinyML a good fit 116 | 117 | * It’s definitely possible to develop an algorithm to detect punches using classical programming techniques, but … how would you account for the following? 118 | * People might punch differently? 119 | * Different styles of punching? 120 | * You want to be able to detect more than one type of gesture 121 | * Things get complicated! 122 | * ML is also “cool”; that’s why we’re all here today, right? Why not try it out! 123 | 124 | ## IMU input data 125 | 126 | The Arduino library configures the sensor to report 119 samples every second, which means a new set of data arrives roughly every 8.4 ms. 127 | 128 | 129 | We’ll focus on the accelerometer and gyroscope inputs (ignore the magnetometer). Each input has 3 axes: X, Y, Z 130 | 131 | The inputs for the model will be 1 second of data = 119 samples 132 | 133 | Each sample will be [Ax, Ay, Az, Gx, Gy, Gz] 134 | 135 | 136 | ``` 137 | [ax, ay, az, gx, gy, gz] 138 | ``` 139 | 140 | Each gesture recording contains 119 rows of 6 data points, recorded in one second. 141 | 142 | We’ll pass the 119 x 6 inputs as a single array and expect the type of gesture as the output. 143 | 144 | 145 | ``` 146 | [Ax1, Ay1, Az1, Gx1, Gy1, Gz1, Ax2, Ay2, Az2, Gx2, Gy2, Gz2,… Ax119, Ay119, Az119, Gx119, Gy119, Gz119] 147 | ``` 148 | 149 | Sample data file: 150 | 151 | ``` 152 | aX,aY,aZ,gX,gY,gZ 153 | 0.569,-0.698,0.592,50.110,-43.762,22.888 154 | 0.590,-0.756,0.629,55.542,-32.654,19.592 155 | 0.553,-0.727,0.644,50.964,-34.912,16.479 156 | 0.549,-0.761,0.670,45.471,-34.363,13.062 157 | 0.577,-0.844,0.666,47.119,-29.236,11.780 158 | 0.576,-0.867,0.707,50.781,-19.592,13.123 159 | 0.600,-0.862,0.720,55.786,-14.343,16.235 160 | 0.589,-0.841,0.721,57.556,-8.301,18.372 161 | 0.578,-0.842,0.741,56.763,-1.099,20.020 162 | 163 | ``` 164 | 165 | We flatten the data before we pass it to Keras rather than passing a 2-dimensional array. 166 | (We could have passed a 2-dimensional array, but then we would need a Keras Flatten layer.) 167 | The flattened array, or vector, also makes the Arduino code simpler. 168 | 169 | 170 | ``` 171 | [ax1, ay1, az1, gx1, gy1, gz1, ax2, ay2, az2, gx2, gy2, gz2,…, ax119, ay119, az119, gx119, gy119, gz119] 172 | ``` 173 | 174 | # Post workshop 175 | 176 | TinyML: http://shop.oreilly.com/product/0636920254508.do 177 | * Will feature the same board we used today 178 | * The content of this workshop is based on it 179 | * Includes other examples, such as “micro speech”, voice keyword detection (“yes” vs “no”) 180 | 181 | 182 | Grokking Deep Learning: https://www.manning.com/books/grokking-deep-learning 183 | * Covers the theory of deep learning and teaches you at a lower level using Python with NumPy 184 | 185 | Next [Exercise 1: Development Environment](exercise1.md) 186 | 187 | --------------------------------------------------------------------------------