The Poses Classifier classifies the video as class ... with ... confidence.
24 | Class "sit": Sit in front of the webcam
25 | Class "standup": Stand up in front of the webcam
26 |
27 |
28 |
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/Classification/teachableMachinePoses/sketch.js:
--------------------------------------------------------------------------------
1 | // the json file (model topology) has a reference to the bin file (model weights)
2 | const checkpointURL =
3 | "https://storage.googleapis.com/tm-posenet/yiningposetest2019071231647/model.json";
4 | // the metadata json file contains the text labels of your model and additional information
5 | const metadataURL =
6 | "https://storage.googleapis.com/tm-posenet/yiningposetest2019071231647/metadata.json";
7 |
8 | const size = 300;
9 | let webcamEl;
10 | let model;
11 | let totalClasses;
12 | let myCanvas;
13 | let ctx;
14 |
15 | // A function that loads the model from the checkpoint
16 | async function load() {
17 | model = await tm.posenet.load(checkpointURL, metadataURL);
18 | totalClasses = model.getTotalClasses();
19 | console.log("Number of classes, ", totalClasses);
20 | }
21 |
22 | async function loadWebcam() {
23 | webcamEl = await tm.getWebcam(size, size); // can change width and height
24 | webcamEl.play();
25 | }
26 |
27 | async function setup() {
28 | myCanvas = createCanvas(size, size);
29 | ctx = myCanvas.elt.getContext("2d");
30 | // Call the load function, wait until it finishes loading
31 | await load();
32 | await loadWebcam();
33 | }
34 |
35 | function draw() {
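  // runs on every frame; predictVideo is asynchronous, so results arrive as they're ready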
36 | predictVideo(webcamEl);
37 | }
38 |
39 | async function predictVideo(image) {
40 | if (image) {
41 | // Prediction #1: run input through posenet
42 | // predictPosenet can take in an image, video or canvas html element
43 | const flipHorizontal = false;
44 | const { pose, posenetOutput } = await model.predictPosenet(
45 | image,
46 | flipHorizontal
47 | );
48 | // Prediction 2: run input through the Teachable Machine classification model
49 | const prediction = await model.predict(
50 | posenetOutput,
51 | flipHorizontal,
52 | totalClasses
53 | );
54 |
55 | // Show the result
56 | const res = select('#res'); // select the #res element
57 | res.html(prediction[0].className);
58 |
59 | // Show the probability
60 | const prob = select('#prob'); // select the #prob element
61 | prob.html(prediction[0].probability.toFixed(2));
62 |
63 | // draw the keypoints and skeleton
64 | if (pose) {
65 | const minPartConfidence = 0.5;
66 | ctx.drawImage(webcamEl, 0, 0);
67 | tm.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
68 | tm.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
69 | }
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/Classification/teachableMachineSound/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 |   <!-- p5.js and ml5.js script tags go here (the originals were stripped from this copy) -->
5 | </head>
6 | <body>
7 |
8 |   My Teachable Machine with Sound
9 |
10 |   <script src="sketch.js"></script>
11 | </body>
12 | </html>
--------------------------------------------------------------------------------
/Classification/teachableMachineSound/sketch.js:
--------------------------------------------------------------------------------
1 | const mySoundModelURL = 'https://storage.googleapis.com/tm-speech-commands/yiningtestsound07112019/model.json';
2 | let mySoundModel;
3 | let resultDiv;
4 |
5 | function preload() {
6 | mySoundModel = ml5.soundClassifier(mySoundModelURL);
7 | }
8 |
9 | function setup() {
10 | resultDiv = createElement('h1', '...');
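  // classify() keeps listening and calls gotResults each time a new prediction is ready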
11 | mySoundModel.classify(gotResults);
12 | }
13 |
14 | function gotResults(err, results) {
15 | if (err) console.log(err);
16 | if (results) {
17 | console.log(results);
18 | resultDiv.html('Result is: ' + results[0].label);
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/Examples/ABCGestures/ArduinoSketches/IMU_Capture/IMU_Capture.ino:
--------------------------------------------------------------------------------
1 | /*
2 | IMU Capture
3 |
4 | This example uses the on-board IMU to read acceleration and gyroscope
5 | data and prints it to the Serial Monitor for one second when significant
6 | motion is detected.
7 |
8 | You can also use the Serial Plotter to graph the data.
9 |
10 | The circuit:
11 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 | Modified by Dominic Pajak, Sandeep Mistry
15 |
16 | This example code is in the public domain.
17 | */
18 |
19 | #include <Arduino_LSM9DS1.h>
20 |
21 | const float accelerationThreshold = 2.5; // threshold of significant motion in G's
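// at rest, gravity alone sums to about 1 G, so 2.5 G only triggers on a strong, deliberate motion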
22 | const int numSamples = 119;
23 |
24 | int samplesRead = numSamples;
25 |
26 | void setup() {
27 | Serial.begin(9600);
28 | while (!Serial);
29 |
30 | if (!IMU.begin()) {
31 | Serial.println("Failed to initialize IMU!");
32 | while (1);
33 | }
34 |
35 | // print the header
36 | Serial.println("aX,aY,aZ,gX,gY,gZ");
37 | }
38 |
39 | void loop() {
40 | float aX, aY, aZ, gX, gY, gZ;
41 |
42 | // wait for significant motion
43 | while (samplesRead == numSamples) {
44 | if (IMU.accelerationAvailable()) {
45 | // read the acceleration data
46 | IMU.readAcceleration(aX, aY, aZ);
47 |
48 | // sum up the absolutes
49 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ);
50 |
51 | // check if it's above the threshold
52 | if (aSum >= accelerationThreshold) {
53 | // reset the sample read count
54 | samplesRead = 0;
55 | break;
56 | }
57 | }
58 | }
59 |
60 | // check if the all the required samples have been read since
61 | // the last time the significant motion was detected
62 | while (samplesRead < numSamples) {
63 | // check if both new acceleration and gyroscope data is
64 | // available
65 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) {
66 | // read the acceleration and gyroscope data
67 | IMU.readAcceleration(aX, aY, aZ);
68 | IMU.readGyroscope(gX, gY, gZ);
69 |
70 | samplesRead++;
71 |
72 | // print the data in CSV format
73 | Serial.print(aX, 3);
74 | Serial.print(',');
75 | Serial.print(aY, 3);
76 | Serial.print(',');
77 | Serial.print(aZ, 3);
78 | Serial.print(',');
79 | Serial.print(gX, 3);
80 | Serial.print(',');
81 | Serial.print(gY, 3);
82 | Serial.print(',');
83 | Serial.print(gZ, 3);
84 | Serial.println();
85 |
86 | if (samplesRead == numSamples) {
87 | // add an empty line if it's the last sample
88 | Serial.println();
89 | }
90 | }
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/Examples/ABCGestures/README.md:
--------------------------------------------------------------------------------
1 | # ABC Gestures Classification
2 | This is a tutorial on training 3 gestures instead of the 2 (flex, punch) from the [GestureToEmoji](../GestureToEmoji) workshop. Follow it to see how to train a classifier with more than 2 gestures. You can also modify it to train any number of gestures you like, but make sure the classes are distinct from each other.
3 |
4 | ## Demo
5 | Classify "A", "B", "C" gestures.
6 | [video](https://youtu.be/7AfUF4RuiZ8)
7 |
8 | ## How to Run it
9 | A [video](https://www.loom.com/share/ae50b56c41774eb6b98e9b8f728b3c90) of how to run this workshop
10 |
11 | ## 0. What do you need
12 | Arduino Nano 33 BLE Sense (and its USB cable), Laptop
13 |
14 | ## 1. Circuit
15 | - Connect D2, D3, and D4 to three LEDs.
16 |
17 |
18 | ## 2. Collect data
19 | - Open the [IMU_Capture](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/ABCGestures/ArduinoSketches/IMU_Capture) sketch in Arduino, select the board and port, and upload it to your Arduino Nano 33 BLE Sense board.
20 | - Open the Serial Monitor and perform gesture "A" more than 15 times. The Arduino will detect each sudden movement and record the accelerometer and gyroscope data for 1 second; you should see 119 lines of aX,aY,aZ,gX,gY,gZ values per capture in the Serial Monitor. Copy the output from the Serial Monitor and save it in an "a.csv" file.
21 | - Clear the Serial Monitor, press the reset button on the board, and repeat the process for gestures "B" and "C".
22 | - In the end, we will have 3 .csv files: "a.csv", "b.csv", "c.csv".
23 |
24 | ## 3. Train the model
25 | Open [this google colab](https://colab.research.google.com/drive/1E2UFGJjddwEp3yKxMF9Fk02D2KACauyn).
26 | **Upload the 3 `.csv` files to the colab.**
27 | To train our own classes, we need to change the list of gesture classes.
28 | There are 3 places where we need to make small changes (the third is in the Arduino sketch, covered in the next section).
29 | - 1. Graph Data (optional)
30 | ```python
31 | filename = "a.csv" # change the "a.csv" to any of your own filename
32 | ```
33 | - 2. Parse and prepare the data
34 | ```python
35 | GESTURES = [
36 | "a",
37 | "b",
38 | "c"
39 | ]
40 | ```
41 | List your gestures here, using the same names as your `.csv` files.
42 | - In the end, you should have a `model.h` file that you downloaded from the colab.
43 |
44 | ## 4. Run the model
45 | Go to the Arduino IDE, open this [IMU_Classifier](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/ABCGestures/ArduinoSketches/IMU_Classifier) sketch, and replace its `model.h` file with the one we got from the Google Colab.
46 |
47 | Inside of the sketch, change the list of gestures as well:
48 | ```
49 | // array to map gesture index to a name
50 | const char* GESTURES[] = {
51 | "a",
52 | "b",
53 | "c"
54 | };
55 | ```
56 | - Select the port and board, upload the sketch, and open the Serial Monitor. Perform each gesture and you will see the results in the Serial Monitor.
57 | - This sketch will also light up the LEDs on pins D2, D3, and D4 when different classes are recognized (a sketch of that logic follows below). Watch the above demo video to learn more.
58 |
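For reference, a minimal sketch of that LED logic, assuming the classifier exposes the per-class confidences in an array (`output`, `LED_PINS`, `showGesture`, and `numClasses` are illustrative names, not the exact variables in IMU_Classifier):

```
// Illustrative only: light one LED per recognized gesture.
const int LED_PINS[] = {2, 3, 4};   // D2 = "a", D3 = "b", D4 = "c"

void showGesture(float output[], int numClasses) {
  // find the class with the highest confidence
  int best = 0;
  for (int i = 1; i < numClasses; i++) {
    if (output[i] > output[best]) best = i;
  }
  // turn the matching LED on and the others off
  for (int i = 0; i < numClasses; i++) {
    digitalWrite(LED_PINS[i], i == best ? HIGH : LOW);
  }
}
```

Each pin still needs a `pinMode(pin, OUTPUT)` call in `setup()`.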
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/README.md:
--------------------------------------------------------------------------------
1 | # [Guide](https://blog.arduino.cc/2019/11/07/fruit-identification-using-arduino-and-tensorflow/)
2 |
3 | # Forked from [ArduinoTensorFlowLiteTutorials](https://github.com/arduino/ArduinoTensorFlowLiteTutorials)
4 |
5 | # FruitToEmoji
6 |
7 | Classifies fruit using the RGB color and proximity sensors of the Arduino Nano 33 BLE Sense, using a TensorFlow Lite Micro model trained on data captured from the same hardware
8 |
9 | [Demo video](https://youtu.be/7Tjj2vMZ-6c): classify three books: yellow, blue, purple books
10 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SamepleData_Stuffanimals/heart.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.500,0.278,0.222
3 | 0.571,0.214,0.214
4 | 0.727,0.091,0.182
5 | 0.692,0.154,0.154
6 | 0.727,0.091,0.182
7 | 0.667,0.133,0.200
8 | 0.727,0.091,0.182
9 | 0.625,0.187,0.187
10 | 0.750,0.083,0.167
11 | 0.727,0.091,0.182
12 | 0.692,0.154,0.154
13 | 0.727,0.091,0.182
14 | 0.750,0.083,0.167
15 | 0.643,0.143,0.214
16 | 0.727,0.091,0.182
17 | 0.727,0.091,0.182
18 | 0.727,0.091,0.182
19 | 0.500,0.294,0.206
20 | 0.727,0.091,0.182
21 | 0.727,0.091,0.182
22 | 0.727,0.091,0.182
23 | 0.667,0.167,0.167
24 | 0.667,0.167,0.167
25 | 0.667,0.167,0.167
26 | 0.667,0.167,0.167
27 | 0.667,0.167,0.167
28 | 0.615,0.154,0.231
29 | 0.643,0.143,0.214
30 | 0.667,0.167,0.167
31 | 0.667,0.167,0.167
32 | 0.643,0.143,0.214
33 | 0.600,0.200,0.200
34 | 0.667,0.167,0.167
35 | 0.600,0.200,0.200
36 | 0.667,0.167,0.167
37 | 0.615,0.154,0.231
38 | 0.615,0.154,0.231
39 | 0.667,0.167,0.167
40 | 0.643,0.143,0.214
41 | 0.600,0.200,0.200
42 | 0.615,0.154,0.231
43 | 0.667,0.167,0.167
44 | 0.625,0.187,0.187
45 | 0.667,0.167,0.167
46 | 0.615,0.154,0.231
47 | 0.667,0.167,0.167
48 | 0.667,0.167,0.167
49 | 0.667,0.167,0.167
50 | 0.667,0.167,0.167
51 | 0.667,0.167,0.167
52 | 0.667,0.167,0.167
53 | 0.643,0.143,0.214
54 | 0.667,0.167,0.167
55 | 0.667,0.167,0.167
56 | 0.615,0.154,0.231
57 | 0.643,0.143,0.214
58 | 0.667,0.167,0.167
59 | 0.667,0.167,0.167
60 | 0.667,0.167,0.167
61 | 0.615,0.154,0.231
62 | 0.643,0.143,0.214
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SamepleData_Stuffanimals/minion.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.529,0.294,0.176
3 | 0.562,0.250,0.187
4 | 0.562,0.250,0.187
5 | 0.562,0.250,0.187
6 | 0.562,0.250,0.187
7 | 0.562,0.250,0.187
8 | 0.562,0.250,0.187
9 | 0.562,0.250,0.187
10 | 0.529,0.294,0.176
11 | 0.500,0.300,0.200
12 | 0.533,0.267,0.200
13 | 0.533,0.267,0.200
14 | 0.571,0.286,0.143
15 | 0.583,0.250,0.167
16 | 0.583,0.250,0.167
17 | 0.583,0.250,0.167
18 | 0.571,0.286,0.143
19 | 0.533,0.267,0.200
20 | 0.571,0.286,0.143
21 | 0.615,0.231,0.154
22 | 0.615,0.231,0.154
23 | 0.562,0.250,0.187
24 | 0.529,0.294,0.176
25 | 0.562,0.250,0.187
26 | 0.562,0.250,0.187
27 | 0.588,0.235,0.176
28 | 0.562,0.250,0.187
29 | 0.562,0.250,0.187
30 | 0.562,0.250,0.187
31 | 0.562,0.250,0.187
32 | 0.562,0.250,0.187
33 | 0.571,0.286,0.143
34 | 0.562,0.250,0.187
35 | 0.588,0.235,0.176
36 | 0.588,0.235,0.176
37 | 0.562,0.250,0.187
38 | 0.588,0.235,0.176
39 | 0.588,0.235,0.176
40 | 0.588,0.235,0.176
41 | 0.526,0.263,0.211
42 | 0.562,0.250,0.187
43 | 0.556,0.278,0.167
44 | 0.556,0.278,0.167
45 | 0.556,0.278,0.167
46 | 0.533,0.267,0.200
47 | 0.533,0.267,0.200
48 | 0.583,0.250,0.167
49 | 0.533,0.267,0.200
50 | 0.562,0.250,0.187
51 | 0.533,0.267,0.200
52 | 0.562,0.250,0.187
53 | 0.484,0.290,0.226
54 | 0.465,0.302,0.233
55 | 0.439,0.317,0.244
56 | 0.419,0.326,0.256
57 | 0.419,0.326,0.256
58 | 0.371,0.343,0.286
59 | 0.364,0.364,0.273
60 | 0.378,0.351,0.270
61 | 0.378,0.351,0.270
62 | 0.385,0.346,0.269
63 | 0.444,0.296,0.259
64 | 0.458,0.333,0.208
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SamepleData_Stuffanimals/sun.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.621,0.207,0.172
3 | 0.714,0.143,0.143
4 | 0.750,0.100,0.150
5 | 0.737,0.105,0.158
6 | 0.737,0.105,0.158
7 | 0.737,0.105,0.158
8 | 0.750,0.100,0.150
9 | 0.750,0.100,0.150
10 | 0.762,0.095,0.143
11 | 0.727,0.136,0.136
12 | 0.750,0.100,0.150
13 | 0.750,0.100,0.150
14 | 0.750,0.125,0.125
15 | 0.750,0.125,0.125
16 | 0.765,0.118,0.118
17 | 0.750,0.100,0.150
18 | 0.750,0.100,0.150
19 | 0.727,0.136,0.136
20 | 0.750,0.100,0.150
21 | 0.750,0.100,0.150
22 | 0.750,0.100,0.150
23 | 0.714,0.143,0.143
24 | 0.714,0.143,0.143
25 | 0.750,0.100,0.150
26 | 0.737,0.105,0.158
27 | 0.750,0.100,0.150
28 | 0.714,0.143,0.143
29 | 0.750,0.100,0.150
30 | 0.750,0.100,0.150
31 | 0.714,0.143,0.143
32 | 0.714,0.143,0.143
33 | 0.737,0.105,0.158
34 | 0.750,0.100,0.150
35 | 0.714,0.143,0.143
36 | 0.714,0.143,0.143
37 | 0.737,0.105,0.158
38 | 0.750,0.100,0.150
39 | 0.714,0.143,0.143
40 | 0.714,0.143,0.143
41 | 0.750,0.100,0.150
42 | 0.750,0.100,0.150
43 | 0.727,0.136,0.136
44 | 0.727,0.136,0.136
45 | 0.750,0.100,0.150
46 | 0.750,0.100,0.150
47 | 0.714,0.143,0.143
48 | 0.750,0.100,0.150
49 | 0.737,0.105,0.158
50 | 0.727,0.136,0.136
51 | 0.750,0.100,0.150
52 | 0.750,0.100,0.150
53 | 0.714,0.143,0.143
54 | 0.714,0.143,0.143
55 | 0.750,0.100,0.150
56 | 0.682,0.136,0.182
57 | 0.750,0.100,0.150
58 | 0.750,0.100,0.150
59 | 0.727,0.136,0.136
60 | 0.737,0.105,0.158
61 | 0.750,0.100,0.150
62 | 0.750,0.100,0.150
63 | 0.737,0.105,0.158
64 | 0.714,0.143,0.143
65 | 0.714,0.143,0.143
66 | 0.714,0.143,0.143
67 | 0.750,0.100,0.150
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData/apple.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.522,0.261,0.217
3 | 0.522,0.261,0.217
4 | 0.550,0.250,0.200
5 | 0.545,0.227,0.227
6 | 0.556,0.222,0.222
7 | 0.545,0.227,0.227
8 | 0.524,0.238,0.238
9 | 0.500,0.273,0.227
10 | 0.500,0.292,0.208
11 | 0.526,0.263,0.211
12 | 0.545,0.227,0.227
13 | 0.556,0.222,0.222
14 | 0.550,0.250,0.200
15 | 0.529,0.235,0.235
16 | 0.579,0.211,0.211
17 | 0.550,0.250,0.200
18 | 0.556,0.222,0.222
19 | 0.500,0.273,0.227
20 | 0.524,0.286,0.190
21 | 0.550,0.250,0.200
22 | 0.588,0.235,0.176
23 | 0.550,0.250,0.200
24 | 0.556,0.222,0.222
25 | 0.550,0.250,0.200
26 | 0.588,0.235,0.176
27 | 0.550,0.250,0.200
28 | 0.562,0.250,0.187
29 | 0.550,0.250,0.200
30 | 0.588,0.235,0.176
31 | 0.588,0.235,0.176
32 | 0.579,0.211,0.211
33 | 0.588,0.235,0.176
34 | 0.550,0.250,0.200
35 | 0.562,0.250,0.187
36 | 0.550,0.250,0.200
37 | 0.562,0.250,0.187
38 | 0.588,0.235,0.176
39 | 0.556,0.222,0.222
40 | 0.562,0.250,0.187
41 | 0.556,0.222,0.222
42 | 0.600,0.200,0.200
43 | 0.588,0.235,0.176
44 | 0.562,0.250,0.187
45 | 0.588,0.235,0.176
46 | 0.550,0.250,0.200
47 | 0.562,0.250,0.187
48 | 0.588,0.235,0.176
49 | 0.571,0.214,0.214
50 | 0.600,0.200,0.200
51 | 0.571,0.214,0.214
52 | 0.600,0.200,0.200
53 | 0.588,0.235,0.176
54 | 0.600,0.200,0.200
55 | 0.579,0.211,0.211
56 | 0.600,0.200,0.200
57 | 0.562,0.250,0.187
58 | 0.588,0.235,0.176
59 | 0.600,0.200,0.200
60 | 0.562,0.250,0.187
61 | 0.562,0.250,0.187
62 | 0.571,0.238,0.190
63 | 0.588,0.235,0.176
64 | 0.571,0.238,0.190
65 | 0.562,0.250,0.187
66 | 0.636,0.182,0.182
67 | 0.611,0.222,0.167
68 | 0.571,0.214,0.214
69 | 0.583,0.250,0.167
70 | 0.500,0.250,0.250
71 | 0.562,0.250,0.187
72 | 0.500,0.269,0.231
73 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData/banana.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.471,0.353,0.176
3 | 0.484,0.339,0.177
4 | 0.472,0.340,0.189
5 | 0.463,0.352,0.185
6 | 0.450,0.350,0.200
7 | 0.433,0.367,0.200
8 | 0.433,0.367,0.200
9 | 0.452,0.355,0.194
10 | 0.472,0.361,0.167
11 | 0.467,0.356,0.178
12 | 0.491,0.340,0.170
13 | 0.489,0.340,0.170
14 | 0.500,0.333,0.167
15 | 0.488,0.349,0.163
16 | 0.515,0.333,0.152
17 | 0.515,0.333,0.152
18 | 0.500,0.333,0.167
19 | 0.500,0.333,0.167
20 | 0.500,0.350,0.150
21 | 0.474,0.368,0.158
22 | 0.450,0.350,0.200
23 | 0.389,0.389,0.222
24 | 0.437,0.375,0.187
25 | 0.437,0.375,0.187
26 | 0.421,0.368,0.211
27 | 0.409,0.364,0.227
28 | 0.391,0.391,0.217
29 | 0.387,0.387,0.226
30 | 0.387,0.387,0.226
31 | 0.414,0.379,0.207
32 | 0.414,0.379,0.207
33 | 0.393,0.393,0.214
34 | 0.406,0.375,0.219
35 | 0.414,0.379,0.207
36 | 0.419,0.387,0.194
37 | 0.423,0.385,0.192
38 | 0.417,0.375,0.208
39 | 0.391,0.391,0.217
40 | 0.391,0.391,0.217
41 | 0.391,0.391,0.217
42 | 0.400,0.400,0.200
43 | 0.400,0.400,0.200
44 | 0.421,0.368,0.211
45 | 0.429,0.381,0.190
46 | 0.440,0.360,0.200
47 | 0.429,0.381,0.190
48 | 0.429,0.381,0.190
49 | 0.450,0.350,0.200
50 | 0.444,0.389,0.167
51 | 0.450,0.350,0.200
52 | 0.437,0.375,0.187
53 | 0.471,0.353,0.176
54 | 0.437,0.375,0.187
55 | 0.444,0.389,0.167
56 | 0.471,0.353,0.176
57 | 0.450,0.350,0.200
58 | 0.429,0.357,0.214
59 | 0.423,0.385,0.192
60 | 0.419,0.387,0.194
61 | 0.406,0.375,0.219
62 | 0.406,0.375,0.219
63 | 0.389,0.389,0.222
64 | 0.419,0.387,0.194
65 | 0.394,0.394,0.212
66 | 0.393,0.393,0.214
67 | 0.412,0.382,0.206
68 | 0.389,0.389,0.222
69 | 0.410,0.385,0.205
70 | 0.394,0.394,0.212
71 | 0.400,0.400,0.200
72 | 0.400,0.400,0.200
73 | 0.400,0.400,0.200
74 | 0.400,0.400,0.200
75 | 0.391,0.391,0.217
76 | 0.400,0.400,0.200
77 | 0.375,0.417,0.208
78 | 0.393,0.393,0.214
79 | 0.385,0.385,0.231
80 | 0.370,0.407,0.222
81 | 0.370,0.407,0.222
82 | 0.391,0.391,0.217
83 | 0.375,0.417,0.208
84 | 0.381,0.381,0.238
85 | 0.400,0.400,0.200
86 | 0.409,0.409,0.182
87 | 0.400,0.400,0.200
88 | 0.400,0.400,0.200
89 | 0.400,0.400,0.200
90 | 0.407,0.407,0.185
91 | 0.400,0.400,0.200
92 | 0.391,0.391,0.217
93 | 0.375,0.406,0.219
94 | 0.400,0.400,0.200
95 | 0.409,0.364,0.227
96 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData/orange.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.540,0.300,0.160
3 | 0.558,0.288,0.154
4 | 0.600,0.286,0.114
5 | 0.571,0.286,0.143
6 | 0.571,0.286,0.143
7 | 0.615,0.269,0.115
8 | 0.591,0.273,0.136
9 | 0.600,0.300,0.100
10 | 0.625,0.250,0.125
11 | 0.625,0.250,0.125
12 | 0.600,0.267,0.133
13 | 0.591,0.273,0.136
14 | 0.609,0.261,0.130
15 | 0.615,0.269,0.115
16 | 0.591,0.273,0.136
17 | 0.600,0.280,0.120
18 | 0.579,0.263,0.158
19 | 0.636,0.273,0.091
20 | 0.600,0.267,0.133
21 | 0.600,0.267,0.133
22 | 0.591,0.273,0.136
23 | 0.591,0.273,0.136
24 | 0.609,0.261,0.130
25 | 0.583,0.292,0.125
26 | 0.607,0.286,0.107
27 | 0.600,0.280,0.120
28 | 0.619,0.286,0.095
29 | 0.571,0.286,0.143
30 | 0.588,0.294,0.118
31 | 0.611,0.278,0.111
32 | 0.625,0.250,0.125
33 | 0.611,0.278,0.111
34 | 0.600,0.267,0.133
35 | 0.600,0.267,0.133
36 | 0.611,0.278,0.111
37 | 0.615,0.269,0.115
38 | 0.615,0.269,0.115
39 | 0.600,0.267,0.133
40 | 0.615,0.269,0.115
41 | 0.594,0.281,0.125
42 | 0.594,0.281,0.125
43 | 0.594,0.281,0.125
44 | 0.630,0.259,0.111
45 | 0.609,0.261,0.130
46 | 0.591,0.273,0.136
47 | 0.615,0.231,0.154
48 | 0.600,0.267,0.133
49 | 0.600,0.250,0.150
50 | 0.571,0.286,0.143
51 | 0.609,0.261,0.130
52 | 0.611,0.278,0.111
53 | 0.609,0.261,0.130
54 | 0.591,0.273,0.136
55 | 0.615,0.269,0.115
56 | 0.591,0.273,0.136
57 | 0.600,0.250,0.150
58 | 0.600,0.267,0.133
59 | 0.611,0.278,0.111
60 | 0.588,0.294,0.118
61 | 0.609,0.261,0.130
62 | 0.615,0.269,0.115
63 | 0.594,0.281,0.125
64 | 0.594,0.281,0.125
65 | 0.581,0.290,0.129
66 | 0.594,0.281,0.125
67 | 0.583,0.292,0.125
68 | 0.615,0.269,0.115
69 | 0.609,0.261,0.130
70 | 0.600,0.280,0.120
71 | 0.586,0.276,0.138
72 | 0.600,0.280,0.120
73 | 0.606,0.273,0.121
74 | 0.600,0.267,0.133
75 | 0.595,0.270,0.135
76 | 0.571,0.286,0.143
77 | 0.583,0.278,0.139
78 | 0.579,0.289,0.132
79 | 0.571,0.286,0.143
80 | 0.564,0.282,0.154
81 | 0.559,0.294,0.147
82 | 0.579,0.263,0.158
83 | 0.571,0.286,0.143
84 | 0.571,0.286,0.143
85 | 0.590,0.282,0.128
86 | 0.588,0.294,0.118
87 | 0.591,0.273,0.136
88 | 0.590,0.282,0.128
89 | 0.605,0.279,0.116
90 | 0.595,0.286,0.119
91 | 0.583,0.292,0.125
92 | 0.518,0.304,0.179
93 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData_Books/bluebook.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.273,0.347,0.380
3 | 0.271,0.347,0.382
4 | 0.271,0.343,0.386
5 | 0.267,0.348,0.385
6 | 0.264,0.349,0.388
7 | 0.268,0.346,0.386
8 | 0.268,0.346,0.386
9 | 0.268,0.346,0.386
10 | 0.264,0.349,0.388
11 | 0.269,0.346,0.385
12 | 0.268,0.346,0.386
13 | 0.268,0.346,0.386
14 | 0.262,0.349,0.389
15 | 0.266,0.347,0.387
16 | 0.260,0.350,0.390
17 | 0.264,0.347,0.388
18 | 0.264,0.347,0.388
19 | 0.264,0.347,0.388
20 | 0.263,0.347,0.390
21 | 0.263,0.347,0.390
22 | 0.261,0.348,0.391
23 | 0.261,0.348,0.391
24 | 0.261,0.348,0.391
25 | 0.263,0.351,0.386
26 | 0.261,0.348,0.391
27 | 0.261,0.348,0.391
28 | 0.265,0.345,0.389
29 | 0.259,0.348,0.393
30 | 0.264,0.345,0.391
31 | 0.264,0.345,0.391
32 | 0.257,0.349,0.394
33 | 0.262,0.346,0.393
34 | 0.262,0.346,0.393
35 | 0.257,0.352,0.390
36 | 0.260,0.346,0.394
37 | 0.262,0.350,0.388
38 | 0.257,0.347,0.396
39 | 0.263,0.343,0.394
40 | 0.260,0.350,0.390
41 | 0.257,0.347,0.396
42 | 0.257,0.347,0.396
43 | 0.262,0.350,0.388
44 | 0.260,0.346,0.394
45 | 0.260,0.346,0.394
46 | 0.262,0.350,0.388
47 | 0.257,0.347,0.396
48 | 0.260,0.350,0.390
49 | 0.260,0.350,0.390
50 | 0.257,0.347,0.396
51 | 0.257,0.347,0.396
52 | 0.257,0.347,0.396
53 | 0.257,0.347,0.396
54 | 0.257,0.347,0.396
55 | 0.257,0.347,0.396
56 | 0.255,0.353,0.392
57 | 0.260,0.346,0.394
58 | 0.260,0.346,0.394
59 | 0.260,0.346,0.394
60 | 0.255,0.349,0.396
61 | 0.255,0.349,0.396
62 | 0.260,0.346,0.394
63 | 0.260,0.346,0.394
64 | 0.260,0.346,0.394
65 | 0.260,0.346,0.394
66 | 0.260,0.346,0.394
67 | 0.260,0.346,0.394
68 | 0.260,0.346,0.394
69 | 0.260,0.346,0.394
70 | 0.260,0.346,0.394
71 | 0.255,0.353,0.392
72 | 0.257,0.347,0.396
73 | 0.255,0.353,0.392
74 | 0.260,0.346,0.394
75 | 0.260,0.346,0.394
76 | 0.260,0.346,0.394
77 | 0.257,0.352,0.390
78 | 0.255,0.349,0.396
79 | 0.260,0.346,0.394
80 | 0.260,0.346,0.394
81 | 0.260,0.346,0.394
82 | 0.262,0.350,0.388
83 | 0.262,0.350,0.388
84 | 0.262,0.350,0.388
85 | 0.257,0.347,0.396
86 | 0.257,0.347,0.396
87 | 0.257,0.347,0.396
88 | 0.257,0.347,0.396
89 | 0.263,0.343,0.394
90 | 0.258,0.351,0.392
91 | 0.260,0.344,0.396
92 | 0.258,0.344,0.398
93 | 0.258,0.344,0.398
94 | 0.258,0.344,0.398
95 | 0.261,0.348,0.391
96 | 0.258,0.344,0.398
97 | 0.258,0.344,0.398
98 | 0.258,0.344,0.398
99 | 0.258,0.344,0.398
100 | 0.256,0.344,0.400
101 | 0.253,0.352,0.396
102 | 0.256,0.344,0.400
103 | 0.256,0.344,0.400
104 | 0.256,0.344,0.400
105 | 0.256,0.344,0.400
106 | 0.258,0.344,0.398
107 | 0.255,0.351,0.394
108 | 0.255,0.347,0.398
109 | 0.262,0.350,0.388
110 | 0.260,0.346,0.394
111 | 0.260,0.346,0.394
112 | 0.255,0.349,0.396
113 | 0.257,0.349,0.394
114 | 0.265,0.345,0.389
115 | 0.264,0.347,0.388
116 | 0.271,0.346,0.383
117 | 0.275,0.345,0.380
118 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData_Books/purplebook.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.286,0.286,0.429
3 | 0.316,0.263,0.421
4 | 0.294,0.294,0.412
5 | 0.312,0.250,0.437
6 | 0.333,0.267,0.400
7 | 0.333,0.267,0.400
8 | 0.312,0.250,0.437
9 | 0.312,0.250,0.437
10 | 0.312,0.250,0.437
11 | 0.312,0.250,0.437
12 | 0.312,0.250,0.437
13 | 0.312,0.250,0.437
14 | 0.312,0.250,0.437
15 | 0.333,0.267,0.400
16 | 0.286,0.286,0.429
17 | 0.286,0.286,0.429
18 | 0.286,0.286,0.429
19 | 0.286,0.286,0.429
20 | 0.286,0.286,0.429
21 | 0.286,0.286,0.429
22 | 0.286,0.286,0.429
23 | 0.286,0.286,0.429
24 | 0.286,0.286,0.429
25 | 0.286,0.286,0.429
26 | 0.286,0.286,0.429
27 | 0.286,0.286,0.429
28 | 0.286,0.286,0.429
29 | 0.286,0.286,0.429
30 | 0.286,0.286,0.429
31 | 0.286,0.286,0.429
32 | 0.286,0.286,0.429
33 | 0.286,0.286,0.429
34 | 0.286,0.286,0.429
35 | 0.286,0.286,0.429
36 | 0.286,0.286,0.429
37 | 0.286,0.286,0.429
38 | 0.286,0.286,0.429
39 | 0.286,0.286,0.429
40 | 0.286,0.286,0.429
41 | 0.308,0.231,0.462
42 | 0.308,0.231,0.462
43 | 0.286,0.286,0.429
44 | 0.286,0.286,0.429
45 | 0.308,0.231,0.462
46 | 0.308,0.231,0.462
47 | 0.308,0.231,0.462
48 | 0.286,0.286,0.429
49 | 0.308,0.231,0.462
50 | 0.308,0.231,0.462
51 | 0.308,0.231,0.462
52 | 0.308,0.231,0.462
53 | 0.308,0.231,0.462
54 | 0.333,0.250,0.417
55 | 0.333,0.250,0.417
56 | 0.333,0.250,0.417
57 | 0.333,0.250,0.417
58 | 0.333,0.250,0.417
59 | 0.333,0.250,0.417
60 | 0.333,0.250,0.417
61 | 0.333,0.250,0.417
62 | 0.333,0.250,0.417
63 | 0.333,0.250,0.417
64 | 0.333,0.250,0.417
65 | 0.333,0.250,0.417
66 | 0.333,0.250,0.417
67 | 0.333,0.250,0.417
68 | 0.308,0.231,0.462
69 | 0.308,0.231,0.462
70 | 0.308,0.231,0.462
71 | 0.308,0.231,0.462
72 | 0.308,0.231,0.462
73 | 0.286,0.286,0.429
74 | 0.286,0.286,0.429
75 | 0.308,0.231,0.462
76 | 0.286,0.286,0.429
77 | 0.286,0.286,0.429
78 | 0.286,0.286,0.429
79 | 0.286,0.286,0.429
80 | 0.286,0.286,0.429
81 | 0.308,0.231,0.462
82 | 0.286,0.286,0.429
83 | 0.286,0.286,0.429
84 | 0.286,0.286,0.429
85 | 0.286,0.286,0.429
86 | 0.286,0.286,0.429
87 | 0.286,0.286,0.429
88 | 0.286,0.286,0.429
89 | 0.286,0.286,0.429
90 | 0.286,0.286,0.429
91 | 0.286,0.286,0.429
92 | 0.286,0.286,0.429
93 | 0.286,0.286,0.429
94 | 0.286,0.286,0.429
95 | 0.286,0.286,0.429
96 | 0.286,0.286,0.429
97 | 0.286,0.286,0.429
98 | 0.286,0.286,0.429
99 | 0.286,0.286,0.429
100 | 0.286,0.286,0.429
101 | 0.286,0.286,0.429
102 | 0.286,0.286,0.429
103 | 0.286,0.286,0.429
104 | 0.308,0.231,0.462
105 | 0.308,0.231,0.462
106 | 0.286,0.286,0.429
107 | 0.312,0.250,0.437
108 | 0.316,0.263,0.421
109 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/SampleData_Books/yellowbook.csv:
--------------------------------------------------------------------------------
1 | Red,Green,Blue
2 | 0.475,0.313,0.212
3 | 0.470,0.320,0.210
4 | 0.475,0.317,0.208
5 | 0.476,0.320,0.204
6 | 0.480,0.314,0.206
7 | 0.475,0.317,0.208
8 | 0.476,0.320,0.204
9 | 0.476,0.320,0.204
10 | 0.475,0.323,0.202
11 | 0.469,0.327,0.204
12 | 0.469,0.323,0.208
13 | 0.468,0.330,0.202
14 | 0.473,0.323,0.204
15 | 0.473,0.323,0.204
16 | 0.473,0.323,0.204
17 | 0.467,0.326,0.207
18 | 0.472,0.326,0.202
19 | 0.466,0.330,0.205
20 | 0.471,0.329,0.200
21 | 0.470,0.325,0.205
22 | 0.470,0.325,0.205
23 | 0.468,0.329,0.203
24 | 0.468,0.329,0.203
25 | 0.468,0.329,0.203
26 | 0.468,0.325,0.208
27 | 0.461,0.329,0.211
28 | 0.473,0.324,0.203
29 | 0.466,0.329,0.205
30 | 0.466,0.329,0.205
31 | 0.466,0.329,0.205
32 | 0.466,0.329,0.205
33 | 0.466,0.329,0.205
34 | 0.458,0.333,0.208
35 | 0.458,0.333,0.208
36 | 0.465,0.324,0.211
37 | 0.464,0.333,0.203
38 | 0.464,0.333,0.203
39 | 0.463,0.328,0.209
40 | 0.463,0.328,0.209
41 | 0.463,0.328,0.209
42 | 0.463,0.328,0.209
43 | 0.463,0.328,0.209
44 | 0.463,0.328,0.209
45 | 0.463,0.328,0.209
46 | 0.471,0.324,0.206
47 | 0.471,0.324,0.206
48 | 0.463,0.328,0.209
49 | 0.463,0.328,0.209
50 | 0.463,0.328,0.209
51 | 0.460,0.333,0.206
52 | 0.460,0.333,0.206
53 | 0.459,0.328,0.213
54 | 0.467,0.333,0.200
55 | 0.466,0.328,0.207
56 | 0.456,0.333,0.211
57 | 0.463,0.333,0.204
58 | 0.472,0.321,0.208
59 | 0.462,0.327,0.212
60 | 0.451,0.333,0.216
61 | 0.469,0.327,0.204
62 | 0.458,0.333,0.208
63 | 0.469,0.327,0.204
64 | 0.458,0.333,0.208
65 | 0.458,0.333,0.208
66 | 0.457,0.326,0.217
67 | 0.467,0.333,0.200
68 | 0.465,0.326,0.209
69 | 0.465,0.326,0.209
70 | 0.465,0.326,0.209
71 | 0.465,0.326,0.209
72 | 0.457,0.326,0.217
73 | 0.458,0.333,0.208
74 | 0.460,0.340,0.200
75 | 0.463,0.333,0.204
76 | 0.464,0.321,0.214
77 | 0.475,0.322,0.203
78 | 0.469,0.328,0.203
79 | 0.469,0.328,0.203
80 | 0.469,0.328,0.203
81 | 0.463,0.328,0.209
82 | 0.463,0.328,0.209
83 | 0.471,0.324,0.206
84 | 0.471,0.324,0.206
85 | 0.471,0.324,0.206
86 | 0.464,0.333,0.203
87 | 0.471,0.324,0.206
88 | 0.463,0.328,0.209
89 | 0.463,0.328,0.209
90 | 0.470,0.333,0.197
91 | 0.469,0.328,0.203
92 | 0.469,0.328,0.203
93 | 0.459,0.328,0.213
94 | 0.466,0.328,0.207
95 | 0.456,0.333,0.211
96 | 0.456,0.333,0.211
97 | 0.456,0.333,0.211
98 | 0.463,0.333,0.204
99 | 0.472,0.321,0.208
100 | 0.451,0.333,0.216
101 | 0.469,0.327,0.204
102 | 0.469,0.327,0.204
103 | 0.469,0.327,0.204
104 | 0.463,0.333,0.204
105 | 0.468,0.323,0.210
106 | 0.474,0.329,0.197
107 | 0.482,0.318,0.200
108 | 0.473,0.323,0.204
109 | 0.460,0.320,0.220
110 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/sketches/object_color_capture/object_color_capture.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Object color sampler
3 | --------------------
4 | Samples the color of objects and outputs CSV logfile to serial console
5 |
6 | Hardware: Arduino Nano 33 BLE Sense board.
7 |
8 | Usage: Place object of interest to the color sensor
9 |
10 | This example code is in the public domain.
11 | */
12 |
13 | #include <Arduino_APDS9960.h>
14 |
15 | void setup() {
16 |
17 | Serial.begin(9600);
18 | while (!Serial) {};
19 |
20 | if (!APDS.begin()) {
21 | Serial.println("Error initializing APDS9960 sensor.");
22 | }
23 |
24 | // print the header
25 | Serial.println("Red,Green,Blue");
26 | }
27 |
28 | void loop() {
29 | int r, g, b, c, p;
30 | float sum;
31 |
32 | // wait for proximity and color sensor data
33 | while (!APDS.colorAvailable() || !APDS.proximityAvailable()) {}
34 |
35 | // read the color and proximity data
36 | APDS.readColor(r, g, b, c);
37 | sum = r + g + b;
38 | p = APDS.readProximity();
39 |
40 | // if the object is close and well enough illuminated
41 | if (p == 0 && c > 10 && sum > 0) {
42 |
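// dividing by the sum normalizes out overall brightness, leaving pure color ratios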
43 | float redRatio = r / sum;
44 | float greenRatio = g / sum;
45 | float blueRatio = b / sum;
46 |
47 | // print the data in CSV format
48 | Serial.print(redRatio, 3);
49 | Serial.print(',');
50 | Serial.print(greenRatio, 3);
51 | Serial.print(',');
52 | Serial.print(blueRatio, 3);
53 | Serial.println();
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/Examples/FruitToEmoji/sketches/object_color_classify/object_color_classify.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Object classifier by color
3 | --------------------------
4 |
5 | Uses RGB color sensor input to Neural Network to classify objects
6 | Outputs object class to serial using unicode emojis
7 |
8 | Note: The direct use of C/C++ pointers, namespaces, and dynamic memory is generally
9 | discouraged in Arduino examples, and in the future the TensorFlowLite library
10 | might change to make the sketch simpler.
11 |
12 | Hardware: Arduino Nano 33 BLE Sense board.
13 |
14 | Created by Don Coleman, Sandeep Mistry
15 | Adapted by Dominic Pajak
16 |
17 | This example code is in the public domain.
18 | */
19 |
20 | // Arduino_TensorFlowLite - Version: 0.alpha.precompiled
21 | #include <TensorFlowLite.h>
22 |
23 | #include <tensorflow/lite/micro/all_ops_resolver.h>
24 | #include <tensorflow/lite/micro/micro_error_reporter.h>
25 | #include <tensorflow/lite/micro/micro_interpreter.h>
26 | #include <tensorflow/lite/schema/schema_generated.h>
27 | #include <tensorflow/lite/version.h>
28 | #include <Arduino_APDS9960.h>
29 | #include "model.h"
30 |
31 | // global variables used for TensorFlow Lite (Micro)
32 | tflite::MicroErrorReporter tflErrorReporter;
33 |
34 | // pull in all the TFLM ops, you can remove this line and
35 | // only pull in the TFLM ops you need, if you would like to reduce
36 | // the compiled size of the sketch.
37 | tflite::AllOpsResolver tflOpsResolver;
38 |
39 | const tflite::Model* tflModel = nullptr;
40 | tflite::MicroInterpreter* tflInterpreter = nullptr;
41 | TfLiteTensor* tflInputTensor = nullptr;
42 | TfLiteTensor* tflOutputTensor = nullptr;
43 |
44 | // Create a static memory buffer for TFLM, the size may need to
45 | // be adjusted based on the model you are using
46 | constexpr int tensorArenaSize = 8 * 1024;
47 | byte tensorArena[tensorArenaSize];
48 |
49 | // array to map gesture index to a name
50 | const char* CLASSES[] = {
51 | "bluebook", // u8"\U0001F34E", // Apple // heart
52 | "purplebook", // u8"\U0001F34C", // Banana // minion
53 | "yellowbook" // u8"\U0001F34A" // Orange // sun
54 | };
55 |
56 | #define NUM_CLASSES (sizeof(CLASSES) / sizeof(CLASSES[0]))
57 |
58 | void setup() {
59 | Serial.begin(9600);
60 | while (!Serial) {};
61 |
62 | Serial.println("Object classification using RGB color sensor");
63 | Serial.println("--------------------------------------------");
64 | Serial.println("Arduino Nano 33 BLE Sense running TensorFlow Lite Micro");
65 | Serial.println("");
66 |
67 | if (!APDS.begin()) {
68 | Serial.println("Error initializing APDS9960 sensor.");
69 | }
70 |
71 | // get the TFL representation of the model byte array
72 | tflModel = tflite::GetModel(model);
73 | if (tflModel->version() != TFLITE_SCHEMA_VERSION) {
74 | Serial.println("Model schema mismatch!");
75 | while (1);
76 | }
77 |
78 | // Create an interpreter to run the model
79 | tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize, &tflErrorReporter);
80 |
81 | // Allocate memory for the model's input and output tensors
82 | tflInterpreter->AllocateTensors();
83 |
84 | // Get pointers for the model's input and output tensors
85 | tflInputTensor = tflInterpreter->input(0);
86 | tflOutputTensor = tflInterpreter->output(0);
87 | }
88 |
89 | void loop() {
90 | int r, g, b, p, c;
91 | float sum;
92 |
93 | // check if both color and proximity data is available to sample
94 | while (!APDS.colorAvailable() || !APDS.proximityAvailable()) {}
95 |
96 | // read the color and proximity sensor
97 | APDS.readColor(r, g, b, c);
98 | p = APDS.readProximity();
99 | sum = r + g + b;
100 |
101 | // check if there's an object close and well illuminated enough
102 | if (p == 0 && c > 10 && sum > 0) {
103 |
104 | float redRatio = r / sum;
105 | float greenRatio = g / sum;
106 | float blueRatio = b / sum;
107 |
108 | // input sensor data to model
109 | tflInputTensor->data.f[0] = redRatio;
110 | tflInputTensor->data.f[1] = greenRatio;
111 | tflInputTensor->data.f[2] = blueRatio;
112 |
113 | // Run inferencing
114 | TfLiteStatus invokeStatus = tflInterpreter->Invoke();
115 | if (invokeStatus != kTfLiteOk) {
116 | Serial.println("Invoke failed!");
117 | while (1);
118 | return;
119 | }
120 |
121 | // Output results
122 | for (int i = 0; i < NUM_CLASSES; i++) {
123 | Serial.print(CLASSES[i]);
124 | Serial.print(" ");
125 | Serial.print(int(tflOutputTensor->data.f[i] * 100));
126 | Serial.print("%\n");
127 | }
128 | Serial.println();
129 |
130 | // Wait for the object to be moved away
131 | while (!APDS.proximityAvailable() || (APDS.readProximity() == 0)) {}
132 | }
133 |
134 | }
135 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/ArduinoSketches/Emoji_Button/Emoji_Button.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Emoji Button
3 |
4 | This example sends an emoji character over USB HID when the button is pressed.
5 |
6 | Note: Only macOS and Linux are supported at this time, and the use of
7 | #define is generally discouraged in Arduino examples 💪 💪 💪 💪 💪
8 |
9 | The circuit:
10 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
11 | - Button connected to pin 3 and GND.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 |
15 | This example code is in the public domain.
16 | */
17 |
18 | #include <PluggableUSBHID.h>
19 | #include <USBKeyboard.h>
20 |
21 | // Select an OS:
22 | #define MACOS // You'll need to enable and select the unicode keyboard: System Preferences -> Input Sources -> + -> Others -> Unicode Hex Input
23 | //#define LINUX
24 |
25 | #if !defined(MACOS) && !defined(LINUX)
26 | #error "Please select an OS!"
27 | #endif
28 |
29 | // use table: https://apps.timwhitlock.info/emoji/tables/unicode
30 | const int bicep = 0x1f4aa;
31 | const int punch = 0x1f44a;
32 |
33 | const int buttonPin = 3;
34 |
35 | USBKeyboard keyboard;
36 |
37 | int previousButtonState = HIGH;
38 |
39 | void setup() {
40 | pinMode(buttonPin, INPUT_PULLUP);
41 | }
42 |
43 | void loop() {
44 | int buttonState = digitalRead(buttonPin);
45 |
46 | if (buttonState != previousButtonState) {
47 | if (buttonState == LOW) {
48 | // pressed
49 | sentUtf8(bicep);
50 | } else {
51 | // released
52 | }
53 |
54 | previousButtonState = buttonState;
55 | }
56 | }
57 |
58 | void sentUtf8(unsigned long c) {
59 | String s;
60 |
61 | #if defined(MACOS)
62 | // https://apple.stackexchange.com/questions/183045/how-can-i-type-unicode-characters-without-using-the-mouse
63 |
64 | s = String(utf8ToUtf16(c), HEX);
65 |
66 | for (int i = 0; i < s.length(); i++) {
67 | keyboard.key_code(s[i], KEY_ALT);
68 | }
69 | #elif defined(LINUX)
70 | s = String(c, HEX);
71 |
72 | keyboard.key_code('u', KEY_CTRL | KEY_SHIFT);
73 |
74 | for (int i = 0; i < s.length(); i++) {
75 | keyboard.key_code(s[i]);
76 | }
77 | #endif
78 | keyboard.key_code(' ');
79 | }
80 |
81 | // based on https://stackoverflow.com/a/6240819/2020087
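// e.g. the bicep emoji U+1F4AA becomes the UTF-16 surrogate pair 0xD83D 0xDCAA,
// which this function packs into the single value 0xD83DDCAA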
82 | unsigned long utf8ToUtf16(unsigned long in) {
83 | unsigned long result = 0;
84 |
85 | in -= 0x10000;
86 |
87 | result |= (in & 0x3ff);
88 | result |= (in << 6) & 0x03ff0000;
89 | result |= 0xd800dc00;
90 |
91 | return result;
92 | }
93 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/ArduinoSketches/HardwareTest/HardwareTest.ino:
--------------------------------------------------------------------------------
1 | /*
2 | Hardware Test
3 |
4 | This example performs a basic hardware test of the board which includes
5 | testing the on-board IMU, LED and external button.
6 |
7 | When the button is pressed the on-board LED will turn on.
8 |
9 | The circuit:
10 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
11 | - Button connected to pin 3 and GND.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 |
15 | This example code is in the public domain.
16 | */
17 |
18 | #include <Arduino_LSM9DS1.h>
19 |
20 | const int buttonPin = 3;
21 | const int ledPin = LED_BUILTIN;
22 |
23 | int buttonState = LOW;
24 | int previousButtonState = HIGH;
25 |
26 | void setup() {
27 | Serial.begin(9600);
28 | //while (!Serial);
29 | Serial.println("Arduino ML Workshop Hardware Test");
30 |
31 | if (!IMU.begin()) {
32 | Serial.println("Failed to initialize IMU!");
33 | while (1);
34 | }
35 |
36 | // initialize the LED pin as an output:
37 | pinMode(ledPin, OUTPUT);
38 | // initialize the push button pin as an input with (internal) pullup:
39 | pinMode(buttonPin, INPUT_PULLUP);
40 | }
41 |
42 | void loop() {
43 | // read the state of the push button pin:
44 | buttonState = digitalRead(buttonPin);
45 |
46 | // HIGH and LOW are opposite because we are using an internal pullup resistor.
47 | // LOW is pressed. HIGH is released.
48 |
49 | if (buttonState == LOW) {
50 | // Button is pressed, turn the LED on
51 | digitalWrite(ledPin, HIGH);
52 | if (buttonState != previousButtonState) {
53 | Serial.println("LED is ON");
54 | }
55 | } else {
56 | // Button is released, turn the LED off
57 | digitalWrite(ledPin, LOW);
58 | if (buttonState != previousButtonState) {
59 | Serial.println("LED is OFF");
60 | }
61 | }
62 |
63 | // save the previous state of the button since we only print
64 | // the LED status when the state changes
65 | previousButtonState = buttonState;
66 | }
67 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/ArduinoSketches/IMU_Capture/IMU_Capture.ino:
--------------------------------------------------------------------------------
1 | /*
2 | IMU Capture
3 |
4 | This example uses the on-board IMU to read acceleration and gyroscope
5 | data and prints it to the Serial Monitor for one second when significant
6 | motion is detected.
7 |
8 | You can also use the Serial Plotter to graph the data.
9 |
10 | The circuit:
11 | - Arduino Nano 33 BLE or Arduino Nano 33 BLE Sense board.
12 |
13 | Created by Don Coleman, Sandeep Mistry
14 | Modified by Dominic Pajak, Sandeep Mistry
15 |
16 | This example code is in the public domain.
17 | */
18 |
19 | #include <Arduino_LSM9DS1.h>
20 |
21 | const float accelerationThreshold = 2.5; // threshold of significant motion in G's
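// at rest, gravity alone sums to about 1 G, so 2.5 G only triggers on a strong, deliberate motion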
22 | const int numSamples = 119;
23 |
24 | int samplesRead = numSamples;
25 |
26 | void setup() {
27 | Serial.begin(9600);
28 | while (!Serial);
29 |
30 | if (!IMU.begin()) {
31 | Serial.println("Failed to initialize IMU!");
32 | while (1);
33 | }
34 |
35 | // print the header
36 | Serial.println("aX,aY,aZ,gX,gY,gZ");
37 | }
38 |
39 | void loop() {
40 | float aX, aY, aZ, gX, gY, gZ;
41 |
42 | // wait for significant motion
43 | while (samplesRead == numSamples) {
44 | if (IMU.accelerationAvailable()) {
45 | // read the acceleration data
46 | IMU.readAcceleration(aX, aY, aZ);
47 |
48 | // sum up the absolutes
49 | float aSum = fabs(aX) + fabs(aY) + fabs(aZ);
50 |
51 | // check if it's above the threshold
52 | if (aSum >= accelerationThreshold) {
53 | // reset the sample read count
54 | samplesRead = 0;
55 | break;
56 | }
57 | }
58 | }
59 |
60 | // check if the all the required samples have been read since
61 | // the last time the significant motion was detected
62 | while (samplesRead < numSamples) {
63 | // check if both new acceleration and gyroscope data is
64 | // available
65 | if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) {
66 | // read the acceleration and gyroscope data
67 | IMU.readAcceleration(aX, aY, aZ);
68 | IMU.readGyroscope(gX, gY, gZ);
69 |
70 | samplesRead++;
71 |
72 | // print the data in CSV format
73 | Serial.print(aX, 3);
74 | Serial.print(',');
75 | Serial.print(aY, 3);
76 | Serial.print(',');
77 | Serial.print(aZ, 3);
78 | Serial.print(',');
79 | Serial.print(gX, 3);
80 | Serial.print(',');
81 | Serial.print(gY, 3);
82 | Serial.print(',');
83 | Serial.print(gZ, 3);
84 | Serial.println();
85 |
86 | if (samplesRead == numSamples) {
87 | // add an empty line if it's the last sample
88 | Serial.println();
89 | }
90 | }
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/README.md:
--------------------------------------------------------------------------------
1 | ## Troubleshooting
2 | - [Why the loss does not go down? Arduino tinyml workshop gesture classification](https://www.loom.com/share/ebee6b3a1f594fda84ed17bf33252e77)
3 | - [How to train/run Google Colab? Arduino Tinyml Workshop Gesture classification](https://www.loom.com/share/a7cf8894e4c94b9cb30cb3d8cbd9a496)
4 | - The loss from Colab is always 0: you might have only one class; you need at least 2 classes
5 | - When collecting data from the Serial Monitor, uncheck "Show timestamp" so timestamps don't end up in your flex.csv file
6 | - When compiling "IMU_Classifier" and you get "Error on compiling Arduino Nano 33 BLE", check the Arduino_TensorflowLite library version in your Arduino IDE; it should be "2.1.0-ALPHA" without any "precompiled" label.
7 | - Library version: Arduino_LSM9DS1 @1.1.0, Arduino_TensorflowLite @2.1.0-ALPHA
8 |
9 | [Demo Video](https://www.loom.com/share/c504446e1d284736b9309986e2b2a4ed)
10 |
11 | # Forked from [ArduinoTensorFlowLiteTutorials](https://github.com/arduino/ArduinoTensorFlowLiteTutorials)
12 |
13 | # Machine Learning on Arduino
14 | ## TensorFlow Lite gesture training tutorial
15 |
16 | In this tutorial we will teach a board to recognise gestures! We'll capture motion data from the [Arduino Nano 33 BLE Sense](https://store.arduino.cc/arduino-nano-33-ble-sense) board, import it into TensorFlow to train a model, and deploy a classifier onto the board using [TensorFlow Lite for microcontrollers](https://www.tensorflow.org/lite/microcontrollers/overview).
17 |
18 | ### Credits
19 |
20 | This tutorial is adapted from the [workshop](https://github.com/sandeepmistry/aimldevfest-workshop-2019) Sandeep Mistry, Arduino and Don Coleman, Chariot Solutions presented at AI/ML Devfest in September 2019.
21 |
22 |
23 |
24 |
25 | ## Exercises
26 |
27 | * [Exercise 1: Development Environment](exercises/exercise1.md)
28 | * [Exercise 2: Connecting the Board](exercises/exercise2.md)
29 | * [Exercise 3: Visualizing the IMU Data](exercises/exercise3.md)
30 | * [Exercise 4: Gather the Training Data](exercises/exercise4.md)
31 | * [Exercise 5: Machine Learning](exercises/exercise5.md)
32 | * [Exercise 6: Classifying IMU Data](exercises/exercise6.md)
33 | * [Exercise 7: Gesture Controlled USB Emoji Keyboard](exercises/exercise7.md)
34 | * [Exercise 8: Next Steps](exercises/exercise8.md)
35 |
36 |
37 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise1.md:
--------------------------------------------------------------------------------
1 | # Exercise 1 - Development Environment
2 |
3 | Set up your computer for Arduino development.
4 |
5 | ## Arduino IDE
6 | Install the Arduino IDE from https://arduino.cc/downloads
7 |
8 | 
9 |
10 | ### Arduino nRF528x Boards Definitions
11 | Use the Arduino Boards Manager to install the Arduino nRF528x (Mbed OS) board definitions. Open the Boards Manager using the menu _Tools -> Board: -> Boards Manager..._
12 |
13 | 
14 |
15 | Search for "Nano 33 BLE" and install the Arduino nRF528x Boards (Mbed OS) definitions.
16 |
17 | 
18 |
19 | ### Arduino Libraries
20 | Install the following Arduino libraries using the Library manager:
21 |
22 | * TensorFlow Lite library (search for "Arduino_TensorFlowLite")
23 | * Arduino LSM9DS1 library (search for "Arduino_LSM9DS1")
24 |
25 | Open the library manager using the menu _Tools -> Manage Libraries..._
26 |
27 | 
28 |
29 | Search for "Arduino_TensorFlowLite". Click the row and press the __Install__ button to install TensorFlow Lite for Microcontrollers
30 |
31 | Search for "Arduino_LSM9DS1". Click the row and press the __Install__ button to install the Arduino LSM9DS1 accelerometer, magnetometer, and gyroscope library.
32 |
33 | 
34 |
35 |
36 | __Linux users__ may need to configure permissions so their user can access the serial port. See the [Getting Started Guide for Linux](https://www.arduino.cc/en/guide/linux) on the Arduino website for more information.
37 |
38 | Next [Exercise 2: Connecting the board](exercise2.md)
39 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise2.md:
--------------------------------------------------------------------------------
1 | # Exercise 2: Connecting the board
2 |
3 |
4 |
5 |
6 | 
7 |
8 |
9 | ## Unboxing and set up
10 |
11 | 1. Remove the Arduino Nano 33 BLE Sense board from the box
12 | 1. Plug the micro USB cable into the board and your computer
13 | 1. Open the Arduino IDE application on your computer
14 | 1. Choose the board `Tools -> Board -> Arduino Nano 33 BLE`
15 | 1. Choose the port `Tools -> Port -> COM5 (Arduino Nano 33 BLE)` Note that the actual port may be different on your computer
16 |
17 |
18 | ## LSM9DS1 Examples (Optional)
19 |
20 | You can try the example sketches that came with the LSM9DS1 library.
21 |
22 | 1. Open the Simple Accelerometer sketch using `File -> Examples -> Arduino_LSM9DS1 -> SimpleAccelerometer`
23 | 1. Upload the sketch to the board using the `Sketch -> Upload` menu or the right arrow button from the tool bar.
24 | 1. Open the Serial Monitor `Tools -> Serial Monitor` to view the text output
25 | 1. Open the Serial Plotter `Tools -> Serial Plotter` to view the output on a graph
26 |
27 | 
28 |
29 | 
30 |
31 | Next [Exercise 3: Visualize the IMU Data](exercise3.md)
32 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise3.md:
--------------------------------------------------------------------------------
1 | # Exercise 3: Visualize the IMU Data
2 |
3 | 1. Open __ArduinoSketches/IMU_Capture/IMU_Capture.ino__ in the Arduino IDE.
4 | 1. Compile the sketch and upload it to the board: `Sketch -> Upload`
5 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
6 | 1. Once it detects significant motion, IMU data will be captured and output for 1 second
7 | 1. Close the Serial Monitor window
8 | 1. Open the Serial Plotter: `Tools -> Serial Plotter`
9 | 1. Perform a gesture
10 | 1. You'll see a graph of the captured data
11 | 1. Repeat capturing various gestures to get a sense of what the training data will look like
12 | 1. Close the Serial Plotter
13 |
14 | 
15 |
16 | 
17 |
18 | Next [Exercise 4: Gather the Training Data](exercise4.md)
19 |
20 |
21 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise4.md:
--------------------------------------------------------------------------------
1 | # Exercise 4: Gather the Training Data
2 |
3 | 1. Press the reset button on the board
4 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
5 | 1. Make a punch gesture with the board in your hand - you should see the sensor data log in the Serial Monitor
6 | 1. Repeat 10 times to gather more data
7 | 1. Copy and paste the data from the serial output to new text file called `punch.csv` using your favorite text editor
8 | 1. Close the Serial Monitor
9 | 1. Press the reset button on the board
10 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
11 | 1. Make a flex gesture with the board in your hand
12 | 1. Repeat 10 times
13 | 1. Copy and paste the serial output to new text file `flex.csv` using your favorite text editor
14 |
15 | 
16 |
17 | **Make sure both `flex.csv` and `punch.csv` have `aX,aY,aZ,gX,gY,gZ` as their first line**
18 |
19 | Next [Exercise 5: Machine Learning ](exercise5.md)
20 |
21 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise5.md:
--------------------------------------------------------------------------------
1 | # Exercise 5: Machine Learning
2 |
3 | We're going to use [Google Colab](https://colab.research.google.com) to train our machine learning model. Colab provides a Jupyter notebook that allows us to run our machine learning model in a web browser.
4 |
5 | 
6 |
7 | ## 3rd Party Cookies
8 |
9 | Some of you might see an error about 3rd party cookies.
10 |
11 | 
12 |
13 | You can enable 3rd party cookies, or better yet, add an exception for `[*.]googleusercontent.com`.
14 |
15 | 
16 |
17 | ## Open the Notebook
18 |
19 | Open the notebook in Colab.
20 |
21 | https://colab.research.google.com/github/arduino/ArduinoTensorFlowLiteTutorials/blob/master/GestureToEmoji/arduino_tinyml_workshop.ipynb
22 |
23 | Next [Exercise 6: Classifying IMU Data](exercise6.md)
24 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise6.md:
--------------------------------------------------------------------------------
1 | # Classifying IMU Data
2 |
3 | 1. Open __ArduinoSketches/IMU_Classifier/IMU_Classifier.ino__ in the Arduino IDE.
4 | 1. Switch to the model.h tab
5 | 1. Replace the contents of model.h with the version you downloaded from Colab
6 | 1. Upload the sketch: `Sketch -> Upload`
7 | 1. Open the Serial Monitor: `Tools -> Serial Monitor`
8 | 1. Press the button, and perform a gesture
9 | 1. The confidence of each gesture will be printed to the Serial Monitor (0 -> low confidence, 1 -> high confidence)
10 |
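The `model.h` you download is a C header that wraps the trained TensorFlow Lite model as a byte array named `model` (which the sketch passes to `tflite::GetModel`), roughly of this shape (the byte values shown here are illustrative, not real model contents):

```
const unsigned char model[] = {
  0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, // ... many more bytes ...
};
```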
11 | 
12 |
13 | Next [Exercise 7: Gesture Controlled USB Emoji Keyboard](exercise7.md)
14 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise7.md:
--------------------------------------------------------------------------------
1 | # Exercise 7: Gesture Controlled USB Emoji Keyboard
2 |
3 | The Emoji_Button example, __ArduinoSketches/Emoji_Button/Emoji_Button.ino__, shows how to create a USB keyboard that prints an emoji character. (This only works on Linux and macOS, so if you're running Windows, find a friend to work on this exercise.)
4 |
5 | Try combining the Emoji_Button example with the IMU_Classifier sketch to create a gesture-controlled emoji keyboard. 👊
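One possible shape for the combined logic, assuming the classifier produces a per-class confidence array (here called `output`, with illustrative index names; `sentUtf8`, `punch`, and `bicep` come from Emoji_Button.ino):

```
const float CONFIDENCE_THRESHOLD = 0.8;  // tune for your model

// after running inference in the classifier's loop():
if (output[punchIndex] > CONFIDENCE_THRESHOLD) {
  sentUtf8(punch);      // type the punch emoji over USB HID
} else if (output[flexIndex] > CONFIDENCE_THRESHOLD) {
  sentUtf8(bicep);      // type the bicep emoji
}
```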
6 |
7 | Next [Exercise 8: Next Steps](exercise8.md)
8 |
9 | The Emoji_Button example needs a button connected to pin 3 and GND.
10 | 
11 |
12 |
13 | Go to a text editor like Google Docs and select Unicode Hex Input as the keyboard input source:
14 | 
15 |
16 | You can install the unicode keyboard like this:
17 | 
18 | 
19 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/exercises/exercise8.md:
--------------------------------------------------------------------------------
1 | # Exercise 8: Next Steps
2 |
3 | Now that you have this working...
4 |
5 | - Load the code to record gestures. Create additional CSV files with more gestures. Retrain the model in Colab. Load the new model back onto the Arduino.
6 | - Note: you'll need to edit the code to add the names of the new gesture files
7 |
8 | - Try increasing and decreasing the number of recordings per gesture, how does this impact performance?
9 |
10 | - Try to only use the accelerometer or gyroscope data (not both), how does this impact performance?
11 |
12 | - Tweak the model structure and parameters
13 | - Can you get better results?
14 | - Can you reduce the size and still get "good" results?
15 |
16 | - Grab a [board](https://store.arduino.cc/usa/nano-33-ble-sense) and the [TinyML book](http://shop.oreilly.com/product/0636920254508.do) to continue at home.
17 |
18 |
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/AddZipLibrary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/AddZipLibrary.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/AddZipLibrary_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/AddZipLibrary_2.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/ArduinoIDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/ArduinoIDE.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/Arduino_logo_R_highquality.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/Arduino_logo_R_highquality.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/BoardManager-Menu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/BoardManager-Menu.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/BoardsManager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/BoardsManager.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/IDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/IDE.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/InstallBoardDefinitions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/InstallBoardDefinitions.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/JustDownload.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/JustDownload.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/ManageLibraries.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/ManageLibraries.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/accelerometer-example-serial-plotter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/accelerometer-example-serial-plotter.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/add-unicode1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/add-unicode1.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/add-unicode2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/add-unicode2.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/arduino-classifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/arduino-classifier.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/assembled-hardware.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/assembled-hardware.jpg
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/ble-sense.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/ble-sense.jpg
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/colab-3rd-party-cookie-exception.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/colab-3rd-party-cookie-exception.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/colab-error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/colab-error.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/colab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/colab.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/download-repo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/download-repo.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/library-arduinolsm9ds1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/library-arduinolsm9ds1.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/lsm9ds1-examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/lsm9ds1-examples.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/select-unicode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/select-unicode.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/serial-monitor-imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/serial-monitor-imu.png
--------------------------------------------------------------------------------
/Examples/GestureToEmoji/images/serial-plotter-imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/Examples/GestureToEmoji/images/serial-plotter-imu.png
--------------------------------------------------------------------------------
/Examples/Hand_tracking_KNN_Classifier/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 |   <meta charset="UTF-8">
5 |   <title>KNN Classification on Webcam Images with Hand pose model</title>
6 |   <!-- The original script includes (p5.js, ml5.js and sketch.js) were
7 |        stripped when this file was extracted; only the visible text below
8 |        survived, so the markup here is a reconstruction. -->
9 | </head>
10 | <body>
11 |   <h1>KNN Classification on Webcam Images with Hand pose model. Built with p5.js</h1>
12 |   <p>Loading Model...</p>
13 |   <p>
14 |     KNN Classifier with mobileNet model labeled this
15 |     as Class: ...
16 |     with a confidence of ...
17 |   </p>
18 |   <p>✊</p>
19 |   <p>0 Rock examples | Confidence in Rock is: 0</p>
20 |   <p>🖐</p>
21 |   <p>0 Paper examples | Confidence in Paper is: 0</p>
22 |   <p>✌️</p>
23 |   <p>0 Scissor examples | Confidence in Scissor is: 0</p>
24 | </body>
25 | </html>
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/README.md:
--------------------------------------------------------------------------------
1 | # Forked from [tflite examples](https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech)
2 |
3 | # Micro speech with LEDs example
4 |
5 | Connect two LEDs to pins D2 and D3. When you say "yes", the D3 LED will light up; when you say "no", the D2 LED will light up.
6 |
7 | This example shows how you can use TensorFlow Lite to run a 20 kilobyte neural
8 | network model to recognize keywords in speech. It's designed to run on systems
9 | with very small amounts of memory such as microcontrollers and DSPs.
10 |
11 | The example application listens to its surroundings with a microphone and
12 | indicates when it has detected a word by lighting an LED or displaying data on a
13 | screen, depending on the capabilities of the device.
14 |
15 | The code has a small footprint (for example around 22 kilobytes on a Cortex
16 | M3) and only uses about 10 kilobytes of RAM for working memory, so it's able to
17 | run on systems like an STM32F103 with only 20 kilobytes of total SRAM and 64
18 | kilobytes of Flash.
19 |
20 | ## Table of contents
21 |
22 | - [Deploy to Arduino](#deploy-to-arduino)
23 | - [Training your own model](#training-your-own-model)
24 |
25 | ## Deploy to Arduino
26 |
27 | The following instructions will help you build and deploy this sample
28 | to [Arduino](https://www.arduino.cc/) devices.
29 |
30 | The sample has been tested with the following devices:
31 |
32 | - [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers)
33 |
34 | The Arduino Nano 33 BLE Sense is currently the only Arduino with a built-in
35 | microphone. If you're using a different Arduino board and attaching your own
36 | microphone, you'll need to implement your own `audio_provider.cc`. The Nano 33
37 | BLE Sense also has a built-in LED, used to indicate that a word has been recognized.
38 |
39 | ### Install the Arduino_TensorFlowLite library
40 |
41 | This example application is included as part of the official TensorFlow Lite
42 | Arduino library. To install it, open the Arduino library manager in
43 | `Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`.
44 |
45 | ### Load and run the example
46 |
47 | Once the library has been added, go to `File -> Examples`. You should see an
48 | example near the bottom of the list named `TensorFlowLite:micro_speech`. Select
49 | it and click `micro_speech` to load the example.
50 |
51 | Use the Arduino IDE to build and upload the example. Once it is running, you
52 | should see the built-in LED on your device flashing. Saying the word "yes" will
53 | cause the LED to remain on for 3 seconds. The current model has fairly low
54 | accuracy, so you may have to repeat "yes" a few times.
55 |
56 | The program also outputs inference results to the serial port, which appear as
57 | follows:
58 |
59 | ```
60 | Heard yes (201) @4056ms
61 | Heard no (205) @6448ms
62 | Heard unknown (201) @13696ms
63 | Heard yes (205) @15000ms
64 | ```
65 |
66 | The number after each detected word is its score. By default, the program only
67 | considers matches as valid if their score is over 200, so all of the scores you
68 | see will be at least 200.
69 |
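   | The 200 cutoff isn't part of this fork's listing: in the upstream example it is the `detection_threshold` argument of the `RecognizeCommands` helper. A sketch of that constructor, with defaults assumed from the upstream `recognize_commands.h` (treat the exact values as illustrative):
   |
   | ```
   | // Scores are uint8_t, so 200/255 is roughly 78% confidence, averaged
   | // over the last second of inference results.
   | explicit RecognizeCommands(tflite::ErrorReporter* error_reporter,
   |                            int32_t average_window_duration_ms = 1000,
   |                            uint8_t detection_threshold = 200,
   |                            int32_t suppression_ms = 1500,
   |                            int32_t minimum_count = 3);
   | ```
   |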
70 | When the program is run, it waits 5 seconds for a USB-serial connection to be
71 | available. If there is no connection available, it will not output data. To see
72 | the serial output in the Arduino desktop IDE, do the following:
73 |
74 | 1. Open the Arduino IDE
75 | 1. Connect the Arduino board to your computer via USB
76 | 1. Press the reset button on the Arduino board
77 | 1. Within 5 seconds, go to `Tools -> Serial Monitor` in the Arduino IDE. You may
78 | have to try several times, since the board will take a moment to connect.
79 |
80 | If you don't see any output, repeat the process.
81 |
82 | ## Training your own model
83 |
84 | You can train your own model with some easy-to-use scripts. See
85 | [training_a_model.md](https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech/train) for instructions.
86 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/arduino_command_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #if defined(ARDUINO) && !defined(ARDUINO_ARDUINO_NANO33BLE)
17 | #define ARDUINO_EXCLUDE_CODE
18 | #endif // defined(ARDUINO) && !defined(ARDUINO_ARDUINO_NANO33BLE)
19 |
20 | #ifndef ARDUINO_EXCLUDE_CODE
21 |
22 | #include "command_responder.h"
23 |
24 | #include "Arduino.h"
25 |
26 | // Toggles the built-in LED every inference, and lights a colored LED depending
27 | // on which word was detected.
28 | void RespondToCommand(tflite::ErrorReporter* error_reporter,
29 | int32_t current_time, const char* found_command,
30 | uint8_t score, bool is_new_command) {
31 | static bool is_initialized = false;
32 | if (!is_initialized) {
33 | pinMode(LED_BUILTIN, OUTPUT);
34 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
35 | pinMode(LEDR, OUTPUT);
36 | pinMode(LEDG, OUTPUT);
37 | pinMode(LEDB, OUTPUT);
   | // D2 and D3 drive the external "yes"/"no" LEDs described in the README,
   | // so they must be configured as outputs too.
   | pinMode(2, OUTPUT);
   | pinMode(3, OUTPUT);
38 | // Ensure the LED is off by default.
39 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
40 | // Sense are on when the pin is LOW, off when HIGH.
41 | digitalWrite(LEDR, HIGH);
42 | digitalWrite(LEDG, HIGH);
43 | digitalWrite(LEDB, HIGH);
44 | is_initialized = true;
45 | }
46 | static int32_t last_command_time = 0;
47 | static int count = 0;
48 | static int certainty = 220;
49 |
50 | if (is_new_command) {
51 | TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
52 | score, current_time);
53 | // If we hear a command, light up the appropriate LED
54 | if (found_command[0] == 'y') {
55 | last_command_time = current_time;
56 | digitalWrite(LEDG, LOW); // Green for yes
57 | digitalWrite(3, HIGH);
58 | digitalWrite(2, LOW);
59 | }
60 |
61 | if (found_command[0] == 'n') {
62 | last_command_time = current_time;
63 | digitalWrite(LEDR, LOW); // Red for no
64 | digitalWrite(2, HIGH);
65 | digitalWrite(3, LOW);
66 | }
67 |
68 | if (found_command[0] == 'u') {
69 | last_command_time = current_time;
70 | digitalWrite(LEDB, LOW); // Blue for unknown
71 | digitalWrite(2, LOW);
72 | digitalWrite(3, LOW);
73 | }
74 | }
75 |
76 | // If last_command_time is non-zero but was >3 seconds ago, zero it
77 | // and switch off the LED.
78 | if (last_command_time != 0) {
79 | if (last_command_time < (current_time - 3000)) {
80 | last_command_time = 0;
81 | digitalWrite(LED_BUILTIN, LOW);
82 | digitalWrite(LEDR, HIGH);
83 | digitalWrite(LEDG, HIGH);
84 | digitalWrite(LEDB, HIGH);
85 | }
86 | // If it is non-zero but <3 seconds ago, do nothing.
87 | return;
88 | }
89 |
90 | // Otherwise, toggle the LED every time an inference is performed.
91 | ++count;
92 | if (count & 1) {
93 | digitalWrite(LED_BUILTIN, HIGH);
94 | } else {
95 | digitalWrite(LED_BUILTIN, LOW);
96 | }
97 | }
98 |
99 | #endif // ARDUINO_EXCLUDE_CODE
100 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch,
19 | // so this file, which would hold the program's main routine on other
20 | // systems, can be left empty.
21 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/audio_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an audio source like a microphone, and is
23 | // expected to return 16-bit PCM sample data for a given point in time. The
24 | // sample data itself should be used as quickly as possible by the caller, since
25 | // to allow memory optimizations there are no guarantees that the samples won't
26 | // be overwritten by new data in the future. In practice, implementations should
27 | // ensure that there's a reasonable time allowed for clients to access the data
28 | // before any reuse.
29 | // The reference implementation can have no platform-specific dependencies, so
30 | // it just returns an array filled with zeros. For real applications, you should
31 | // ensure there's a specialized implementation that accesses hardware APIs.
32 | TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter,
33 | int start_ms, int duration_ms,
34 | int* audio_samples_size, int16_t** audio_samples);
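   | // Example call (hypothetical caller, for illustration): fetch the newest
   | // 30 ms of audio, then copy it out promptly since the provider may reuse
   | // the buffer.
   | //   int16_t* samples = nullptr;
   | //   int samples_size = 0;
   | //   GetAudioSamples(error_reporter, previous_time, /*duration_ms=*/30,
   | //                   &samples_size, &samples);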
35 |
36 | // Returns the time that audio data was last captured in milliseconds. There's
37 | // no contract about what time zero represents, the accuracy, or the granularity
38 | // of the result. Subsequent calls will generally not return a lower value, but
39 | // even that's not guaranteed if there's an overflow wraparound.
40 | // The reference implementation of this function just returns a constantly
41 | // incrementing value for each call, since it would need a non-portable platform
42 | // call to access time information. For real applications, you'll need to write
43 | // your own platform-specific implementation.
44 | int32_t LatestAudioTimestamp();
45 |
46 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
47 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/command_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on an audio command.
17 |
18 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
19 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
20 |
21 | #include "tensorflow/lite/c/common.h"
22 | #include "tensorflow/lite/micro/micro_error_reporter.h"
23 |
24 | // Called every time the results of an audio recognition run are available. The
25 | // human-readable name of any recognized command is in the `found_command`
26 | // argument, `score` has the numerical confidence, and `is_new_command` is set
27 | // if the previous command was different to this one.
28 | void RespondToCommand(tflite::ErrorReporter* error_reporter,
29 | int32_t current_time, const char* found_command,
30 | uint8_t score, bool is_new_command);
31 |
32 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
33 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/feature_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // Binds itself to an area of memory intended to hold the input features for an
23 | // audio-recognition neural network model, and fills that data area with the
24 | // features representing the current audio input, for example from a microphone.
25 | // The audio features themselves are a two-dimensional array, made up of
26 | // horizontal slices representing the frequencies at one point in time, stacked
27 | // on top of each other to form a spectrogram showing how those frequencies
28 | // changed over time.
29 | class FeatureProvider {
30 | public:
31 | // Create the provider, and bind it to an area of memory. This memory should
32 | // remain accessible for the lifetime of the provider object, since subsequent
33 | // calls will fill it with feature data. The provider does no memory
34 | // management of this data.
35 | FeatureProvider(int feature_size, int8_t* feature_data);
36 | ~FeatureProvider();
37 |
38 | // Fills the feature data with information from audio inputs, and returns how
39 | // many feature slices were updated.
40 | TfLiteStatus PopulateFeatureData(tflite::ErrorReporter* error_reporter,
41 | int32_t last_time_in_ms, int32_t time_in_ms,
42 | int* how_many_new_slices);
43 |
44 | private:
45 | int feature_size_;
46 | int8_t* feature_data_;
47 | // Make sure we don't try to use cached information if this is the first call
48 | // into the provider.
49 | bool is_first_run_;
50 | };
51 |
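   | // Example usage (hypothetical caller mirroring the micro_speech main
   | // loop; the surrounding names are illustrative):
   | //   int8_t feature_buffer[kFeatureElementCount];
   | //   FeatureProvider feature_provider(kFeatureElementCount, feature_buffer);
   | //   int how_many_new_slices = 0;
   | //   feature_provider.PopulateFeatureData(error_reporter, previous_time,
   | //                                        current_time, &how_many_new_slices);
   |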
52 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
53 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
18 |
19 | // Expose a C friendly interface for main functions.
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | // Initializes all data needed for the example. The name is important, and needs
25 | // to be setup() for Arduino compatibility.
26 | void setup();
27 |
28 | // Runs one iteration of data gathering and inference. This should be called
29 | // repeatedly from the application code. The name needs to be loop() for Arduino
30 | // compatibility.
31 | void loop();
32 |
33 | #ifdef __cplusplus
34 | }
35 | #endif
36 |
37 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
38 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_micro_features_generator.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "micro_features_micro_features_generator.h"
17 |
18 | #include <cmath>
19 | #include <cstring>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
22 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
23 | #include "micro_features_micro_model_settings.h"
24 |
25 | // Configure FFT to output 16 bit fixed point.
26 | #define FIXED_POINT 16
27 |
28 | namespace {
29 |
30 | FrontendState g_micro_features_state;
31 | bool g_is_first_time = true;
32 |
33 | } // namespace
34 |
35 | TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter) {
36 | FrontendConfig config;
37 | config.window.size_ms = kFeatureSliceDurationMs;
38 | config.window.step_size_ms = kFeatureSliceStrideMs;
39 | config.noise_reduction.smoothing_bits = 10;
40 | config.filterbank.num_channels = kFeatureSliceSize;
41 | config.filterbank.lower_band_limit = 125.0;
42 | config.filterbank.upper_band_limit = 7500.0;
43 | config.noise_reduction.smoothing_bits = 10;
44 | config.noise_reduction.even_smoothing = 0.025;
45 | config.noise_reduction.odd_smoothing = 0.06;
46 | config.noise_reduction.min_signal_remaining = 0.05;
47 | config.pcan_gain_control.enable_pcan = 1;
48 | config.pcan_gain_control.strength = 0.95;
49 | config.pcan_gain_control.offset = 80.0;
50 | config.pcan_gain_control.gain_bits = 21;
51 | config.log_scale.enable_log = 1;
52 | config.log_scale.scale_shift = 6;
53 | if (!FrontendPopulateState(&config, &g_micro_features_state,
54 | kAudioSampleFrequency)) {
55 | TF_LITE_REPORT_ERROR(error_reporter, "FrontendPopulateState() failed");
56 | return kTfLiteError;
57 | }
58 | g_is_first_time = true;
59 | return kTfLiteOk;
60 | }
61 |
62 | // This is not exposed in any header, and is only used for testing, to ensure
63 | // that the state is correctly set up before generating results.
64 | void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets) {
65 | for (int i = 0; i < g_micro_features_state.filterbank.num_channels; ++i) {
66 | g_micro_features_state.noise_reduction.estimate[i] = estimate_presets[i];
67 | }
68 | }
69 |
70 | TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter,
71 | const int16_t* input, int input_size,
72 | int output_size, int8_t* output,
73 | size_t* num_samples_read) {
74 | const int16_t* frontend_input;
75 | if (g_is_first_time) {
76 | frontend_input = input;
77 | g_is_first_time = false;
78 | } else {
79 | frontend_input = input + 160;
80 | }
81 | FrontendOutput frontend_output = FrontendProcessSamples(
82 | &g_micro_features_state, frontend_input, input_size, num_samples_read);
83 |
84 | for (size_t i = 0; i < frontend_output.size; ++i) {
85 | // These scaling values are derived from those used in input_data.py in the
86 | // training pipeline.
87 | // The feature pipeline outputs 16-bit signed integers in roughly a 0 to 670
88 | // range. In training, these are then arbitrarily divided by 25.6 to get
89 | // float values in the rough range of 0.0 to 26.0. This scaling is performed
90 | // for historical reasons, to match up with the output of other feature
91 | // generators.
92 | // The process is then further complicated when we quantize the model. This
93 | // means we have to scale the 0.0 to 26.0 real values to the -128 to 127
94 | // signed integer numbers.
95 | // All this means that to get matching values from our integer feature
96 | // output into the tensor input, we have to perform:
97 | // input = (((feature / 25.6) / 26.0) * 256) - 128
98 | // To simplify this and perform it in 32-bit integer math, we rearrange to:
99 | // input = (feature * 256) / (25.6 * 26.0) - 128
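   | // Worked example: value_div below is int(25.6 * 26.0 + 0.5) = 666, so a
   | // peak feature of 670 maps to (670 * 256 + 333) / 666 - 128 = 258 - 128
   | // = 130, which the range checks below then clamp to 127.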
100 | constexpr int32_t value_scale = 256;
101 | constexpr int32_t value_div = static_cast<int32_t>((25.6f * 26.0f) + 0.5f);
102 | int32_t value =
103 | ((frontend_output.values[i] * value_scale) + (value_div / 2)) /
104 | value_div;
105 | value -= 128;
106 | if (value < -128) {
107 | value = -128;
108 | }
109 | if (value > 127) {
110 | value = 127;
111 | }
112 | output[i] = value;
113 | }
114 |
115 | return kTfLiteOk;
116 | }
117 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_micro_features_generator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // Sets up any resources needed for the feature generation pipeline.
23 | TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter);
24 |
25 | // Converts audio sample data into a more compact form that's appropriate for
26 | // feeding into a neural network.
27 | TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter,
28 | const int16_t* input, int input_size,
29 | int output_size, int8_t* output,
30 | size_t* num_samples_read);
31 |
32 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
33 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_micro_model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "micro_features_micro_model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "silence",
20 | "unknown",
21 | "yes",
22 | "no",
23 | };
24 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_micro_model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 |
22 | // The size of the input time series data we pass to the FFT to produce the
23 | // frequency information. This has to be a power of two; since we're dealing
24 | // with 30 ms of 16 kHz input (480 samples), 512 is the next value up.
25 | constexpr int kMaxAudioSampleSize = 512;
26 | constexpr int kAudioSampleFrequency = 16000;
27 |
28 | // The following values are derived from values used during model training.
29 | // If you change the way you preprocess the input, update all these constants.
30 | constexpr int kFeatureSliceSize = 40;
31 | constexpr int kFeatureSliceCount = 49;
32 | constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
33 | constexpr int kFeatureSliceStrideMs = 20;
34 | constexpr int kFeatureSliceDurationMs = 30;
35 |
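   | // Sanity checks, derived from the constants above: the model input is
   | // 49 * 40 = 1960 int8 values, and the slices span roughly one second of
   | // audio (48 strides of 20 ms plus one final 30 ms window = 990 ms).
   | static_assert(kFeatureSliceCount * kFeatureSliceSize == 1960,
   |               "unexpected model input size");
   | static_assert((kFeatureSliceCount - 1) * kFeatureSliceStrideMs +
   |                   kFeatureSliceDurationMs == 990,
   |               "spectrogram should span ~1 s");
   |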
36 | // Variables for the model's output categories.
37 | constexpr int kSilenceIndex = 0;
38 | constexpr int kUnknownIndex = 1;
39 | // If you modify the output categories, you need to update the following values.
40 | constexpr int kCategoryCount = 4;
41 | extern const char* kCategoryLabels[kCategoryCount];
42 |
43 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
44 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_model.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite FlatBuffer model file that has been
17 | // converted into a C data array, so it can be easily compiled into a binary
18 | // for devices that don't have a file system. It was created using the command:
19 | // xxd -i model.tflite > model.cc
20 |
21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_
22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_
23 |
24 | extern const unsigned char g_model[];
25 | extern const int g_model_len;
26 |
27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_
28 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_no_micro_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
18 |
19 | extern const int g_no_micro_f9643d42_nohash_4_width;
20 | extern const int g_no_micro_f9643d42_nohash_4_height;
21 | extern const signed char g_no_micro_f9643d42_nohash_4_data[];
22 |
23 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/micro_features_yes_micro_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
18 |
19 | extern const int g_yes_micro_f2e59fea_nohash_1_width;
20 | extern const int g_yes_micro_f2e59fea_nohash_1_height;
21 | extern const signed char g_yes_micro_f2e59fea_nohash_1_data[];
22 |
23 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo-with-LEDs/micro_speech/sparkfun_edge_command_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #if defined(ARDUINO) && !defined(ARDUINO_SFE_EDGE)
17 | #define ARDUINO_EXCLUDE_CODE
18 | #endif // defined(ARDUINO) && !defined(ARDUINO_SFE_EDGE)
19 |
20 | #ifndef ARDUINO_EXCLUDE_CODE
21 |
22 | #include "command_responder.h"
23 |
24 | #include "am_bsp.h"
25 |
26 | // This implementation will light up the LEDs on the board in response to
27 | // different commands.
28 | void RespondToCommand(tflite::ErrorReporter* error_reporter,
29 | int32_t current_time, const char* found_command,
30 | uint8_t score, bool is_new_command) {
31 | static bool is_initialized = false;
32 | if (!is_initialized) {
33 | // Set up LEDs as outputs
34 | #ifdef AM_BSP_NUM_LEDS
35 | am_devices_led_array_init(am_bsp_psLEDs, AM_BSP_NUM_LEDS);
36 | am_devices_led_array_out(am_bsp_psLEDs, AM_BSP_NUM_LEDS, 0x00000000);
37 | #endif
38 | is_initialized = true;
39 | }
40 |
41 | // Toggle the blue LED every time an inference is performed.
42 | am_devices_led_toggle(am_bsp_psLEDs, AM_BSP_LED_BLUE);
43 |
44 | // Turn on LEDs corresponding to the detection for the cycle
45 | am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_RED);
46 | am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_YELLOW);
47 | am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_GREEN);
48 | if (is_new_command) {
49 | TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
50 | score, current_time);
51 | if (found_command[0] == 'y') {
52 | am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_YELLOW);
53 | }
54 | if (found_command[0] == 'n') {
55 | am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_RED);
56 | }
57 | if (found_command[0] == 'u') {
58 | am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_GREEN);
59 | }
60 | }
61 | }
62 |
63 | #endif // ARDUINO_EXCLUDE_CODE
64 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/README.md:
--------------------------------------------------------------------------------
1 | # Forked from [tflite examples](https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech)
2 |
3 | # Micro speech example
4 |
5 | This example shows how you can use TensorFlow Lite to run a 20 kilobyte neural
6 | network model to recognize keywords in speech. It's designed to run on systems
7 | with very small amounts of memory such as microcontrollers and DSPs.
8 |
9 | The example application listens to its surroundings with a microphone and
10 | indicates when it has detected a word by lighting an LED or displaying data on a
11 | screen, depending on the capabilities of the device.
12 |
13 | The code has a small footprint (for example around 22 kilobytes on a Cortex
14 | M3) and only uses about 10 kilobytes of RAM for working memory, so it's able to
15 | run on systems like an STM32F103 with only 20 kilobytes of total SRAM and 64
16 | kilobytes of Flash.
17 |
18 | ## Table of contents
19 |
20 | - [Deploy to Arduino](#deploy-to-arduino)
21 | - [Training your own model](#training-your-own-model)
22 |
23 | ## Deploy to Arduino
24 |
25 | The following instructions will help you build and deploy this sample
26 | to [Arduino](https://www.arduino.cc/) devices.
27 |
28 | The sample has been tested with the following devices:
29 |
30 | - [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers)
31 |
32 | The Arduino Nano 33 BLE Sense is currently the only Arduino with a built-in
33 | microphone. If you're using a different Arduino board and attaching your own
34 | microphone, you'll need to implement your own `audio_provider.cc`. The Nano 33
35 | BLE Sense also has a built-in LED, used to indicate that a word has been recognized.
36 |
37 | ### Install the Arduino_TensorFlowLite library
38 |
39 | This example application is included as part of the official TensorFlow Lite
40 | Arduino library. To install it, open the Arduino library manager in
41 | `Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`.
42 |
43 | ### Load and run the example
44 |
45 | Once the library has been added, go to `File -> Examples`. You should see an
46 | example near the bottom of the list named `TensorFlowLite:micro_speech`. Select
47 | it and click `micro_speech` to load the example.
48 |
49 | Use the Arduino IDE to build and upload the example. Once it is running, you
50 | should see the built-in LED on your device flashing. Saying the word "yes" will
51 | cause the LED to remain on for 3 seconds. The current model has fairly low
52 | accuracy, so you may have to repeat "yes" a few times.
53 |
54 | The program also outputs inference results to the serial port, which appear as
55 | follows:
56 |
57 | ```
58 | Heard yes (201) @4056ms
59 | Heard no (205) @6448ms
60 | Heard unknown (201) @13696ms
61 | Heard yes (205) @15000ms
62 | ```
63 |
64 | The number after each detected word is its score. By default, the program only
65 | considers matches as valid if their score is over 200, so all of the scores you
66 | see will be at least 200.
67 |
68 | When the program is run, it waits 5 seconds for a USB-serial connection to be
69 | available. If there is no connection available, it will not output data. To see
70 | the serial output in the Arduino desktop IDE, do the following:
71 |
72 | 1. Open the Arduino IDE
73 | 1. Connect the Arduino board to your computer via USB
74 | 1. Press the reset button on the Arduino board
75 | 1. Within 5 seconds, go to `Tools -> Serial Monitor` in the Arduino IDE. You may
76 | have to try several times, since the board will take a moment to connect.
77 |
78 | If you don't see any output, repeat the process.
79 |
80 | ## Training your own model
81 |
82 | You can train your own model with some easy-to-use scripts. See
83 | [training_a_model.md](https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech/train) for instructions.
84 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/arduino_command_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "command_responder.h"
17 |
18 | #include "Arduino.h"
19 |
20 | // Toggles the built-in LED every inference, and lights a colored LED depending
21 | // on which word was detected.
22 | void RespondToCommand(tflite::ErrorReporter* error_reporter,
23 | int32_t current_time, const char* found_command,
24 | uint8_t score, bool is_new_command) {
25 | static bool is_initialized = false;
26 | if (!is_initialized) {
27 | pinMode(LED_BUILTIN, OUTPUT);
28 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
29 | pinMode(LEDR, OUTPUT);
30 | pinMode(LEDG, OUTPUT);
31 | pinMode(LEDB, OUTPUT);
32 | is_initialized = true;
33 | }
34 | static int32_t last_command_time = 0;
35 | static int count = 0;
36 | static int certainty = 220;
37 |
38 | if (is_new_command) {
39 | error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
40 | current_time);
41 | // If we hear a command, light up the appropriate LED.
42 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
43 | // Sense are on when the pin is LOW, off when HIGH.
44 | if (found_command[0] == 'y') {
45 | last_command_time = current_time;
46 | digitalWrite(LEDG, LOW); // Green for yes
47 | }
48 |
49 | if (found_command[0] == 'n') {
50 | last_command_time = current_time;
51 | digitalWrite(LEDR, LOW); // Red for no
52 | }
53 |
54 | if (found_command[0] == 'u') {
55 | last_command_time = current_time;
56 | digitalWrite(LEDB, LOW); // Blue for unknown
57 | }
58 | }
59 |
60 | // If last_command_time is non-zero but was >3 seconds ago, zero it
61 | // and switch off the LED.
62 | if (last_command_time != 0) {
63 | if (last_command_time < (current_time - 3000)) {
64 | last_command_time = 0;
65 | digitalWrite(LED_BUILTIN, LOW);
66 | digitalWrite(LEDR, HIGH);
67 | digitalWrite(LEDG, HIGH);
68 | digitalWrite(LEDB, HIGH);
69 | }
70 | // If it is non-zero but <3 seconds ago, do nothing.
71 | return;
72 | }
73 |
74 | // Otherwise, toggle the LED every time an inference is performed.
75 | ++count;
76 | if (count & 1) {
77 | digitalWrite(LED_BUILTIN, HIGH);
78 | } else {
79 | digitalWrite(LED_BUILTIN, LOW);
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch,
19 | // so this file, which would hold the program's main routine on other
20 | // systems, can be left empty.
21 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/audio_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an audio source like a microphone, and is
23 | // expected to return 16-bit PCM sample data for a given point in time. The
24 | // sample data itself should be used as quickly as possible by the caller, since
25 | // to allow memory optimizations there are no guarantees that the samples won't
26 | // be overwritten by new data in the future. In practice, implementations should
27 | // ensure that there's a reasonable time allowed for clients to access the data
28 | // before any reuse.
29 | // The reference implementation can have no platform-specific dependencies, so
30 | // it just returns an array filled with zeros. For real applications, you should
31 | // ensure there's a specialized implementation that accesses hardware APIs.
32 | TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter,
33 | int start_ms, int duration_ms,
34 | int* audio_samples_size, int16_t** audio_samples);
35 |
36 | // Returns the time that audio data was last captured in milliseconds. There's
37 | // no contract about what time zero represents, the accuracy, or the granularity
38 | // of the result. Subsequent calls will generally not return a lower value, but
39 | // even that's not guaranteed if there's an overflow wraparound.
40 | // The reference implementation of this function just returns a constantly
41 | // incrementing value for each call, since it would need a non-portable platform
42 | // call to access time information. For real applications, you'll need to write
43 | // your own platform-specific implementation.
44 | int32_t LatestAudioTimestamp();
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
47 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/command_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on an audio command.
17 |
18 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
19 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
20 |
21 | #include "tensorflow/lite/c/c_api_internal.h"
22 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
23 |
24 | // Called every time the results of an audio recognition run are available. The
25 | // human-readable name of any recognized command is in the `found_command`
26 | // argument, `score` has the numerical confidence, and `is_new_command` is set
27 | // if the previous command was different to this one.
28 | void RespondToCommand(tflite::ErrorReporter* error_reporter,
29 | int32_t current_time, const char* found_command,
30 | uint8_t score, bool is_new_command);
31 |
32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
33 |
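One possible implementation sketch (hedged: this mirrors the Arduino LED pattern used elsewhere in this repo, not necessarily the exact upstream responder) that logs each new command and lights the built-in LED when "yes" is heard:

```cpp
#include "Arduino.h"
#include "command_responder.h"

void RespondToCommand(tflite::ErrorReporter* error_reporter,
                      int32_t current_time, const char* found_command,
                      uint8_t score, bool is_new_command) {
  // Set up the LED the first time this is called.
  static bool is_initialized = false;
  if (!is_initialized) {
    pinMode(LED_BUILTIN, OUTPUT);
    is_initialized = true;
  }
  if (is_new_command) {
    error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
                           current_time);
    // "yes" is the only label starting with 'y', so this is a cheap check.
    digitalWrite(LED_BUILTIN, found_command[0] == 'y' ? HIGH : LOW);
  }
}
```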
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/feature_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // Binds itself to an area of memory intended to hold the input features for an
23 | // audio-recognition neural network model, and fills that data area with the
24 | // features representing the current audio input, for example from a microphone.
25 | // The audio features themselves are a two-dimensional array, made up of
26 | // horizontal slices representing the frequencies at one point in time, stacked
27 | // on top of each other to form a spectrogram showing how those frequencies
28 | // changed over time.
29 | class FeatureProvider {
30 | public:
31 | // Create the provider, and bind it to an area of memory. This memory should
32 | // remain accessible for the lifetime of the provider object, since subsequent
33 | // calls will fill it with feature data. The provider does no memory
34 | // management of this data.
35 | FeatureProvider(int feature_size, uint8_t* feature_data);
36 | ~FeatureProvider();
37 |
38 | // Fills the feature data with information from audio inputs, and returns how
39 | // many feature slices were updated.
40 | TfLiteStatus PopulateFeatureData(tflite::ErrorReporter* error_reporter,
41 | int32_t last_time_in_ms, int32_t time_in_ms,
42 | int* how_many_new_slices);
43 |
44 | private:
45 | int feature_size_;
46 | uint8_t* feature_data_;
47 | // Make sure we don't try to use cached information if this is the first call
48 | // into the provider.
49 | bool is_first_run_;
50 | };
51 |
52 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
53 |
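A minimal usage sketch (assumptions: the constants come from micro_features_micro_model_settings.h, and `UpdateFeatures` is a hypothetical wrapper):

```cpp
#include "feature_provider.h"
#include "micro_features_micro_model_settings.h"

// The provider does no memory management, so the buffer must outlive it.
uint8_t g_feature_buffer[kFeatureElementCount];

TfLiteStatus UpdateFeatures(tflite::ErrorReporter* error_reporter,
                            int32_t previous_time, int32_t current_time) {
  static FeatureProvider feature_provider(kFeatureElementCount,
                                          g_feature_buffer);
  int how_many_new_slices = 0;
  return feature_provider.PopulateFeatureData(
      error_reporter, previous_time, current_time, &how_many_new_slices);
}
```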
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
29 |
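On platforms without the Arduino runtime, these entry points are driven by an ordinary main(); a minimal sketch of that pattern:

```cpp
#include "main_functions.h"

// Arduino calls setup() and loop() itself, which is why arduino_main.cpp is
// empty; anywhere else we provide the driver loop explicitly.
int main(int argc, char* argv[]) {
  setup();
  while (true) {
    loop();
  }
}
```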
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_micro_features_generator.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "micro_features_micro_features_generator.h"
17 |
18 | #include <cmath>
19 | #include <cstring>
20 |
21 | #include "micro_features_micro_model_settings.h"
22 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
23 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
24 |
25 | // Configure FFT to output 16 bit fixed point.
26 | #define FIXED_POINT 16
27 |
28 | namespace {
29 |
30 | FrontendState g_micro_features_state;
31 | bool g_is_first_time = true;
32 |
33 | } // namespace
34 |
35 | TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter) {
36 | FrontendConfig config;
37 | config.window.size_ms = kFeatureSliceDurationMs;
38 | config.window.step_size_ms = kFeatureSliceStrideMs;
39 | config.noise_reduction.smoothing_bits = 10;
40 | config.filterbank.num_channels = kFeatureSliceSize;
41 | config.filterbank.lower_band_limit = 125.0;
42 | config.filterbank.upper_band_limit = 7500.0;
43 | config.noise_reduction.smoothing_bits = 10;
44 | config.noise_reduction.even_smoothing = 0.025;
45 | config.noise_reduction.odd_smoothing = 0.06;
46 | config.noise_reduction.min_signal_remaining = 0.05;
47 | config.pcan_gain_control.enable_pcan = 1;
48 | config.pcan_gain_control.strength = 0.95;
49 | config.pcan_gain_control.offset = 80.0;
50 | config.pcan_gain_control.gain_bits = 21;
51 | config.log_scale.enable_log = 1;
52 | config.log_scale.scale_shift = 6;
53 | if (!FrontendPopulateState(&config, &g_micro_features_state,
54 | kAudioSampleFrequency)) {
55 | error_reporter->Report("FrontendPopulateState() failed");
56 | return kTfLiteError;
57 | }
58 | g_is_first_time = true;
59 | return kTfLiteOk;
60 | }
61 |
62 | // This is not exposed in any header, and is only used for testing, to ensure
63 | // that the state is correctly set up before generating results.
64 | void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets) {
65 | for (int i = 0; i < g_micro_features_state.filterbank.num_channels; ++i) {
66 | g_micro_features_state.noise_reduction.estimate[i] = estimate_presets[i];
67 | }
68 | }
69 |
70 | TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter,
71 | const int16_t* input, int input_size,
72 | int output_size, uint8_t* output,
73 | size_t* num_samples_read) {
74 | const int16_t* frontend_input;
75 | if (g_is_first_time) {
76 | frontend_input = input;
77 | g_is_first_time = false;
78 | } else {
79 | frontend_input = input + 160;
80 | }
81 | FrontendOutput frontend_output = FrontendProcessSamples(
82 | &g_micro_features_state, frontend_input, input_size, num_samples_read);
83 |
84 | for (int i = 0; i < frontend_output.size; ++i) {
85 | // These scaling values are derived from those used in input_data.py in the
86 | // training pipeline.
87 | constexpr int32_t value_scale = (10 * 255);
88 | constexpr int32_t value_div = (256 * 26);
89 | int32_t value =
90 | ((frontend_output.values[i] * value_scale) + (value_div / 2)) /
91 | value_div;
92 | if (value < 0) {
93 | value = 0;
94 | }
95 | if (value > 255) {
96 | value = 255;
97 | }
98 | output[i] = value;
99 | }
100 |
101 | return kTfLiteOk;
102 | }
103 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_micro_features_generator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // Sets up any resources needed for the feature generation pipeline.
23 | TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter);
24 |
25 | // Converts audio sample data into a more compact form that's appropriate for
26 | // feeding into a neural network.
27 | TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter,
28 | const int16_t* input, int input_size,
29 | int output_size, uint8_t* output,
30 | size_t* num_samples_read);
31 |
32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
33 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_micro_model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "micro_features_micro_model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "silence",
20 | "unknown",
21 | "yes",
22 | "no",
23 | };
24 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_micro_model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 |
22 | // The size of the input time series data we pass to the FFT to produce the
23 | // frequency information. This has to be a power of two, and since we're dealing
24 | // with 30ms of 16kHz input, which means 480 samples, 512 is the next such value.
25 | constexpr int kMaxAudioSampleSize = 512;
26 | constexpr int kAudioSampleFrequency = 16000;
27 |
28 | // All of these values are derived from the values used during model training;
29 | // if you change your model, you'll need to update these constants.
30 | constexpr int kFeatureSliceSize = 40;
31 | constexpr int kFeatureSliceCount = 49;
32 | constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
33 | constexpr int kFeatureSliceStrideMs = 20;
34 | constexpr int kFeatureSliceDurationMs = 30;
35 |
36 | constexpr int kCategoryCount = 4;
37 | constexpr int kSilenceIndex = 0;
38 | constexpr int kUnknownIndex = 1;
39 | extern const char* kCategoryLabels[kCategoryCount];
40 |
41 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
42 |
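As a sanity check on these numbers (a sketch, assuming the model's one-second audio window): 30 ms windows advanced in 20 ms steps over 1000 ms give 1 + (1000 - 30) / 20 = 49 slices of 40 channels each, so 40 * 49 = 1960 feature elements.

```cpp
#include "micro_features_micro_model_settings.h"

static_assert(kFeatureSliceCount ==
                  1 + (1000 - kFeatureSliceDurationMs) / kFeatureSliceStrideMs,
              "49 slices should tile a one-second window");
static_assert(kFeatureElementCount == 40 * 49,
              "40 channels per slice x 49 slices = 1960 elements");
```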
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_no_micro_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
18 |
19 | extern const int g_no_micro_f9643d42_nohash_4_width;
20 | extern const int g_no_micro_f9643d42_nohash_4_height;
21 | extern const unsigned char g_no_micro_f9643d42_nohash_4_data[];
22 |
23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_tiny_conv_micro_features_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i tiny_conv.tflite > tiny_conv_simple_features_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_TINY_CONV_MICRO_FEATURES_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_TINY_CONV_MICRO_FEATURES_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_tiny_conv_micro_features_model_data[];
25 | extern const int g_tiny_conv_micro_features_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_TINY_CONV_MICRO_FEATURES_MODEL_DATA_H_
28 |
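A minimal sketch of how the compiled-in array is mapped back to a model at runtime (hedged: tflite::GetModel comes from the FlatBuffer schema header in the TensorFlow Lite version this repo targets):

```cpp
#include "micro_features_tiny_conv_micro_features_model_data.h"
#include "tensorflow/lite/schema/schema_generated.h"

// The byte array is a serialized FlatBuffer, so no parsing or copying is
// needed; GetModel() just reinterprets the bytes in place.
const tflite::Model* LoadTinyConvModel() {
  return tflite::GetModel(g_tiny_conv_micro_features_model_data);
}
```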
--------------------------------------------------------------------------------
/Examples/MicroSpeech/YesNo/micro_speech/micro_features_yes_micro_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
18 |
19 | extern const int g_yes_micro_f2e59fea_nohash_1_width;
20 | extern const int g_yes_micro_f2e59fea_nohash_1_height;
21 | extern const unsigned char g_yes_micro_f2e59fea_nohash_1_data[];
22 |
23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Examples/ServoMotor/README.md:
--------------------------------------------------------------------------------
1 | # How to connect a servo motor with Arduino Nano 33 BLE (Sense)
2 |
3 | ## Demo
4 | This is a [video](https://youtu.be/1JK84QPrm5s) of the Arduino Nano 33 BLE Sense running the [Sweep example](https://www.arduino.cc/en/Tutorial/Sweep) from the [Servo library](https://www.arduino.cc/en/Reference/Servo).
5 |
6 | ## Circuits
7 |
8 |
9 | ## Sweep example
10 | Read more about the Servo motor Sweep example [here](https://www.arduino.cc/en/Tutorial/Sweep)
11 |
12 | ## Steps
13 | 1. Connect the circuit. Make sure to connect the Arduino's D9 pin to the servo's pulse pin. This board runs at 3.3V, so unlike the diagram below, connect 3.3V to the servo's power pin.
14 |
15 |
16 | 2. Open the Arduino IDE and install the latest version of the Servo library; you can search for it under "Tools - Manage Libraries". Make sure you are using the latest version, since the library was updated to support this board.
17 |
18 | 3. Open the Sweep example: go to "File - Examples - Servo - Sweep".
19 |
20 | 4. Select the right board and port, upload the sketch to your board, and you are good to go. You should see the servo move like in the demo video above.
21 |
--------------------------------------------------------------------------------
/Examples/ServoMotor/Sweep/Sweep.ino:
--------------------------------------------------------------------------------
1 | /* Sweep
2 | by BARRAGAN
3 | This example code is in the public domain.
4 |
5 | modified 8 Nov 2013
6 | by Scott Fitzgerald
7 | http://www.arduino.cc/en/Tutorial/Sweep
8 | */
9 |
10 | #include <Servo.h>
11 |
12 | Servo myservo; // create servo object to control a servo
13 | // twelve servo objects can be created on most boards
14 |
15 | int pos = 0; // variable to store the servo position
16 |
17 | void setup() {
18 | myservo.attach(9); // attaches the servo on pin 9 to the servo object
19 | }
20 |
21 | void loop() {
22 | for (pos = 0; pos <= 180; pos += 1) { // goes from 0 degrees to 180 degrees
23 | // in steps of 1 degree
24 | myservo.write(pos); // tell servo to go to position in variable 'pos'
25 | delay(15); // waits 15ms for the servo to reach the position
26 | }
27 | for (pos = 180; pos >= 0; pos -= 1) { // goes from 180 degrees to 0 degrees
28 | myservo.write(pos); // tell servo to go to position in variable 'pos'
29 | delay(15); // waits 15ms for the servo to reach the position
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/ImageClassifier/arduino_code/teachableMachineArduinoLEDs/teachableMachineArduinoLEDs.ino:
--------------------------------------------------------------------------------
1 | int ledPin1 = 12;
2 | int ledPin2 = 13;
3 |
4 | void setup() {
5 | pinMode(ledPin1, OUTPUT); // sets the pin as output
6 | pinMode(ledPin2, OUTPUT);
7 | Serial.begin(9600); // initialize serial communications
8 | }
9 |
10 | void loop() {
11 | if (Serial.available() > 0) { // if there's serial data available
12 | int inByte = Serial.read(); // read it
13 | if (inByte == 1) {
14 | digitalWrite(ledPin1, HIGH); // use it to turn on the LED 1
15 | digitalWrite(ledPin2, LOW);
16 | } else if (inByte == 2) {
17 | digitalWrite(ledPin2, HIGH); // use it to turn on the LED 2
18 | digitalWrite(ledPin1, LOW);
19 | } else {
20 | digitalWrite(ledPin1, LOW); // sets the LED off
21 | digitalWrite(ledPin2, LOW); // sets the LED off
22 | }
23 | delay(200); // waits
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/ImageClassifier/p5_code/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
My Teachable Machine with Image
9 |
Show a yellow minion or a white rabbit in front of the webcam
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/ImageClassifier/p5_code/sketch.js:
--------------------------------------------------------------------------------
1 | const myImageModelURL = 'https://teachablemachine.withgoogle.com/models/H1eZs1_jA/';
2 | let myImageModel;
3 | let resultDiv;
4 | let serial;// variable to hold an instance of the serialport library
5 | let portName = '/dev/tty.usbmodem144301';// fill in your serial port name here
6 | let outByte = 0;// for outgoing data
7 | let video;
8 |
9 | function preload() {
10 | video = createCapture(VIDEO);
11 | myImageModel = ml5.imageClassifier(myImageModelURL+ 'model.json');
12 | }
13 |
14 | function setup() {
15 | resultDiv = createElement('h1', '...');
16 | serial = new p5.SerialPort(); // make a new instance of the serialport library
17 | serial.on('error', serialError); // callback for errors
18 | serial.open(portName); // open a serial port
19 | myImageModel.classify(video, gotResults);
20 | }
21 |
22 | function serialError(err) {
23 | console.log('Something went wrong with the serial port. ' + err);
24 | }
25 |
26 | function gotResults(err, results) {
27 | if (err) console.log(err);
28 | if (results) {
29 | console.log(results);
30 | // Wait for 0.5 second before classifying again
31 | setTimeout(() => myImageModel.classify(video, gotResults), 500);
32 | if (results[0].confidence < 0.7) return;
33 | resultDiv.html('Result is: ' + results[0].label);
34 | if (results[0].label === 'rabbit') {
35 | outByte = 1;
36 | } else if (results[0].label === 'minion') {
37 | outByte = 2;
38 | } else {
39 | outByte = 0;
40 | }
41 | // send it out the serial port:
42 | console.log('outByte: ', outByte)
43 | serial.write(outByte);
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/ImageClassifier/p5_code/style.css:
--------------------------------------------------------------------------------
1 | html, body {
2 | margin: 0;
3 | padding: 0;
4 | }
5 | canvas {
6 | display: block;
7 | }
8 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/PoseClassifier/arduino_code/teachableMachineArduinoLEDs/teachableMachineArduinoLEDs.ino:
--------------------------------------------------------------------------------
1 | int ledPin1 = 12;
2 | int ledPin2 = 13;
3 |
4 | void setup() {
5 | pinMode(ledPin1, OUTPUT); // sets the pin as output
6 | pinMode(ledPin2, OUTPUT);
7 | Serial.begin(9600); // initialize serial communications
8 | }
9 |
10 | void loop() {
11 | if (Serial.available() > 0) { // if there's serial data available
12 | int inByte = Serial.read(); // read it
13 | if (inByte == 1) {
14 | digitalWrite(ledPin1, HIGH); // use it to turn on the LED 1
15 | digitalWrite(ledPin2, LOW);
16 | } else if (inByte == 2) {
17 | digitalWrite(ledPin2, HIGH); // use it to turn on the LED 2
18 | digitalWrite(ledPin1, LOW);
19 | } else {
20 | digitalWrite(ledPin1, LOW); // sets the LED off
21 | digitalWrite(ledPin2, LOW); // sets the LED off
22 | }
23 | delay(200); // waits
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/PoseClassifier/p5_code/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Poses classifier
4 |
5 |
6 |
7 |
8 |
9 |
10 |
16 |
17 |
18 |
19 |
20 |
Poses classifier on Webcam
21 |
22 |
The Poses Classifier classifies the video as class ... with ... confidence.
6 |
7 |
8 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/PoseClassifier/p5_code/sketch.js:
--------------------------------------------------------------------------------
1 | const modelURL = 'https://teachablemachine.withgoogle.com/models/r8wsgg5mm/';
2 | // the json file (model topology) has a reference to the bin file (model weights)
3 | const checkpointURL = modelURL + "model.json";
4 | // the metadata json file contains the text labels of your model and additional information
5 | const metadataURL = modelURL + "metadata.json";
6 | let serial;// variable to hold an instance of the serialport library
7 | let portName = '/dev/tty.usbmodem144301';// fill in your serial port name here
8 | let outByte = 0; // for outgoing data
9 | const size = 300;
10 | const flip = true; // whether to flip the webcam
11 | let webcam;
12 | let model;
13 | let totalClasses;
14 | let myCanvas;
15 | let ctx;
16 |
17 | // A function that loads the model from the checkpoint
18 | async function load() {
19 | model = await tmPose.load(checkpointURL, metadataURL);
20 | totalClasses = model.getTotalClasses();
21 | console.log("Number of classes, ", totalClasses);
22 | }
23 |
24 | async function loadWebcam() {
25 | webcam = new tmPose.Webcam(size, size, flip); // can change width and height
26 | await webcam.setup(); // request access to the webcam
27 | await webcam.play();
28 | window.requestAnimationFrame(loopWebcam);
29 | }
30 |
31 | async function setup() {
32 | serial = new p5.SerialPort(); // make a new instance of the serialport library
33 | serial.on('error', serialError); // callback for errors
34 | serial.open(portName); // open a serial port
35 |
36 | myCanvas = createCanvas(size, size);
37 | ctx = myCanvas.elt.getContext("2d");
38 | // Call the load function, wait until it finishes loading
39 | await load();
40 | await loadWebcam();
41 | }
42 |
43 | function serialError(err) {
44 | console.log('Something went wrong with the serial port. ' + err);
45 | }
46 |
47 |
48 | async function loopWebcam(timestamp) {
49 | webcam.update(); // update the webcam frame
50 | await predict();
51 | window.requestAnimationFrame(loopWebcam);
52 | }
53 |
54 | async function predict() {
55 | // Prediction #1: run input through posenet
56 | // predict can take in an image, video or canvas html element
57 | const flipHorizontal = false;
58 | const { pose, posenetOutput } = await model.estimatePose(
59 | webcam.canvas,
60 | flipHorizontal
61 | );
62 | // Prediction 2: run input through the teachable machine classification model
63 | const prediction = await model.predict(
64 | posenetOutput,
65 | flipHorizontal,
66 | totalClasses
67 | );
68 |
69 | // console.log('prediction: ', prediction);
70 | // Sort prediction array by probability
71 | // So the first classname will have the highest probability
72 | const sortedPrediction = prediction.sort((a, b) => b.probability - a.probability);
73 |
74 | // Show the result
75 | const res = select('#res'); // select
76 | res.html(sortedPrediction[0].className);
77 |
78 | // Show the probability
79 | const prob = select('#prob'); // select
80 | prob.html(sortedPrediction[0].probability.toFixed(2));
81 |
82 | // draw the keypoints and skeleton
83 | if (pose) {
84 | drawPose(pose);
85 | }
86 |
87 | if (sortedPrediction[0].className === 'Sit') {
88 | outByte = 1;
89 | } else if (sortedPrediction[0].className === 'Stand') {
90 | outByte = 2;
91 | } else {
92 | outByte = 0;
93 | }
94 | // send it out the serial port:
95 | console.log('outByte: ', outByte)
96 | serial.write(outByte);
97 | }
98 |
99 | function drawPose(pose) {
100 | if (webcam.canvas) {
101 | ctx.drawImage(webcam.canvas, 0, 0);
102 | // draw the keypoints and skeleton
103 | if (pose) {
104 | const minPartConfidence = 0.5;
105 | tmPose.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
106 | tmPose.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
107 | }
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/PoseClassifier/p5_code/style.css:
--------------------------------------------------------------------------------
1 | html, body {
2 | margin: 0;
3 | padding: 0;
4 | }
5 | canvas {
6 | display: block;
7 | }
8 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/README.md:
--------------------------------------------------------------------------------
1 | # Steps:
2 | - [Download](https://www.arduino.cc/en/main/software) Arduino IDE
3 | - [Download](https://github.com/p5-serial/p5.serialcontrol/releases) p5 serial app
4 | - Upload the Arduino code to the Arduino board (you can find it in the `SoundClassifier` / `ImageClassifier` / `PoseClassifier` folder)
5 | - Open the p5 serial app (if it won't open, change your firewall settings)
6 |
7 | ## Sound:
8 | - Run this [p5 sketch](https://editor.p5js.org/yining/sketches/eHYnYa5BR) in the p5 web editor; remember to update `portName` and `mySoundModelURL`, and change the class names to your own classes.
9 | - [Demo](https://youtu.be/7xPDbbHCjLw)
10 | - [Demo made by Cara Neels](https://vimeo.com/363431151)
11 |
12 | ## Sound with Servo Motor
13 | - Upload the [Arduino Sketch](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/TeachableMachineArduino/SoundClassifier_with_Servo/SoundClassifier_Servo) to the Arduino board
14 | - Run this [p5 sketch](https://editor.p5js.org/yining/sketches/q8JEPDwK7); remember to update `portName` and `mySoundModelURL`, and change the class names to your own classes.
15 | - [Video Demo](https://youtu.be/RnStPxTfEnU)
16 | - Circuit
17 | - Connect D2, D3, and D4 to 3 LEDs
18 | - Connect the servo signal pin to D9. [More about](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/ServoMotor) how to use a servo motor with Arduino.
19 |
20 |
21 |
22 | ## Image:
23 | - Run this [p5 sketch](https://editor.p5js.org/yining/sketches/Ob8Zkf_FZ) in the p5 web editor; remember to update `portName` and `myImageModelURL`, and change the class names to your own classes.
24 | - [Demo](https://youtu.be/ZGafimlnLw8)
25 | - Circuit
26 | - Connect ground to the two LEDs' ground pins
27 | - Connect pins 2 and 3 (or 12 and 13) to the LEDs' power pins
28 | An example circuit on the Arduino Nano 33 BLE Sense:
29 |
30 |
31 |
32 | ## Pose:
33 | - Run this [p5 sketch](https://editor.p5js.org/yining/sketches/WqhmvWzoo) in the p5 web editor; remember to update `portName` and `poseModelUrl`, and change the class names to your own classes.
34 | - [Demo](https://youtu.be/2E0LpbdPjMs)
35 |
36 | ## Troubleshooting:
37 | - The model works in the p5 web editor, but my LEDs don't light up
38 | 1. Light up the LEDs directly in the Arduino code to test whether there is anything wrong with the LEDs (see the test sketch below this list).
39 | 2. Make sure p5 serial is working: there shouldn't be any errors in the console. The p5 serial app should be open, but do NOT connect to the port inside the p5 serial app; otherwise the app will hold the port and the p5 web editor cannot use it.
40 | 3. You can find your port name in the p5 serial app, but there is no need to connect to the port there.
41 | 4. When you re-upload the Arduino sketch, you need to stop the p5 sketch in the editor and close the p5 serial app.
42 | - Cannot download/open p5 serial app
43 | 1. Cannot open the p5 serial app: change your firewall settings (turn the firewall off).
44 | 2. Cannot download the p5 serial app in Chrome (`p5.serialcontrol.zip is dangerous, so Chrome has blocked it`): try downloading it in another browser, like Safari or Firefox.
45 |
46 |
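For troubleshooting step 1, here is a minimal standalone blink test (not part of this repo; it assumes LEDs wired to pins 2 and 3 as in the examples above). If both LEDs blink, the wiring is fine and the problem is on the serial side:

```cpp
int ledPin1 = 2;
int ledPin2 = 3;

void setup() {
  pinMode(ledPin1, OUTPUT);
  pinMode(ledPin2, OUTPUT);
}

void loop() {
  // Blink both LEDs once a second, with no serial input involved.
  digitalWrite(ledPin1, HIGH);
  digitalWrite(ledPin2, HIGH);
  delay(500);
  digitalWrite(ledPin1, LOW);
  digitalWrite(ledPin2, LOW);
  delay(500);
}
```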
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/SoundClassifier/arduino_code/teachableMachineArduinoLEDs/teachableMachineArduinoLEDs.ino:
--------------------------------------------------------------------------------
1 | int ledPin1 = 2;
2 | int ledPin2 = 3;
3 |
4 | void setup() {
5 | pinMode(ledPin1, OUTPUT); // sets the pin as output
6 | pinMode(ledPin2, OUTPUT);
7 | Serial.begin(9600); // initialize serial communications
8 | }
9 |
10 | void loop() {
11 | if (Serial.available() > 0) { // if there's serial data available
12 | int inByte = Serial.read(); // read it
13 | if (inByte == 1) {
14 | digitalWrite(ledPin1, HIGH); // use it to turn on the LED 1
15 | digitalWrite(ledPin2, LOW);
16 | } else if (inByte == 2) {
17 | digitalWrite(ledPin2, HIGH); // use it to turn on the LED 2
18 | digitalWrite(ledPin1, LOW);
19 | } else {
20 | digitalWrite(ledPin1, LOW); // sets the LED off
21 | digitalWrite(ledPin2, LOW); // sets the LED off
22 | }
23 | delay(200); // waits
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/SoundClassifier/p5_code/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
My Teachable Machine with Sound
9 |
Say Happy or Clap
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/SoundClassifier/p5_code/sketch.js:
--------------------------------------------------------------------------------
1 | const mySoundModelURL = 'https://teachablemachine.withgoogle.com/models/-OUoUPKHF/';
2 | let mySoundModel;
3 | let resultDiv;
4 | let serial;// variable to hold an instance of the serialport library
5 | let portName = '/dev/tty.usbmodem144301';// fill in your serial port name here
6 | let outByte = 0;// for outgoing data
7 |
8 | function preload() {
9 | mySoundModel = ml5.soundClassifier(mySoundModelURL+ 'model.json');
10 | }
11 |
12 | function setup() {
13 | resultDiv = createElement('h1', '...');
14 | serial = new p5.SerialPort(); // make a new instance of the serialport library
15 | serial.on('error', serialError); // callback for errors
16 | serial.open(portName); // open a serial port
17 | mySoundModel.classify(gotResults);
18 | }
19 |
20 | function serialError(err) {
21 | console.log('Something went wrong with the serial port. ' + err);
22 | }
23 |
24 | function gotResults(err, results) {
25 | if (err) console.log(err);
26 | if (results) {
27 | console.log(results);
28 | if (results[0].confidence < 0.7) return;
29 | resultDiv.html('Result is: ' + results[0].label);
30 | if (results[0].label === 'happy') {
31 | outByte = 1;
32 | } else if (results[0].label === 'clap') {
33 | outByte = 2;
34 | } else {
35 | outByte = 0;
36 | }
37 | // send it out the serial port:
38 | console.log('outByte: ', outByte)
39 | serial.write(outByte);
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/SoundClassifier/p5_code/style.css:
--------------------------------------------------------------------------------
1 | html, body {
2 | margin: 0;
3 | padding: 0;
4 | }
5 | canvas {
6 | display: block;
7 | }
8 |
--------------------------------------------------------------------------------
/Examples/TeachableMachineArduino/SoundClassifier_with_Servo/SoundClassifier_Servo/SoundClassifier_Servo.ino:
--------------------------------------------------------------------------------
1 | #include <Servo.h>
2 |
3 | Servo myservo;
4 | int servoPin = 9;
5 | int redPin = 2;
6 | int greenPin = 3;
7 | int bluePin = 4;
8 |
9 | void setup() {
10 | pinMode(redPin, OUTPUT);
11 | pinMode(greenPin, OUTPUT);
12 | pinMode(bluePin, OUTPUT);
13 |
14 | myservo.attach(servoPin);
15 | myservo.write(0); // start the servo at 0 degrees
16 |
17 | // start serial port at 9600 bps:
18 | Serial.begin(9600);
19 | }
20 |
21 | void loop() {
22 | if (Serial.available() > 0) { // if there's serial data available
23 | int inByte = Serial.read(); // read it
24 | Serial.println(inByte);
25 | if (inByte == 1) {
26 | myservo.write(0);
27 | // Light red LED
28 | digitalWrite(redPin, HIGH);
29 | digitalWrite(greenPin, LOW);
30 | digitalWrite(bluePin, LOW);
31 | } else if (inByte == 2) {
32 | myservo.write(90);
33 | // Light green LED
34 | digitalWrite(redPin, LOW);
35 | digitalWrite(greenPin, HIGH);
36 | digitalWrite(bluePin, LOW);
37 | } else if (inByte == 3) {
38 | myservo.write(180);
39 | // Light BLUE LED
40 | digitalWrite(redPin, LOW);
41 | digitalWrite(greenPin, LOW);
42 | digitalWrite(bluePin, HIGH);
43 | } else {
44 | myservo.write(0);
45 | digitalWrite(redPin, HIGH);
46 | digitalWrite(greenPin, HIGH);
47 | digitalWrite(bluePin, HIGH);
48 | }
49 | // Wait for 1 second
50 | delay(1000);
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds/arduino_code/ThumbuoDownLeds/ThumbuoDownLeds.ino:
--------------------------------------------------------------------------------
1 | int ledPin1 = 2;
2 | int ledPin2 = 3;
3 |
4 | void setup() {
5 | pinMode(ledPin1, OUTPUT); // sets the pin as output
6 | pinMode(ledPin2, OUTPUT);
7 | Serial.begin(9600); // initialize serial communications
8 | }
9 |
10 | void loop() {
11 | if (Serial.available() > 0) { // if there's serial data available
12 | int inByte = Serial.read(); // read it
13 | if (inByte == 1) {
14 | digitalWrite(ledPin1, HIGH); // use it to turn on the LED 1
15 | digitalWrite(ledPin2, LOW);
16 | } else if (inByte == 2) {
17 | digitalWrite(ledPin2, HIGH); // use it to turn on the LED 2
18 | digitalWrite(ledPin1, LOW);
19 | } else {
20 | digitalWrite(ledPin1, LOW); // sets the LED off
21 | digitalWrite(ledPin2, LOW); // sets the LED off
22 | }
23 | delay(200); // waits
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds/p5_code/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Handpose with Webcam
4 |
5 |
6 |
7 |
8 |
9 |
10 |
Handpose with Webcam
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds/p5_code/sketch.js:
--------------------------------------------------------------------------------
1 | let handpose;
2 | let video;
3 | let predictions = [];
4 | let gesture = 0;
5 | let prevGesture = 0;
6 | let serial;// variable to hold an instance of the serialport library
7 | let portName = '/dev/tty.usbmodem144301';// fill in your serial port name here
8 |
9 | function setup() {
10 | createCanvas(640, 480);
11 | video = createCapture(VIDEO);
12 | video.size(width, height);
13 |
14 | serial = new p5.SerialPort(); // make a new instance of the serialport library
15 | serial.on('error', serialError); // callback for errors
16 | serial.open(portName);
17 |
18 | handpose = ml5.handpose(video, modelReady);
19 |
20 | // This sets up an event that fills the global variable "predictions"
21 | // with an array every time new hand poses are detected
22 | handpose.on("predict", results => {
23 | predictions = results;
24 | });
25 |
26 | // Hide the video element, and just show the canvas
27 | video.hide();
28 | }
29 |
30 | function serialError(err) {
31 | console.log('Something went wrong with the serial port. ' + err);
32 | }
33 |
34 | function modelReady() {
35 | console.log("Model ready!");
36 | }
37 |
38 | function draw() {
39 | image(video, 0, 0, width, height);
40 |
41 | classifyGesture();
42 |
43 | // We can call both functions to draw all keypoints and the skeletons
44 | drawKeypoints();
45 | }
46 |
47 | function classifyGesture() {
48 | if (predictions && predictions[0]) {
49 | const prediction = predictions[0];
50 | const thumbTip = prediction.landmarks[4];
51 | // thumbUp is true if the thumb tip is above every other landmark (its y value is the smallest)
52 | const thumbUp = prediction.landmarks.every(mark => mark[1] >= thumbTip[1]);
53 | if (thumbUp) {
54 | gesture = 1;
55 | } else {
56 | gesture = 2;
57 | }
58 | } else {
59 | gesture = 0;
60 | }
61 | console.log('gesture', gesture);
62 | if (prevGesture !== gesture) {
63 | serial.write(gesture);
64 | prevGesture = gesture;
65 | }
66 | }
67 |
68 | // A function to draw ellipses over the detected keypoints
69 | function drawKeypoints() {
70 | for (let i = 0; i < predictions.length; i += 1) {
71 | const prediction = predictions[i];
72 | for (let j = 0; j < prediction.landmarks.length; j += 1) {
73 | const keypoint = prediction.landmarks[j];
74 | fill(0, 255, 0);
75 | noStroke();
76 | ellipse(keypoint[0], keypoint[1], 10, 10);
77 | }
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds_p5ble/Readme.md:
--------------------------------------------------------------------------------
1 | # Classify thumb up and down gestures
2 |
3 | Run the handpose model in a p5 sketch with ml5.js and send the results to the Arduino board over BLE.
4 |
5 | - p5 web editor code: https://editor.p5js.org/yining/sketches/bnjIKmhgA
6 | - [Video demo](https://youtu.be/TrmJKZBqfHs)
7 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds_p5ble/arduino_code/ThumbuoDownLeds/ThumbuoDownLeds.ino:
--------------------------------------------------------------------------------
1 | #include <ArduinoBLE.h>
2 |
3 | BLEService ledService("19B10000-E8F2-537E-4F6C-D104768A1214"); // BLE LED Service
4 | // BLE LED Switch Characteristic - custom 128-bit UUID, read and writable by central
5 | BLEByteCharacteristic switchCharacteristic("19B10001-E8F2-537E-4F6C-D104768A1214", BLERead | BLEWrite);
6 |
7 | int ledPin1 = 2;
8 | int ledPin2 = 3;
9 |
10 | void setup() {
11 | pinMode(ledPin1, OUTPUT); // sets the pin as output
12 | pinMode(ledPin2, OUTPUT);
13 | Serial.begin(9600); // initialize serial communications
14 |
15 | // begin initialization
16 | if (!BLE.begin()) {
17 | Serial.println("starting BLE failed!");
18 |
19 | while (1);
20 | }
21 | // set advertised local name and service UUID:
22 | BLE.setLocalName("LED");
23 | BLE.setAdvertisedService(ledService);
24 |
25 | // add the characteristic to the service
26 | ledService.addCharacteristic(switchCharacteristic);
27 |
28 | // add service
29 | BLE.addService(ledService);
30 |
31 | // set the initial value for the characteristic:
32 | switchCharacteristic.writeValue(0);
33 |
34 | // start advertising
35 | BLE.advertise();
36 |
37 | Serial.println("BLE LED Peripheral");
38 | }
39 |
40 | void loop() {
41 | // listen for BLE peripherals to connect:
42 | BLEDevice central = BLE.central();
43 |
44 |
45 | // if a central is connected to peripheral:
46 | if (central) {
47 | Serial.print("Connected to central: ");
48 | // print the central's MAC address:
49 | Serial.println(central.address());
50 |
51 | // while the central is still connected to peripheral:
52 | while (central.connected()) {
53 | // if the remote device wrote to the characteristic,
54 | // use the value to control the LED:
55 | if (switchCharacteristic.written()) {
56 | int inByte = switchCharacteristic.value();
57 | if (inByte == 1) {
58 | digitalWrite(ledPin1, HIGH); // use it to turn on the LED 1
59 | digitalWrite(ledPin2, LOW);
60 | } else if (inByte == 2) {
61 | digitalWrite(ledPin2, HIGH); // use it to turn on the LED 2
62 | digitalWrite(ledPin1, LOW);
63 | } else {
64 | digitalWrite(ledPin1, LOW); // sets the LED off
65 | digitalWrite(ledPin2, LOW); // sets the LED off
66 | }
67 | }
68 | }
69 |
70 | // when the central disconnects, print it out:
71 | Serial.print(F("Disconnected from central: "));
72 | Serial.println(central.address());
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds_p5ble/p5_code/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Handpose with Webcam
4 |
5 |
6 |
7 |
8 |
9 |
10 |
Handpose with Webcam
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/Examples/ThumbupDownLeds_p5ble/p5_code/sketch.js:
--------------------------------------------------------------------------------
1 | let handpose;
2 | let video;
3 | let predictions = [];
4 | let gesture = 0;
5 | let prevGesture = 0;
6 | let serial;// variable to hold an instance of the serialport library
7 | let portName = '/dev/tty.usbmodem144301';// fill in your serial port name here
8 |
9 | function setup() {
10 | createCanvas(640, 480);
11 | video = createCapture(VIDEO);
12 | video.size(width, height);
13 |
14 | serial = new p5.SerialPort(); // make a new instance of the serialport library
15 | serial.on('error', serialError); // callback for errors
16 | serial.open(portName);
17 |
18 | handpose = ml5.handpose(video, modelReady);
19 |
20 | // This sets up an event that fills the global variable "predictions"
21 | // with an array every time new hand poses are detected
22 | handpose.on("predict", results => {
23 | predictions = results;
24 | });
25 |
26 | // Hide the video element, and just show the canvas
27 | video.hide();
28 | }
29 |
30 | function serialError(err) {
31 | console.log('Something went wrong with the serial port. ' + err);
32 | }
33 |
34 | function modelReady() {
35 | console.log("Model ready!");
36 | }
37 |
38 | function draw() {
39 | image(video, 0, 0, width, height);
40 |
41 | classifyGesture();
42 |
43 | // We can call both functions to draw all keypoints and the skeletons
44 | drawKeypoints();
45 | }
46 |
47 | function classifyGesture() {
48 | if (predictions && predictions[0]) {
49 | const prediction = predictions[0];
50 | const thumbTip = prediction.landmarks[4];
51 | // thumbUp is true if the thumb tip is above every other landmark (its y value is the smallest)
52 | const thumbUp = prediction.landmarks.every(mark => mark[1] >= thumbTip[1]);
53 | if (thumbUp) {
54 | gesture = 1;
55 | } else {
56 | gesture = 2;
57 | }
58 | } else {
59 | gesture = 0;
60 | }
61 | console.log('gesture', gesture);
62 | if (prevGesture !== gesture) {
63 | serial.write(gesture);
64 | prevGesture = gesture;
65 | }
66 | }
67 |
68 | // A function to draw ellipses over the detected keypoints
69 | function drawKeypoints() {
70 | for (let i = 0; i < predictions.length; i += 1) {
71 | const prediction = predictions[i];
72 | for (let j = 0; j < prediction.landmarks.length; j += 1) {
73 | const keypoint = prediction.landmarks[j];
74 | fill(0, 255, 0);
75 | noStroke();
76 | ellipse(keypoint[0], keypoint[1], 10, 10);
77 | }
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/Examples/magic_wand/accelerometer_handler.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
18 |
19 | #define kChannelNumber 3
20 |
21 | #include "tensorflow/lite/c/c_api_internal.h"
22 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
23 |
24 | extern int begin_index;
25 | extern TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter);
26 | extern bool ReadAccelerometer(tflite::ErrorReporter* error_reporter,
27 | float* input, int length, bool reset_buffer);
28 |
29 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
30 |
--------------------------------------------------------------------------------
/Examples/magic_wand/arduino_accelerometer_handler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "accelerometer_handler.h"
17 |
18 | #include <Arduino.h>
19 | #include <Arduino_LSM9DS1.h>
20 |
21 | #include "constants.h"
22 |
23 | // A buffer holding the last 200 sets of 3-channel values
24 | float save_data[600] = {0.0};
25 | // Most recent position in the save_data buffer
26 | int begin_index = 0;
27 | // True if there is not yet enough data to run inference
28 | bool pending_initial_data = true;
29 | // How often we should save a measurement during downsampling
30 | int sample_every_n;
31 | // The number of measurements since we last saved one
32 | int sample_skip_counter = 1;
33 |
34 | TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
35 | // Wait until we know the serial port is ready
36 | while (!Serial) {
37 | }
38 |
39 | // Switch on the IMU
40 | if (!IMU.begin()) {
41 | error_reporter->Report("Failed to initialize IMU");
42 | return kTfLiteError;
43 | }
44 |
45 | // Determine how many measurements to keep in order to
46 | // meet kTargetHz
47 | float sample_rate = IMU.accelerationSampleRate();
48 | sample_every_n = static_cast<int>(roundf(sample_rate / kTargetHz));
49 |
50 | error_reporter->Report("Magic starts!");
51 |
52 | return kTfLiteOk;
53 | }
54 |
55 | bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input,
56 | int length, bool reset_buffer) {
57 | // Clear the buffer if required, e.g. after a successful prediction
58 | if (reset_buffer) {
59 | memset(save_data, 0, 600 * sizeof(float));
60 | begin_index = 0;
61 | pending_initial_data = true;
62 | }
63 | // Keep track of whether we stored any new data
64 | bool new_data = false;
65 | // Loop through new samples and add to buffer
66 | while (IMU.accelerationAvailable()) {
67 | float x, y, z;
68 | // Read each sample, removing it from the device's FIFO buffer
69 | if (!IMU.readAcceleration(x, y, z)) {
70 | error_reporter->Report("Failed to read data");
71 | break;
72 | }
73 | // Throw away this sample unless it's the nth
74 | if (sample_skip_counter != sample_every_n) {
75 | sample_skip_counter += 1;
76 | continue;
77 | }
78 | // Write samples to our buffer, converting to milli-Gs
79 | // and flipping y and x order for compatibility with
80 | // model (sensor orientation is different on Arduino
81 | // Nano BLE Sense compared with SparkFun Edge)
82 | save_data[begin_index++] = y * 1000;
83 | save_data[begin_index++] = x * 1000;
84 | save_data[begin_index++] = z * 1000;
85 | // Since we took a sample, reset the skip counter
86 | sample_skip_counter = 1;
87 | // If we reached the end of the circular buffer, reset
88 | if (begin_index >= 600) {
89 | begin_index = 0;
90 | }
91 | new_data = true;
92 | }
93 |
94 | // Skip this round if data is not ready yet
95 | if (!new_data) {
96 | return false;
97 | }
98 |
99 | // Check if we are ready for prediction or still pending more initial data
100 | if (pending_initial_data && begin_index >= 200) {
101 | pending_initial_data = false;
102 | }
103 |
104 | // Return if we don't have enough data
105 | if (pending_initial_data) {
106 | return false;
107 | }
108 |
109 | // Copy the requested number of bytes to the provided input tensor
110 | for (int i = 0; i < length; ++i) {
111 | int ring_array_index = begin_index + i - length;
112 | if (ring_array_index < 0) {
113 | ring_array_index += 600;
114 | }
115 | input[i] = save_data[ring_array_index];
116 | }
117 |
118 | return true;
119 | }
120 |
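The downsampling arithmetic in SetupAccelerometer, worked through (a sketch: kTargetHz is defined in constants.h, which isn't shown here, and the 25 Hz value is an assumption based on the upstream example). The Nano 33 BLE Sense IMU reports roughly 119 Hz, so roundf(119 / 25) = 5: every 5th reading is kept, for an effective rate near 23.8 Hz.

```cpp
#include <cmath>

// Hypothetical helper mirroring the computation above.
int ComputeSampleEveryN(float sample_rate, float target_hz) {
  return static_cast<int>(roundf(sample_rate / target_hz));
}
// e.g. ComputeSampleEveryN(119.0f, 25.0f) == 5
```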
--------------------------------------------------------------------------------
/Examples/magic_wand/arduino_constants.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "constants.h"
17 |
18 | // The number of expected consecutive inferences for each gesture type.
19 | // Established with the Arduino Nano 33 BLE Sense.
20 | const int kConsecutiveInferenceThresholds[3] = {8, 5, 4};
21 |
--------------------------------------------------------------------------------
/Examples/magic_wand/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
 18 | // Arduino automatically calls the setup() and loop() functions in a sketch,
 19 | // so this file can be left empty; on other platforms it would need to
 20 | // contain the application's main routine.
21 |
--------------------------------------------------------------------------------
/Examples/magic_wand/arduino_output_handler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "output_handler.h"
17 |
18 | #include "Arduino.h"
19 |
20 | void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
21 | // The first time this method runs, set up our LED
22 | static bool is_initialized = false;
23 | if (!is_initialized) {
24 | pinMode(LED_BUILTIN, OUTPUT);
25 | is_initialized = true;
26 | }
27 | // Toggle the LED every time an inference is performed
28 | static int count = 0;
29 | ++count;
30 | if (count & 1) {
31 | digitalWrite(LED_BUILTIN, HIGH);
32 | } else {
33 | digitalWrite(LED_BUILTIN, LOW);
34 | }
35 | // Print some ASCII art for each gesture
36 | if (kind == 0) {
37 | error_reporter->Report(
38 | "WING:\n\r* * *\n\r * * * "
39 | "*\n\r * * * *\n\r * * * *\n\r * * "
40 | "* *\n\r * *\n\r");
41 | } else if (kind == 1) {
42 | error_reporter->Report(
43 | "RING:\n\r *\n\r * *\n\r * *\n\r "
44 | " * *\n\r * *\n\r * *\n\r "
45 | " *\n\r");
46 | } else if (kind == 2) {
47 | error_reporter->Report(
48 | "SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r "
49 | "*\n\r *\n\r *\n\r * * * * * * * *\n\r");
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/Examples/magic_wand/constants.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
18 |
19 | // The expected accelerometer data sample frequency
20 | const float kTargetHz = 25;
21 |
22 | // The number of expected consecutive inferences for each gesture type
23 | extern const int kConsecutiveInferenceThresholds[3];
24 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
25 |
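To make the `kTargetHz` downsampling concrete: on the Nano 33 BLE Sense the LSM9DS1 accelerometer reports at 119 Hz (per the Arduino_LSM9DS1 documentation), so the accelerometer handler ends up keeping roughly every fifth reading. A desktop-side sanity check of that ratio, with the 119 Hz figure assumed rather than read from hardware:

```cpp
// Desktop sanity check of the downsampling ratio used by the accelerometer
// handler; 119.0f stands in for what IMU.accelerationSampleRate() returns.
#include <cmath>
#include <cstdio>

int main() {
  const float kTargetHz = 25;
  const float sample_rate = 119.0f;
  int sample_every_n = static_cast<int>(std::roundf(sample_rate / kTargetHz));
  // Prints: keep every 5th sample -> ~23.8 Hz effective rate
  std::printf("keep every %dth sample -> ~%.1f Hz effective rate\n",
              sample_every_n, sample_rate / sample_every_n);
  return 0;
}
```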
--------------------------------------------------------------------------------
/Examples/magic_wand/gesture_predictor.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "gesture_predictor.h"
17 |
18 | #include "constants.h"
19 |
20 | // How many times the most recent gesture has been matched in a row
21 | int continuous_count = 0;
22 | // The result of the last prediction
23 | int last_predict = -1;
24 |
 25 | // Return the gesture recognized from the latest model output
26 | // 0: wing("W"), 1: ring("O"), 2: slope("angle"), 3: unknown
27 | int PredictGesture(float* output) {
28 | // Find whichever output has a probability > 0.8 (they sum to 1)
29 | int this_predict = -1;
30 | for (int i = 0; i < 3; i++) {
31 | if (output[i] > 0.8) this_predict = i;
32 | }
33 | // No gesture was detected above the threshold
34 | if (this_predict == -1) {
35 | continuous_count = 0;
36 | last_predict = 3;
37 | return 3;
38 | }
39 | if (last_predict == this_predict) {
40 | continuous_count += 1;
41 | } else {
42 | continuous_count = 0;
43 | }
44 | last_predict = this_predict;
45 | // If we haven't yet had enough consecutive matches for this gesture,
46 | // report a negative result
47 | if (continuous_count < kConsecutiveInferenceThresholds[this_predict]) {
48 | return 3;
49 | }
50 | // Otherwise, we've seen a positive result, so clear all our variables
51 | // and report it
52 | continuous_count = 0;
53 | last_predict = -1;
54 | return this_predict;
55 | }
56 |
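One subtlety worth spelling out: with `kConsecutiveInferenceThresholds[1] == 5`, a "ring" output has to stay above 0.8 for six consecutive calls before `PredictGesture` reports it, because the first call only records the candidate and each repeat increments `continuous_count`. A hypothetical desktop harness (linked against gesture_predictor.cpp and arduino_constants.cpp, not part of the Arduino build) demonstrates this:

```cpp
// Hypothetical desktop-side check of the debouncing behavior: feed
// PredictGesture the same confident "ring" output repeatedly.
#include <cstdio>
#include "gesture_predictor.h"

int main() {
  float fake_output[3] = {0.05f, 0.90f, 0.05f};  // class 1 ("ring") wins
  for (int call = 1; call <= 6; ++call) {
    int result = PredictGesture(fake_output);
    // Calls 1-5 print 3 ("unknown"); call 6 reaches the threshold of 5
    // consecutive repeats and prints 1 ("ring")
    std::printf("call %d -> %d\n", call, result);
  }
  return 0;
}
```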
--------------------------------------------------------------------------------
/Examples/magic_wand/gesture_predictor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
18 |
19 | extern int PredictGesture(float* output);
20 |
21 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
22 |
--------------------------------------------------------------------------------
/Examples/magic_wand/magic_wand_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i magic_wand_model.tflite > magic_wand_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_magic_wand_model_data[];
25 | extern const int g_magic_wand_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
28 |
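For reference, the generated .cc file is just the two definitions these externs point at; the bytes below are placeholders, not the real model (bytes 4-7 of an actual .tflite FlatBuffer spell out the "TFL3" file identifier):

```cpp
// Illustrative shape of the xxd-generated magic_wand_model_data.cc;
// the byte values here are placeholders, not the actual model.
#include "magic_wand_model_data.h"

const unsigned char g_magic_wand_model_data[] = {
    0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33,  // ...thousands more bytes
};
const int g_magic_wand_model_data_len = sizeof(g_magic_wand_model_data);
```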
--------------------------------------------------------------------------------
/Examples/magic_wand/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/Examples/magic_wand/output_handler.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | void HandleOutput(tflite::ErrorReporter* error_reporter, int kind);
23 |
24 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
25 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/accelerometer_handler.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
18 |
19 | #define kChannelNumber 3
20 |
21 | #include "tensorflow/lite/c/c_api_internal.h"
22 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
23 |
24 | extern int begin_index;
25 | extern TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter);
26 | extern bool ReadAccelerometer(tflite::ErrorReporter* error_reporter,
27 | float* input, int length, bool reset_buffer);
28 |
29 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_
30 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/arduino_accelerometer_handler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "accelerometer_handler.h"
17 |
 18 | #include <Arduino.h>
 19 | #include <Arduino_LSM9DS1.h>
20 |
21 | #include "constants.h"
22 |
23 | // A buffer holding the last 200 sets of 3-channel values
24 | float save_data[600] = {0.0};
25 | // Most recent position in the save_data buffer
26 | int begin_index = 0;
27 | // True if there is not yet enough data to run inference
28 | bool pending_initial_data = true;
29 | // How often we should save a measurement during downsampling
30 | int sample_every_n;
31 | // The number of measurements since we last saved one
32 | int sample_skip_counter = 1;
33 |
34 | TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
35 | // Wait until we know the serial port is ready
36 | while (!Serial) {
37 | }
38 |
39 | // Switch on the IMU
40 | if (!IMU.begin()) {
41 | error_reporter->Report("Failed to initialize IMU");
42 | return kTfLiteError;
43 | }
44 |
45 | // Determine how many measurements to keep in order to
46 | // meet kTargetHz
47 | float sample_rate = IMU.accelerationSampleRate();
 48 |   sample_every_n = static_cast<int>(roundf(sample_rate / kTargetHz));
49 |
50 | error_reporter->Report("Magic starts!");
51 |
52 | return kTfLiteOk;
53 | }
54 |
55 | bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input,
56 | int length, bool reset_buffer) {
57 | // Clear the buffer if required, e.g. after a successful prediction
58 | if (reset_buffer) {
59 | memset(save_data, 0, 600 * sizeof(float));
60 | begin_index = 0;
61 | pending_initial_data = true;
62 | }
63 | // Keep track of whether we stored any new data
64 | bool new_data = false;
65 | // Loop through new samples and add to buffer
66 | while (IMU.accelerationAvailable()) {
67 | float x, y, z;
68 | // Read each sample, removing it from the device's FIFO buffer
69 | if (!IMU.readAcceleration(x, y, z)) {
70 | error_reporter->Report("Failed to read data");
71 | break;
72 | }
73 | // Throw away this sample unless it's the nth
74 | if (sample_skip_counter != sample_every_n) {
75 | sample_skip_counter += 1;
76 | continue;
77 | }
78 | // Write samples to our buffer, converting to milli-Gs
79 | // and flipping y and x order for compatibility with
80 | // model (sensor orientation is different on Arduino
81 | // Nano BLE Sense compared with SparkFun Edge)
82 | save_data[begin_index++] = y * 1000;
83 | save_data[begin_index++] = x * 1000;
84 | save_data[begin_index++] = z * 1000;
85 | // Since we took a sample, reset the skip counter
86 | sample_skip_counter = 1;
 87 |     // If we reached the end of the circular buffer, wrap around
88 | if (begin_index >= 600) {
89 | begin_index = 0;
90 | }
91 | new_data = true;
92 | }
93 |
94 | // Skip this round if data is not ready yet
95 | if (!new_data) {
96 | return false;
97 | }
98 |
99 | // Check if we are ready for prediction or still pending more initial data
100 | if (pending_initial_data && begin_index >= 200) {
101 | pending_initial_data = false;
102 | }
103 |
104 | // Return if we don't have enough data
105 | if (pending_initial_data) {
106 | return false;
107 | }
108 |
109 |   // Copy the requested number of float values to the provided input tensor
110 | for (int i = 0; i < length; ++i) {
111 | int ring_array_index = begin_index + i - length;
112 | if (ring_array_index < 0) {
113 | ring_array_index += 600;
114 | }
115 | input[i] = save_data[ring_array_index];
116 | }
117 |
118 | return true;
119 | }
120 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/arduino_constants.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "constants.h"
17 |
18 | // The number of expected consecutive inferences for each gesture type.
19 | // Established with the Arduino Nano 33 BLE Sense.
20 | const int kConsecutiveInferenceThresholds[3] = {8, 5, 4};
21 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
 18 | // Arduino automatically calls the setup() and loop() functions in a sketch,
 19 | // so this file can be left empty; on other platforms it would need to
 20 | // contain the application's main routine.
21 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/arduino_output_handler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "output_handler.h"
17 |
18 | #include "Arduino.h"
19 |
 20 | void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
 21 |   // The first time this method runs, configure the built-in LED and the
 22 |   // three gesture LEDs on pins 2, 3 and 4 as outputs
 23 |   static bool is_initialized = false;
 24 |   if (!is_initialized) {
 25 |     pinMode(LED_BUILTIN, OUTPUT);
 26 |     pinMode(2, OUTPUT); pinMode(3, OUTPUT); pinMode(4, OUTPUT);
 27 |     is_initialized = true;
 28 |   }
29 | // Toggle the LED every time an inference is performed
30 | static int count = 0;
31 | ++count;
32 | if (count & 1) {
33 | digitalWrite(LED_BUILTIN, HIGH);
34 | } else {
35 | digitalWrite(LED_BUILTIN, LOW);
36 | }
 37 |   // Light the LED for the detected gesture and print some ASCII art
38 | if (kind == 0) {
39 | digitalWrite(2, HIGH);
40 | digitalWrite(3, LOW);
41 | digitalWrite(4, LOW);
42 | error_reporter->Report(
43 | "WING:\n\r* * *\n\r * * * "
44 | "*\n\r * * * *\n\r * * * *\n\r * * "
45 | "* *\n\r * *\n\r");
46 | } else if (kind == 1) {
47 | digitalWrite(2, LOW);
48 | digitalWrite(3, HIGH);
49 | digitalWrite(4, LOW);
50 | error_reporter->Report(
51 | "RING:\n\r *\n\r * *\n\r * *\n\r "
52 | " * *\n\r * *\n\r * *\n\r "
53 | " *\n\r");
54 | } else if (kind == 2) {
55 | digitalWrite(2, LOW);
56 | digitalWrite(3, LOW);
57 | digitalWrite(4, HIGH);
58 | error_reporter->Report(
59 | "SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r "
60 | "*\n\r *\n\r *\n\r * * * * * * * *\n\r");
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/constants.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
18 |
19 | // The expected accelerometer data sample frequency
20 | const float kTargetHz = 25;
21 |
22 | // The number of expected consecutive inferences for each gesture type
23 | extern const int kConsecutiveInferenceThresholds[3];
24 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_
25 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/gesture_predictor.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "gesture_predictor.h"
17 |
18 | #include "constants.h"
19 |
20 | // How many times the most recent gesture has been matched in a row
21 | int continuous_count = 0;
22 | // The result of the last prediction
23 | int last_predict = -1;
24 |
 25 | // Return the gesture recognized from the latest model output
26 | // 0: wing("W"), 1: ring("O"), 2: slope("angle"), 3: unknown
27 | int PredictGesture(float* output) {
28 | // Find whichever output has a probability > 0.8 (they sum to 1)
29 | int this_predict = -1;
30 | for (int i = 0; i < 3; i++) {
31 | if (output[i] > 0.8) this_predict = i;
32 | }
33 | // No gesture was detected above the threshold
34 | if (this_predict == -1) {
35 | continuous_count = 0;
36 | last_predict = 3;
37 | return 3;
38 | }
39 | if (last_predict == this_predict) {
40 | continuous_count += 1;
41 | } else {
42 | continuous_count = 0;
43 | }
44 | last_predict = this_predict;
45 | // If we haven't yet had enough consecutive matches for this gesture,
46 | // report a negative result
47 | if (continuous_count < kConsecutiveInferenceThresholds[this_predict]) {
48 | return 3;
49 | }
50 | // Otherwise, we've seen a positive result, so clear all our variables
51 | // and report it
52 | continuous_count = 0;
53 | last_predict = -1;
54 | return this_predict;
55 | }
56 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/gesture_predictor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
18 |
19 | extern int PredictGesture(float* output);
20 |
21 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_
22 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/magic_wand_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i magic_wand_model.tflite > magic_wand_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_magic_wand_model_data[];
25 | extern const int g_magic_wand_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_
28 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/Examples/magic_wand_led/output_handler.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | void HandleOutput(tflite::ErrorReporter* error_reporter, int kind);
23 |
24 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_
25 |
--------------------------------------------------------------------------------
/Examples/ml5js/README.md:
--------------------------------------------------------------------------------
1 | - [Image Classification Video](https://editor.p5js.org/ml5/sketches/ImageClassification_Video)
2 | - [Speech Command Classification](https://editor.p5js.org/yining/sketches/F_Eh3jeP5)
3 | - [Teachable Machines with sound/pose/image classifier](../TeachableMachineArduino)
 4 | - Thumbs up/down gesture classifier with LEDs, [code](../ThumbupDownLeds), p5 web editor [code](https://editor.p5js.org/yining/sketches/pF2Nwe2Fh), [demo](https://youtu.be/MXyGUNWpw6A)
5 | - [Hand pose tracking + KNN Classification](https://editor.p5js.org/yining/sketches/uUwg0z9Z5)
6 | - Hand pose tracking + Neural Network, [demo video](https://www.loom.com/share/420fa5941dea411491af817011622c86)
 7 |   - [Collect data](https://editor.p5js.org/yining/sketches/dCoPm-Opb)
8 | - [Train the model](https://editor.p5js.org/yining/sketches/IrBFfXbSF)
9 | - [Run the model](https://editor.p5js.org/yining/sketches/6cFF9-L-Z)
10 |
--------------------------------------------------------------------------------
/Examples/p5ble/README.md:
--------------------------------------------------------------------------------
 1 | # Communicate with Arduino Nano BLE / IoT board through Bluetooth LE
2 |
3 | [p5ble.js](https://itpnyu.github.io/p5ble-website/docs/write-one-char-callback) Write to one characteristic
4 |
5 | ## Demo / How to run it
 6 | [video](https://www.loom.com/share/2e3112daa4f6441dbf0eea188cf66c20)
7 |
8 | ## Circuit
9 |
 10 | ![circuit](../../images/p5blejs-circuit.jpg)
11 | ## Arduino code
12 | [code](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/p5ble/p5ble-arduino)
13 |
14 | ## p5 sketch
15 | https://editor.p5js.org/yining/sketches/trVjIkaDM
16 |
--------------------------------------------------------------------------------
/Examples/p5ble/p5ble-arduino/p5ble-arduino.ino:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | BLEService ledService("19B10000-E8F2-537E-4F6C-D104768A1214"); // BLE LED Service
4 |
5 | // BLE LED Switch Characteristic - custom 128-bit UUID, read and writable by central
6 | BLEByteCharacteristic switchCharacteristic("19B10001-E8F2-537E-4F6C-D104768A1214", BLERead | BLEWrite);
7 |
8 | const int ledPin = 4; // pin to use for the LED
9 |
10 | void setup() {
11 | Serial.begin(9600);
12 | while (!Serial);
13 |
14 | // set LED pin to output mode
15 | pinMode(ledPin, OUTPUT);
16 |
17 | // begin initialization
18 | if (!BLE.begin()) {
19 | Serial.println("starting BLE failed!");
20 |
21 | while (1);
22 | }
23 |
24 | // set advertised local name and service UUID:
25 | BLE.setLocalName("LED");
26 | BLE.setAdvertisedService(ledService);
27 |
28 | // add the characteristic to the service
29 | ledService.addCharacteristic(switchCharacteristic);
30 |
31 | // add service
32 | BLE.addService(ledService);
33 |
 34 |   // set the initial value for the characteristic:
35 | switchCharacteristic.writeValue(0);
36 |
37 | // start advertising
38 | BLE.advertise();
39 |
40 | Serial.println("BLE LED Peripheral");
41 | }
42 |
43 | void loop() {
44 | // listen for BLE peripherals to connect:
45 | BLEDevice central = BLE.central();
46 |
47 | // if a central is connected to peripheral:
48 | if (central) {
49 | Serial.print("Connected to central: ");
50 | // print the central's MAC address:
51 | Serial.println(central.address());
52 |
53 | // while the central is still connected to peripheral:
54 | while (central.connected()) {
55 | // if the remote device wrote to the characteristic,
56 | // use the value to control the LED:
57 | if (switchCharacteristic.written()) {
58 | if (switchCharacteristic.value()) { // any value other than 0
59 | Serial.println("LED on");
60 | digitalWrite(ledPin, HIGH); // will turn the LED on
61 | } else { // a 0 value
62 | Serial.println(F("LED off"));
63 | digitalWrite(ledPin, LOW); // will turn the LED off
64 | }
65 | }
66 | }
67 |
68 | // when the central disconnects, print it out:
69 | Serial.print(F("Disconnected from central: "));
70 | Serial.println(central.address());
71 | }
72 | }
73 |
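The README above pairs this peripheral with a p5.ble.js page. As an alternative way to exercise the same characteristic, a second board can play the central role; this rough sketch is modeled on the ArduinoBLE library's LedControl example and assumes only the UUIDs defined in the peripheral sketch:

```cpp
// Rough central-side test sketch (modeled on ArduinoBLE's LedControl
// example): scans for the "LED" peripheral and toggles its switch
// characteristic once per second.
#include <ArduinoBLE.h>

void setup() {
  Serial.begin(9600);
  while (!Serial);
  if (!BLE.begin()) {
    Serial.println("starting BLE failed!");
    while (1);
  }
  // Look for peripherals advertising the LED service from the sketch above
  BLE.scanForUuid("19b10000-e8f2-537e-4f6c-d104768a1214");
}

void loop() {
  BLEDevice peripheral = BLE.available();
  if (!peripheral) return;
  BLE.stopScan();
  if (peripheral.connect() && peripheral.discoverAttributes()) {
    BLECharacteristic ledChar =
        peripheral.characteristic("19b10001-e8f2-537e-4f6c-d104768a1214");
    while (peripheral.connected()) {
      ledChar.writeValue((byte)0x01);  // LED on
      delay(1000);
      ledChar.writeValue((byte)0x00);  // LED off
      delay(1000);
    }
  }
  BLE.scanForUuid("19b10000-e8f2-537e-4f6c-d104768a1214");  // resume scanning
}
```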
--------------------------------------------------------------------------------
/Intro/README.md:
--------------------------------------------------------------------------------
1 | # Week 1 Introduction to Machine Learning
2 | ## [Slides](https://docs.google.com/presentation/d/1zqIRHqCsO8a4GkMIFVTHzD_CZQ-CBZsDKkLy4PEgLU8/edit?usp=sharing)
3 | ## Notes:
 4 | - What are Artificial Intelligence, Machine Learning, and Deep Learning?
5 | - Supervised Learning, Unsupervised Learning, Reinforcement Learning
6 | - Machine Learning output types: Regression, Classification, Clustering, Sequence prediction
7 | - Existing Machine Learning use cases and creative projects
 8 | - Existing Machine Learning-capable microcontrollers
9 | - Introduction to Tensorflow Lite
10 | - TensorFlow Lite for Microcontrollers
11 |
12 | ## Demo / Workshop
13 | - Running [Speech detection](/Examples/MicroSpeech/YesNo) (Yes/No) with TensorFlow Lite on Arduino Nano BLE Sense
 14 | - [Setting up the Arduino IDE](https://blog.arduino.cc/2019/10/15/get-started-with-machine-learning-on-arduino/) (scroll down to "Setting up the Arduino IDE") with Arduino Nano BLE Sense and Tensorflow Lite
15 | - [How to load the Arduino code](https://www.loom.com/share/b97fb5ad2c4c45ce87f7d025d8563594)
16 | - [Demo](https://youtu.be/2i1-XrqH9Ws)
17 |
18 | - Running [sound and image classifier](/Examples/ml5js) with ml5.js in the browser
19 |
20 | ## Homework
21 | - Reading: [TinyML](https://learning-oreilly-com.proxy.library.nyu.edu/library/view/tinyml/9781492052036/) Chapter 1 Introduction (Log in with your NYU NetId)
22 | - Reading: [Get started with machine learning on Arduino](https://blog.arduino.cc/2019/10/15/get-started-with-machine-learning-on-arduino/)
23 | - Video: [What is a Neural Network?](https://youtu.be/aircAruvnKk)
24 |
--------------------------------------------------------------------------------
/Next/README.md:
--------------------------------------------------------------------------------
 1 | # Next Steps: beyond classification, other tools
2 |
3 | ## Workflow for ML projects
4 | 1. Decide on a goal
5 | 2. Collect a dataset
6 | 3. Design a model architecture
7 | 4. Train the model
8 | 5. Convert the model
 9 | 6. Run inference (see the sketch after this list)
10 | 7. Evaluate and troubleshoot
11 |
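Steps 5 and 6 are the microcontroller-specific ones. Here is a minimal sketch of step 6 using the 2019-era experimental TensorFlow Lite for Microcontrollers API and this repo's magic wand model array; the 60 KB tensor arena is an assumed size that has to be tuned per model:

```cpp
// Minimal inference setup with the experimental TFLite Micro API; the
// tensor arena size is a per-model guess, not a fixed requirement.
#include <cstdint>
#include "tensorflow/lite/experimental/micro/kernels/all_ops_resolver.h"
#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
#include "tensorflow/lite/experimental/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "magic_wand_model_data.h"

constexpr int kTensorArenaSize = 60 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];

void RunOnce() {
  static tflite::MicroErrorReporter micro_error_reporter;
  const tflite::Model* model = tflite::GetModel(g_magic_wand_model_data);
  static tflite::ops::micro::AllOpsResolver resolver;
  static tflite::MicroInterpreter interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, &micro_error_reporter);
  interpreter.AllocateTensors();
  TfLiteTensor* input = interpreter.input(0);
  // ...fill input->data.f with downsampled accelerometer readings...
  if (interpreter.Invoke() == kTfLiteOk) {
    float* probabilities = interpreter.output(0)->data.f;
    // probabilities[0..2]: wing, ring, slope
  }
}
```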
 12 | ## Design Your Own TinyML Applications
13 | - Read more in TinyML book Chapter 14
14 | - The Design Process
15 | - Do You Need a Microcontroller, or Would a Larger Device Work?
16 | - Understanding What’s Possible
17 | - Follow in Someone Else’s Footsteps
18 | - Find Some Similar Models to Train
19 | - Look at the Data
20 | - Wizard of Oz-ing
21 | - Get It Working on the Desktop First
22 |
23 | ## Examples
24 | - [Detect person example](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/PersonDetect) on Arduino Nano 33 BLE Sense
25 | - [p5 sketch](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/p5ble) to Arduino Wireless communication, Bluetooth LE, [p5ble.js](https://itpnyu.github.io/p5ble-website), [p5 sketch light up LEDs example](https://itpnyu.github.io/p5ble-website/docs/write-one-char-callback)
26 | - Running servo motor with Arduino Nano BLE (sense), [tutorial](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/tree/master/Examples/ServoMotor)
27 | - Person Detection with [ml5js](https://learn.ml5js.org/#/reference)
28 | - [Object Detection](https://editor.p5js.org/ml5/sketches/ObjectDetector_COCOSSD_Video)
 29 |   - [Pose Detection](https://editor.p5js.org/ml5/sketches/PoseNet_webcam)
30 | - [BodyPix](https://editor.p5js.org/ml5/sketches/BodyPix_Webcam_Parts)
31 | - [FaceMesh](https://editor.p5js.org/ml5/sketches/Facemesh_Webcam)
32 | - Sound detection with [p5 speech](https://idmnyu.github.io/p5.js-speech/)
 33 |   - Speech recognition [simple](https://idmnyu.github.io/p5.js-speech/examples/04simplerecognition.html), [continuous](https://idmnyu.github.io/p5.js-speech/examples/05continuousrecognition.html)
34 | - [Speech Synthesis](https://idmnyu.github.io/p5.js-speech/examples/01simple.html)
35 |
36 |
37 | ## Tools
38 | - Google Coral [models](https://coral.ai/models/), [examples](https://coral.ai/examples/#code-examples/)
39 | - [RunwayML](http://runwayml.com/)
40 | - Training image, text generation models, image transformation tasks
41 | - [RunwayML + Arduino](https://github.com/runwayml/arduino)
42 | - [RunwayML + p5js](https://github.com/runwayml/p5js)
43 | - [First order motion model for image animation](https://github.com/AliaksandrSiarohin/first-order-model)
44 | - [Liquid Warping GAN](https://github.com/svip-lab/impersonator)
45 |
46 | ## Power
47 | - Power Arduino Nano 33 BLE
48 | - [Lithium Ion Polymer Battery](https://www.adafruit.com/product/1578)
49 | - [JST-PH 2-Pin SMT Right Angle Breakout Board](https://www.adafruit.com/product/1862)
50 | - [Micro-Lipo Charger for LiPo](https://www.adafruit.com/product/1904)
 51 | - [Power switch tail](https://www.adafruit.com/product/268), good for controlling electronics at home, like a regular lamp. Be safe when working with high voltage. Power switch tail with Arduino [demo video](https://youtu.be/kXjBvL2ECX8)
52 |
53 | ## Homework
54 | Keep working on the final assignment
55 |
--------------------------------------------------------------------------------
/RunAModel/README.md:
--------------------------------------------------------------------------------
1 | # Week 3 Run a model with Tensorflow Lite / Tensorflow.js
2 |
3 | ## [Slides](https://docs.google.com/presentation/d/1AM6FtkjbCZvBt1OjF6PikLbEu6eSvWovtNKj-Dxm1Gc/edit?usp=sharing)
4 |
5 | ## Notes:
6 | - Understand the concept of a “machine learning model.”
7 | - What is a “pre-trained model”?
8 | - Tensorflow Lite for Microcontrollers
 9 | - 3 Tensorflow Lite on Microcontrollers examples: Micro Speech, Magic Wand, Detect a person
 10 | - Why run Tensorflow Lite on Microcontrollers?
11 | - TF Lite on Microcontrollers Workflow
 12 | - What is a quantized model?
 13 | - What is a FlatBuffer?
14 |
15 | ## Workshop:
16 | - Magic Wand, [Demo video](https://youtu.be/E42RYOEqfyA), [Guide](../Examples/magic_wand)
17 | - Magic Wand with LEDs, [Demo video](https://youtu.be/E42RYOEqfyA), [Guide](../Examples/magic_wand_led)
18 | - Thumb up down gesture classifier with LEDs, [code](../Examples/ThumbupDownLeds), p5 web editor [code](https://editor.p5js.org/yining/sketches/pF2Nwe2Fh), [demo](https://youtu.be/MXyGUNWpw6A)
19 |
20 | ## Resource:
21 | - [Tensorflow Lite Get started with microcontrollers](https://www.tensorflow.org/lite/microcontrollers/get_started)
22 | - [Arduino TensorFlow Lite Tutorials](https://github.com/arduino/ArduinoTensorFlowLiteTutorials)
 23 | - [TinyML Book chapter 11: Magic Wand, chapter 9 Detect person](https://learning-oreilly-com.proxy.library.nyu.edu/library/view/tinyml/9781492052036/) (Log in with your NYU NetId)
24 | - [Arduino Project Hub: Person Detection with TensorFlow and Arduino](https://create.arduino.cc/projecthub/little_lookout/person-detection-with-tensorflow-and-arduino-47ae01)
25 | - [Arduino Project Hub: Magic Wandddddddd](https://create.arduino.cc/projecthub/team-182/magic-wandddddddd-ea87db)
26 | - [Face and hand tracking in the browser with MediaPipe and TensorFlow.js](https://blog.tensorflow.org/2020/03/face-and-hand-tracking-in-browser-with-mediapipe-and-tensorflowjs.html)
27 |
--------------------------------------------------------------------------------
/SensorDataClassification/README.md:
--------------------------------------------------------------------------------
1 | # Week 5 Sensor data classification
2 |
3 | ## [Slides](https://docs.google.com/presentation/d/1WQhE8FaJKdABevlmBQoHr7Vf6waeJc2ezviXRIV_xlM/edit?usp=sharing)
4 |
5 | ## Workshop:
6 | - [Fruit identification using Arduino and TensorFlow](https://blog.arduino.cc/2019/11/07/fruit-identification-using-arduino-and-tensorflow), [code](../Examples/FruitToEmoji)
7 | - [Regression with ml5js](https://editor.p5js.org/ml5/sketches/FeatureExtractor_Image_Regression)
8 | - More examples:
9 | - [https://editor.p5js.org/AndreasRef/sketches/4K_YGuMik](https://editor.p5js.org/AndreasRef/sketches/4K_YGuMik)
10 | - [https://editor.p5js.org/AndreasRef/sketches/HyEDToYnQ](https://editor.p5js.org/AndreasRef/sketches/HyEDToYnQ)
11 | - [https://editor.p5js.org/AndreasRef/sketches/BynhuHsqX](https://editor.p5js.org/AndreasRef/sketches/BynhuHsqX)
12 | - [Train a doodle classifier](https://github.com/yining1023/doodleNet)
13 | - Input & Output Brainstorm, [Figjam](https://www.figma.com/file/z8osO6itFfu9jhCNJFkfS4/ML4PC-Input-and-Output?node-id=0%3A1)
14 |
15 | ## Resource:
16 | - [Sensors on Arduino Nano 33 BLE Sense](https://www.arduino.cc/en/Guide/NANO33BLESense)
17 | - [ml5js Feature extractor](https://learn.ml5js.org/docs/#/reference/feature-extractor)
18 | - [Fruit identification using Arduino and TensorFlow](https://blog.arduino.cc/2019/11/07/fruit-identification-using-arduino-and-tensorflow)
19 | - [p5js sketch visualize sensor data from Arduino nano BLE Sense](https://github.com/sandeepmistry/Arduino-Nano-33-BLE-Sense-Examples)
20 |
21 | ## [Homework](https://github.com/yining1023/Machine-Learning-for-Physical-Computing/wiki/Week-5-Final-Assignment-Proposal)
22 |
--------------------------------------------------------------------------------
/TrainAModel/README.md:
--------------------------------------------------------------------------------
1 | # Week 4 Train a model with Tensorflow Lite / Tensorflow.js / ml5.js
2 |
3 | ## [Slides](https://docs.google.com/presentation/d/1s7c9oGQRg-tHeRoAsi8SuptjyBpkK0oA6ZMpYeERjFM/edit?usp=sharing)
4 |
5 | ## Notes:
 6 | - Understand the full story of building an ML model for classification or regression.
7 | - Understand how data is formatted and downloaded including CSV and JSON.
8 | - Consider how to frame the problem and collect data.
9 | - Learn how to prepare a dataset, including how to normalize and properly format it.
10 |
11 | ## Workshop:
 12 | - Training your own gesture classifier:
13 | - Gestures to Emoji, [Guide](../Examples/GestureToEmoji)
14 | - ABC Gestures to 3 LEDs, [Guide](../Examples/ABCGestures)
 15 | - Training a hand pose model:
16 | - Hand pose tracking + KNN Classification, [p5 web editor code](https://editor.p5js.org/yining/sketches/uUwg0z9Z5), [demo video](https://www.loom.com/share/f81cf908e5b7404ba0071902019d67c2)
17 | - Hand pose tracking + Neural Network, [demo video](https://www.loom.com/share/420fa5941dea411491af817011622c86)
18 | - [Collect data](https://editor.p5js.org/yining/sketches/dCoPm-Opb)
19 | - [Train the model](https://editor.p5js.org/yining/sketches/IrBFfXbSF)
20 | - [Run the model](https://editor.p5js.org/yining/sketches/6cFF9-L-Z)
21 |
22 | ## Resource:
23 | - [TinyML Workshop](https://github.com/sandeepmistry/aimldevfest-workshop-2019)
 24 | - [TinyML Book chapter 12: Magic Wand, Training a model](https://learning-oreilly-com.proxy.library.nyu.edu/library/view/tinyml/9781492052036/) (Log in with your NYU NetId)
25 | - [Coding training video: ml5 pose classifier](https://thecodingtrain.com/learning/ml5/7.2-pose-classifier.html)
26 | - [ml5.js: Pose Classification training all steps](https://editor.p5js.org/codingtrain/sketches/JoZl-QRPK)
27 | - [NeuralNetwork pose classifier](https://editor.p5js.org/ml5/sketches/NeuralNetwork_pose_classifier)
28 |
29 |
--------------------------------------------------------------------------------
/images/abcgesture_circuit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/abcgesture_circuit.jpg
--------------------------------------------------------------------------------
/images/detect_person_circuit1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/detect_person_circuit1.jpg
--------------------------------------------------------------------------------
/images/detect_person_circuit2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/detect_person_circuit2.jpg
--------------------------------------------------------------------------------
/images/detect_person_parts.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/detect_person_parts.jpg
--------------------------------------------------------------------------------
/images/magicwand-led-circuit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/magicwand-led-circuit.jpg
--------------------------------------------------------------------------------
/images/p5blejs-circuit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/p5blejs-circuit.jpg
--------------------------------------------------------------------------------
/images/servo_circuit1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/servo_circuit1.jpg
--------------------------------------------------------------------------------
/images/servo_circuit2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/servo_circuit2.jpg
--------------------------------------------------------------------------------
/images/sound_servo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/sound_servo.jpg
--------------------------------------------------------------------------------
/images/tmarduino.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yining1023/Machine-Learning-for-Physical-Computing/c03df5e5cbdd5c0d0e15f8bee3d85342efa850b5/images/tmarduino.jpeg
--------------------------------------------------------------------------------