├── .gitignore ├── .travis.yml ├── .vscode ├── extensions.json └── settings.json ├── README.md ├── docs ├── IMG_1736.jpg └── timl_1109.png ├── include └── README ├── lib ├── Arduino_LSM9DS1 │ ├── .library.json │ ├── CHANGELOG │ ├── README.adoc │ ├── examples │ │ ├── SimpleAccelerometer │ │ │ └── SimpleAccelerometer.ino │ │ ├── SimpleGyroscope │ │ │ └── SimpleGyroscope.ino │ │ └── SimpleMagnetometer │ │ │ └── SimpleMagnetometer.ino │ ├── keywords.txt │ ├── library.properties │ └── src │ │ ├── Arduino_LSM9DS1.h │ │ ├── LSM9DS1.cpp │ │ └── LSM9DS1.h ├── Arduino_TensorFlowLite │ ├── LICENSE │ ├── library.properties │ └── src │ │ ├── TensorFlowLite.h │ │ ├── tensorflow │ │ ├── core │ │ │ └── public │ │ │ │ └── version.h │ │ └── lite │ │ │ ├── c │ │ │ ├── builtin_op_data.h │ │ │ ├── common.c │ │ │ └── common.h │ │ │ ├── core │ │ │ └── api │ │ │ │ ├── error_reporter.cpp │ │ │ │ ├── error_reporter.h │ │ │ │ ├── flatbuffer_conversions.cpp │ │ │ │ ├── flatbuffer_conversions.h │ │ │ │ ├── op_resolver.cpp │ │ │ │ ├── op_resolver.h │ │ │ │ ├── tensor_utils.cpp │ │ │ │ └── tensor_utils.h │ │ │ ├── kernels │ │ │ ├── internal │ │ │ │ ├── common.h │ │ │ │ ├── compatibility.h │ │ │ │ ├── optimized │ │ │ │ │ └── neon_check.h │ │ │ │ ├── quantization_util.cpp │ │ │ │ ├── quantization_util.h │ │ │ │ ├── reference │ │ │ │ │ ├── add.h │ │ │ │ │ ├── arg_min_max.h │ │ │ │ │ ├── binary_function.h │ │ │ │ │ ├── ceil.h │ │ │ │ │ ├── comparisons.h │ │ │ │ │ ├── concatenation.h │ │ │ │ │ ├── conv.h │ │ │ │ │ ├── depthwiseconv_float.h │ │ │ │ │ ├── depthwiseconv_uint8.h │ │ │ │ │ ├── dequantize.h │ │ │ │ │ ├── floor.h │ │ │ │ │ ├── fully_connected.h │ │ │ │ │ ├── integer_ops │ │ │ │ │ │ ├── add.h │ │ │ │ │ │ ├── conv.h │ │ │ │ │ │ ├── depthwise_conv.h │ │ │ │ │ │ ├── fully_connected.h │ │ │ │ │ │ ├── mul.h │ │ │ │ │ │ ├── pooling.h │ │ │ │ │ │ └── softmax.h │ │ │ │ │ ├── logistic.h │ │ │ │ │ ├── maximum_minimum.h │ │ │ │ │ ├── mul.h │ │ │ │ │ ├── neg.h │ │ │ │ │ ├── pad.h │ │ │ │ │ ├── pooling.h 
│ │ │ │ │ ├── prelu.h │ │ │ │ │ ├── process_broadcast_shapes.h │ │ │ │ │ ├── quantize.h │ │ │ │ │ ├── round.h │ │ │ │ │ ├── softmax.h │ │ │ │ │ └── strided_slice.h │ │ │ │ ├── round.h │ │ │ │ ├── scoped_profiling_label_wrapper.h │ │ │ │ ├── strided_slice_logic.h │ │ │ │ ├── tensor.h │ │ │ │ ├── tensor_ctypes.h │ │ │ │ └── types.h │ │ │ ├── kernel_util.cpp │ │ │ ├── kernel_util.h │ │ │ ├── op_macros.h │ │ │ └── padding.h │ │ │ ├── micro │ │ │ ├── arduino │ │ │ │ └── debug_log.cpp │ │ │ ├── compatibility.h │ │ │ ├── debug_log.h │ │ │ ├── debug_log_numbers.cpp │ │ │ ├── debug_log_numbers.h │ │ │ ├── kernels │ │ │ │ ├── activation_utils.h │ │ │ │ ├── activations.cpp │ │ │ │ ├── add.cpp │ │ │ │ ├── all_ops_resolver.cpp │ │ │ │ ├── all_ops_resolver.h │ │ │ │ ├── arg_min_max.cpp │ │ │ │ ├── ceil.cpp │ │ │ │ ├── comparisons.cpp │ │ │ │ ├── concatenation.cpp │ │ │ │ ├── conv.cpp │ │ │ │ ├── dequantize.cpp │ │ │ │ ├── elementwise.cpp │ │ │ │ ├── floor.cpp │ │ │ │ ├── fully_connected.cpp │ │ │ │ ├── logical.cpp │ │ │ │ ├── logistic.cpp │ │ │ │ ├── maximum_minimum.cpp │ │ │ │ ├── micro_ops.h │ │ │ │ ├── micro_utils.h │ │ │ │ ├── mul.cpp │ │ │ │ ├── neg.cpp │ │ │ │ ├── pack.cpp │ │ │ │ ├── pad.cpp │ │ │ │ ├── pooling.cpp │ │ │ │ ├── portable_optimized │ │ │ │ │ └── depthwise_conv.cpp │ │ │ │ ├── prelu.cpp │ │ │ │ ├── quantize.cpp │ │ │ │ ├── reshape.cpp │ │ │ │ ├── round.cpp │ │ │ │ ├── softmax.cpp │ │ │ │ ├── split.cpp │ │ │ │ ├── strided_slice.cpp │ │ │ │ ├── svdf.cpp │ │ │ │ └── unpack.cpp │ │ │ ├── memory_helpers.cpp │ │ │ ├── memory_helpers.h │ │ │ ├── memory_planner │ │ │ │ ├── greedy_memory_planner.cpp │ │ │ │ ├── greedy_memory_planner.h │ │ │ │ ├── linear_memory_planner.cpp │ │ │ │ ├── linear_memory_planner.h │ │ │ │ └── memory_planner.h │ │ │ ├── micro_allocator.cpp │ │ │ ├── micro_allocator.h │ │ │ ├── micro_error_reporter.cpp │ │ │ ├── micro_error_reporter.h │ │ │ ├── micro_interpreter.cpp │ │ │ ├── micro_interpreter.h │ │ │ ├── micro_mutable_op_resolver.cpp │ │ │ 
├── micro_mutable_op_resolver.h │ │ │ ├── micro_optional_debug_tools.cpp │ │ │ ├── micro_optional_debug_tools.h │ │ │ ├── micro_utils.cpp │ │ │ ├── micro_utils.h │ │ │ ├── simple_memory_allocator.cpp │ │ │ ├── simple_memory_allocator.h │ │ │ ├── test_helpers.cpp │ │ │ ├── test_helpers.h │ │ │ └── testing │ │ │ │ ├── micro_test.h │ │ │ │ └── test_utils.h │ │ │ ├── schema │ │ │ └── schema_generated.h │ │ │ ├── string_type.h │ │ │ ├── string_util.h │ │ │ ├── type_to_tflitetype.h │ │ │ └── version.h │ │ └── third_party │ │ ├── flatbuffers │ │ ├── LICENSE.txt │ │ └── include │ │ │ └── flatbuffers │ │ │ ├── base.h │ │ │ ├── flatbuffers.h │ │ │ └── stl_emulation.h │ │ └── gemmlowp │ │ ├── LICENSE │ │ ├── fixedpoint │ │ ├── fixedpoint.h │ │ └── fixedpoint_sse.h │ │ └── internal │ │ └── detect_platform.h └── README ├── platformio.ini ├── src ├── accelerometer_handler.cpp ├── accelerometer_handler.h ├── constants.cpp ├── constants.h ├── gesture_predictor.cpp ├── gesture_predictor.h ├── magic_wand_model_data.cpp ├── magic_wand_model_data.h ├── main.cpp ├── output_handler.cpp ├── output_handler.h └── xmas_demo.hpp └── test └── README /.gitignore: -------------------------------------------------------------------------------- 1 | .pio 2 | .vscode/.browse.c_cpp.db* 3 | .vscode/c_cpp_properties.json 4 | .vscode/launch.json 5 | .vscode/ipch 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Continuous Integration (CI) is the practice, in software 2 | # engineering, of merging all developer working copies with a shared mainline 3 | # several times a day < https://docs.platformio.org/page/ci/index.html > 4 | # 5 | # Documentation: 6 | # 7 | # * Travis CI Embedded Builds with PlatformIO 8 | # < https://docs.travis-ci.com/user/integration/platformio/ > 9 | # 10 | # * PlatformIO integration with Travis CI 11 | # < 
https://docs.platformio.org/page/ci/travis.html > 12 | # 13 | # * User Guide for `platformio ci` command 14 | # < https://docs.platformio.org/page/userguide/cmd_ci.html > 15 | # 16 | # 17 | # Please choose one of the following templates (proposed below) and uncomment 18 | # it (remove "# " before each line) or use own configuration according to the 19 | # Travis CI documentation (see above). 20 | # 21 | 22 | 23 | # 24 | # Template #1: General project. Test it using existing `platformio.ini`. 25 | # 26 | 27 | # language: python 28 | # python: 29 | # - "2.7" 30 | # 31 | # sudo: false 32 | # cache: 33 | # directories: 34 | # - "~/.platformio" 35 | # 36 | # install: 37 | # - pip install -U platformio 38 | # - platformio update 39 | # 40 | # script: 41 | # - platformio run 42 | 43 | 44 | # 45 | # Template #2: The project is intended to be used as a library with examples. 46 | # 47 | 48 | # language: python 49 | # python: 50 | # - "2.7" 51 | # 52 | # sudo: false 53 | # cache: 54 | # directories: 55 | # - "~/.platformio" 56 | # 57 | # env: 58 | # - PLATFORMIO_CI_SRC=path/to/test/file.c 59 | # - PLATFORMIO_CI_SRC=examples/file.ino 60 | # - PLATFORMIO_CI_SRC=path/to/test/directory 61 | # 62 | # install: 63 | # - pip install -U platformio 64 | # - platformio update 65 | # 66 | # script: 67 | # - platformio ci --lib="." 
--board=ID_1 --board=ID_2 --board=ID_N 68 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | // See http://go.microsoft.com/fwlink/?LinkId=827846 3 | // for the documentation about the extensions.json format 4 | "recommendations": [ 5 | "platformio.platformio-ide" 6 | ] 7 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "*.cpp": "cpp", 4 | "mstd_iterator": "cpp" 5 | } 6 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Magic Wand with Machine Learning 2 | 3 | Magic Wand using Arduino Nano 33 BLE Sense, powered by TensorFlow Lite for Microcontrollers and PlatformIO. 4 | 5 | ## Demo Video 6 | 7 | Click the image to view video. 8 | 9 | [![Demo video thumbnail](http://i3.ytimg.com/vi/Lfv3WJnYhX0/hqdefault.jpg)](https://www.youtube.com/watch?v=Lfv3WJnYhX0) 10 | 11 | ## Prerequisites 12 | 13 | * [PlatformIO](http://platformio.org/) 14 | * [platform-nordicnrf52](https://github.com/platformio/platform-nordicnrf52). Should be installed automatically, as it's specified in platformio.ini. Please note that I'm using `develop` version/branch. 15 | 16 | ## Train 17 | You can train the Machine Learning model to recognize your own gesture, or even other gestures than Magic Wand. The model and training code is [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/magic_wand/train). 18 | 19 | I'm thinking to create some guideline to do that training in [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning/), let me find the time. 
20 | 21 | ## Credit 22 | 23 | * [Magic Wand original sample code](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/magic_wand) 24 | -------------------------------------------------------------------------------- /docs/IMG_1736.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andriyadi/MagicWand-TFLite-Arduino/6c0e2a2355f5c199dc07d32f4bbf3e615d6aa718/docs/IMG_1736.jpg -------------------------------------------------------------------------------- /docs/timl_1109.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andriyadi/MagicWand-TFLite-Arduino/6c0e2a2355f5c199dc07d32f4bbf3e615d6aa718/docs/timl_1109.png -------------------------------------------------------------------------------- /include/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project header files. 3 | 4 | A header file is a file containing C declarations and macro definitions 5 | to be shared between several project source files. You request the use of a 6 | header file in your project source file (C, C++, etc) located in `src` folder 7 | by including it, with the C preprocessing directive `#include'. 8 | 9 | ```src/main.c 10 | 11 | #include "header.h" 12 | 13 | int main (void) 14 | { 15 | ... 16 | } 17 | ``` 18 | 19 | Including a header file produces the same results as copying the header file 20 | into each source file that needs it. Such copying would be time-consuming 21 | and error-prone. With a header file, the related declarations appear 22 | in only one place. If they need to be changed, they can be changed in one 23 | place, and programs that include the header file will automatically use the 24 | new version when next recompiled. 
The header file eliminates the labor of 25 | finding and changing all the copies as well as the risk that a failure to 26 | find one copy will result in inconsistencies within a program. 27 | 28 | In C, the usual convention is to give header files names that end with `.h'. 29 | It is most portable to use only letters, digits, dashes, and underscores in 30 | header file names, and at most one dot. 31 | 32 | Read more about using header files in official GCC documentation: 33 | 34 | * Include Syntax 35 | * Include Operation 36 | * Once-Only Headers 37 | * Computed Includes 38 | 39 | https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html 40 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/.library.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Arduino_LSM9DS1", 3 | "version": "1.0.0", 4 | "keywords": [ 5 | "sensors" 6 | ], 7 | "description": "Allows you to read the accelerometer, magnetometer and gyroscope values from the LSM9DS1 IMU on your Arduino Nano 33 BLE Sense.", 8 | "frameworks": [ 9 | "arduino" 10 | ], 11 | "platforms": [ 12 | "atmelavr", 13 | "atmelsam", 14 | "espressif32", 15 | "espressif8266", 16 | "infineonxmc", 17 | "intel_arc32", 18 | "kendryte210", 19 | "microchippic32", 20 | "nordicnrf51", 21 | "nordicnrf52", 22 | "ststm32", 23 | "ststm8", 24 | "teensy", 25 | "timsp430" 26 | ], 27 | "authors": [ 28 | { 29 | "email": "info@arduino.cc", 30 | "url": null, 31 | "maintainer": true, 32 | "name": "Arduino" 33 | } 34 | ], 35 | "repository": { 36 | "type": "git", 37 | "url": "https://github.com/arduino-libraries/Arduino_LSM9DS1" 38 | }, 39 | "homepage": null, 40 | "export": { 41 | "include": null, 42 | "exclude": [ 43 | "extras", 44 | "docs", 45 | "tests", 46 | "test", 47 | "*.doxyfile", 48 | "*.pdf" 49 | ] 50 | }, 51 | "id": 6589 52 | } -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/CHANGELOG: 
-------------------------------------------------------------------------------- 1 | Arduino_LSM9DS1 ?.?.? - ????.??.?? 2 | 3 | Arduino_LSM9DS1 1.0.0 - 2019.07.31 4 | 5 | * Initial release 6 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/README.adoc: -------------------------------------------------------------------------------- 1 | = LSM9DS1 Library for Arduino = 2 | 3 | Allows you to read the accelerometer, magnetometer and gyroscope values from the LSM9DS1 IMU on your Arduino Nano 33 BLE Sense. 4 | 5 | == License == 6 | 7 | Copyright (c) 2019 Arduino SA. All rights reserved. 8 | 9 | This library is free software; you can redistribute it and/or 10 | modify it under the terms of the GNU Lesser General Public 11 | License as published by the Free Software Foundation; either 12 | version 2.1 of the License, or (at your option) any later version. 13 | 14 | This library is distributed in the hope that it will be useful, 15 | but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 | Lesser General Public License for more details. 18 | 19 | You should have received a copy of the GNU Lesser General Public 20 | License along with this library; if not, write to the Free Software 21 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/examples/SimpleAccelerometer/SimpleAccelerometer.ino: -------------------------------------------------------------------------------- 1 | /* 2 | Arduino LSM9DS1 - Simple Accelerometer 3 | 4 | This example reads the acceleration values from the LSM9DS1 5 | sensor and continuously prints them to the Serial Monitor 6 | or Serial Plotter. 
7 | 8 | The circuit: 9 | - Arduino Nano 33 BLE Sense 10 | 11 | created 10 Jul 2019 12 | by Riccardo Rizzo 13 | 14 | This example code is in the public domain. 15 | */ 16 | 17 | #include 18 | 19 | void setup() { 20 | Serial.begin(9600); 21 | while (!Serial); 22 | Serial.println("Started"); 23 | 24 | if (!IMU.begin()) { 25 | Serial.println("Failed to initialize IMU!"); 26 | while (1); 27 | } 28 | 29 | Serial.print("Accelerometer sample rate = "); 30 | Serial.print(IMU.accelerationSampleRate()); 31 | Serial.println(" Hz"); 32 | Serial.println(); 33 | Serial.println("Acceleration in G's"); 34 | Serial.println("X\tY\tZ"); 35 | } 36 | 37 | void loop() { 38 | float x, y, z; 39 | 40 | if (IMU.accelerationAvailable()) { 41 | IMU.readAcceleration(x, y, z); 42 | 43 | Serial.print(x); 44 | Serial.print('\t'); 45 | Serial.print(y); 46 | Serial.print('\t'); 47 | Serial.println(z); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/examples/SimpleGyroscope/SimpleGyroscope.ino: -------------------------------------------------------------------------------- 1 | /* 2 | Arduino LSM9DS1 - Simple Gyroscope 3 | 4 | This example reads the gyroscope values from the LSM9DS1 5 | sensor and continuously prints them to the Serial Monitor 6 | or Serial Plotter. 7 | 8 | The circuit: 9 | - Arduino Nano 33 BLE Sense 10 | 11 | created 10 Jul 2019 12 | by Riccardo Rizzo 13 | 14 | This example code is in the public domain. 
15 | */ 16 | 17 | #include 18 | 19 | void setup() { 20 | Serial.begin(9600); 21 | while (!Serial); 22 | Serial.println("Started"); 23 | 24 | if (!IMU.begin()) { 25 | Serial.println("Failed to initialize IMU!"); 26 | while (1); 27 | } 28 | Serial.print("Gyroscope sample rate = "); 29 | Serial.print(IMU.gyroscopeSampleRate()); 30 | Serial.println(" Hz"); 31 | Serial.println(); 32 | Serial.println("Gyroscope in degrees/second"); 33 | Serial.println("X\tY\tZ"); 34 | } 35 | 36 | void loop() { 37 | float x, y, z; 38 | 39 | if (IMU.gyroscopeAvailable()) { 40 | IMU.readGyroscope(x, y, z); 41 | 42 | Serial.print(x); 43 | Serial.print('\t'); 44 | Serial.print(y); 45 | Serial.print('\t'); 46 | Serial.println(z); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/examples/SimpleMagnetometer/SimpleMagnetometer.ino: -------------------------------------------------------------------------------- 1 | /* 2 | Arduino LSM9DS1 - Simple Magnetometer 3 | 4 | This example reads the magnetic field values from the LSM9DS1 5 | sensor and continuously prints them to the Serial Monitor 6 | or Serial Plotter. 7 | 8 | The circuit: 9 | - Arduino Nano 33 BLE Sense 10 | 11 | created 10 Jul 2019 12 | by Riccardo Rizzo 13 | 14 | This example code is in the public domain. 
15 | */ 16 | 17 | #include 18 | 19 | void setup() { 20 | Serial.begin(9600); 21 | while (!Serial); 22 | Serial.println("Started"); 23 | 24 | if (!IMU.begin()) { 25 | Serial.println("Failed to initialize IMU!"); 26 | while (1); 27 | } 28 | Serial.print("Magnetic field sample rate = "); 29 | Serial.print(IMU.magneticFieldSampleRate()); 30 | Serial.println(" uT"); 31 | Serial.println(); 32 | Serial.println("Magnetic Field in uT"); 33 | Serial.println("X\tY\tZ"); 34 | } 35 | 36 | void loop() { 37 | float x, y, z; 38 | 39 | if (IMU.magneticFieldAvailable()) { 40 | IMU.readMagneticField(x, y, z); 41 | 42 | Serial.print(x); 43 | Serial.print('\t'); 44 | Serial.print(y); 45 | Serial.print('\t'); 46 | Serial.println(z); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/keywords.txt: -------------------------------------------------------------------------------- 1 | ####################################### 2 | # Syntax Coloring Map For Arduino_LSM9DS1 3 | ####################################### 4 | # Class 5 | ####################################### 6 | 7 | Arduino_LSM9DS1 KEYWORD1 8 | LSM9DS1 KEYWORD1 9 | IMU KEYWORD1 10 | 11 | ####################################### 12 | # Methods and Functions 13 | ####################################### 14 | 15 | begin KEYWORD2 16 | end KEYWORD2 17 | 18 | readAcceleration KEYWORD2 19 | readGyroscope KEYWORD2 20 | readMagneticField KEYWORD2 21 | gyroscopeAvailable KEYWORD2 22 | accelerationAvailable KEYWORD2 23 | magneticFieldAvailable KEYWORD2 24 | accelerationSampleRate KEYWORD2 25 | gyroscopeSampleRate KEYWORD2 26 | magneticFieldSampleRate KEYWORD2 27 | 28 | ####################################### 29 | # Constants 30 | ####################################### 31 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/library.properties: -------------------------------------------------------------------------------- 1 | 
name=Arduino_LSM9DS1 2 | version=1.0.0 3 | author=Arduino 4 | maintainer=Arduino 5 | sentence=Allows you to read the accelerometer, magnetometer and gyroscope values from the LSM9DS1 IMU on your Arduino Nano 33 BLE Sense. 6 | paragraph= 7 | category=Sensors 8 | url=https://github.com/arduino-libraries/Arduino_LSM9DS1 9 | architectures=* 10 | includes=Arduino_LSM9DS1.h 11 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/src/Arduino_LSM9DS1.h: -------------------------------------------------------------------------------- 1 | /* 2 | This file is part of the Arduino_LSM9DS1 library. 3 | Copyright (c) 2019 Arduino SA. All rights reserved. 4 | 5 | This library is free software; you can redistribute it and/or 6 | modify it under the terms of the GNU Lesser General Public 7 | License as published by the Free Software Foundation; either 8 | version 2.1 of the License, or (at your option) any later version. 9 | 10 | This library is distributed in the hope that it will be useful, 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 | Lesser General Public License for more details. 14 | 15 | You should have received a copy of the GNU Lesser General Public 16 | License along with this library; if not, write to the Free Software 17 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 18 | */ 19 | 20 | #ifndef _LSM9DS1_H_ 21 | #define _LSM9DS1_H_ 22 | 23 | #include "LSM9DS1.h" 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /lib/Arduino_LSM9DS1/src/LSM9DS1.h: -------------------------------------------------------------------------------- 1 | /* 2 | This file is part of the Arduino_LSM9DS1 library. 3 | Copyright (c) 2019 Arduino SA. All rights reserved. 
4 | 5 | This library is free software; you can redistribute it and/or 6 | modify it under the terms of the GNU Lesser General Public 7 | License as published by the Free Software Foundation; either 8 | version 2.1 of the License, or (at your option) any later version. 9 | 10 | This library is distributed in the hope that it will be useful, 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 | Lesser General Public License for more details. 14 | 15 | You should have received a copy of the GNU Lesser General Public 16 | License along with this library; if not, write to the Free Software 17 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 18 | */ 19 | 20 | #include 21 | #include 22 | 23 | class LSM9DS1Class { 24 | public: 25 | LSM9DS1Class(TwoWire& wire); 26 | virtual ~LSM9DS1Class(); 27 | 28 | int begin(); 29 | void end(); 30 | 31 | // Accelerometer 32 | virtual int readAcceleration(float& x, float& y, float& z); // Results are in G (earth gravity). 33 | virtual int accelerationAvailable(); // Number of samples in the FIFO. 34 | virtual float accelerationSampleRate(); // Sampling rate of the sensor. 35 | 36 | // Gyroscope 37 | virtual int readGyroscope(float& x, float& y, float& z); // Results are in degrees/second. 38 | virtual int gyroscopeAvailable(); // Number of samples in the FIFO. 39 | virtual float gyroscopeSampleRate(); // Sampling rate of the sensor. 40 | 41 | // Magnetometer 42 | virtual int readMagneticField(float& x, float& y, float& z); // Results are in uT (micro Tesla). 43 | virtual int magneticFieldAvailable(); // Number of samples in the FIFO. 44 | virtual float magneticFieldSampleRate(); // Sampling rate of the sensor. 
45 | 46 | private: 47 | int readRegister(uint8_t slaveAddress, uint8_t address); 48 | int readRegisters(uint8_t slaveAddress, uint8_t address, uint8_t* data, size_t length); 49 | int writeRegister(uint8_t slaveAddress, uint8_t address, uint8_t value); 50 | 51 | private: 52 | TwoWire* _wire; 53 | }; 54 | 55 | extern LSM9DS1Class IMU; 56 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/library.properties: -------------------------------------------------------------------------------- 1 | name=Arduino_TensorFlowLite 2 | version=1.15.0-ALPHA 3 | author=TensorFlow Authors 4 | maintainer=Pete Warden 5 | sentence=Allows you to run machine learning models locally on your device. 6 | paragraph=This library runs TensorFlow machine learning models on microcontrollers, allowing you to build AI/ML applications powered by deep learning and neural networks. With the included examples, you can recognize speech, detect people using a camera, and recognise "magic wand" gestures using an accelerometer. The examples work best with the Arduino Nano 33 BLE Sense board, which has a microphone and accelerometer. 7 | category=Data Processing 8 | url=https://www.tensorflow.org/lite/microcontrollers/overview 9 | ldflags=-lm 10 | includes=TensorFlowLite.h -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/TensorFlowLite.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_ 16 | #define TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_ 17 | 18 | // This header is deliberately empty, and is only present because including it 19 | // in a .ino sketch forces the Arduino toolchain to build the rest of the 20 | // library. 21 | 22 | #endif // TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_ 23 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/error_reporter.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | /// A functor that reports error to supporting system. Invoked similar to 23 | /// printf. 
24 | /// 25 | /// Usage: 26 | /// ErrorReporter foo; 27 | /// foo.Report("test %d", 5); 28 | /// or 29 | /// va_list args; 30 | /// foo.Report("test %d", args); // where args is va_list 31 | /// 32 | /// Subclass ErrorReporter to provide another reporting destination. 33 | /// For example, if you have a GUI program, you might redirect to a buffer 34 | /// that drives a GUI error log box. 35 | class ErrorReporter { 36 | public: 37 | virtual ~ErrorReporter() {} 38 | virtual int Report(const char* format, va_list args) = 0; 39 | int Report(const char* format, ...); 40 | int ReportError(void*, const char* format, ...); 41 | }; 42 | 43 | } // namespace tflite 44 | 45 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 46 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/flatbuffer_conversions.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 16 | #define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 17 | 18 | // These functions transform codes and data structures that are defined in the 19 | // flatbuffer serialization format into in-memory values that are used by the 20 | // runtime API and interpreter. 21 | 22 | #include "tensorflow/lite/c/common.h" 23 | #include "tensorflow/lite/core/api/error_reporter.h" 24 | #include "tensorflow/lite/core/api/op_resolver.h" 25 | #include "tensorflow/lite/schema/schema_generated.h" 26 | 27 | namespace tflite { 28 | 29 | // Interface class for builtin data allocations. 30 | class BuiltinDataAllocator { 31 | public: 32 | virtual void* Allocate(size_t size) = 0; 33 | virtual void Deallocate(void* data) = 0; 34 | 35 | // Allocate a structure, but make sure it is a POD structure that doesn't 36 | // require constructors to run. The reason we do this, is that Interpreter's C 37 | // extension part will take ownership so destructors will not be run during 38 | // deallocation. 39 | template 40 | T* AllocatePOD() { 41 | static_assert(std::is_pod::value, "Builtin data structure must be POD."); 42 | return static_cast(this->Allocate(sizeof(T))); 43 | } 44 | 45 | virtual ~BuiltinDataAllocator() {} 46 | }; 47 | 48 | // Parse the appropriate data out of the op. 49 | // 50 | // This handles builtin data explicitly as there are flatbuffer schemas. 51 | // If it returns kTfLiteOk, it passes the data out with `builtin_data`. The 52 | // calling function has to pass in an allocator object, and this allocator 53 | // will be called to reserve space for the output data. If the calling 54 | // function's allocator reserves memory on the heap, then it's the calling 55 | // function's responsibility to free it. 56 | // If it returns kTfLiteError, `builtin_data` will be `nullptr`. 
57 | TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, 58 | ErrorReporter* error_reporter, 59 | BuiltinDataAllocator* allocator, void** builtin_data); 60 | 61 | // Converts the tensor data type used in the flat buffer to the representation 62 | // used by the runtime. 63 | TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, 64 | ErrorReporter* error_reporter); 65 | 66 | } // namespace tflite 67 | 68 | #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 69 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/op_resolver.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | TfLiteStatus GetRegistrationFromOpCode( 21 | const OperatorCode* opcode, const OpResolver& op_resolver, 22 | ErrorReporter* error_reporter, const TfLiteRegistration** registration) { 23 | TfLiteStatus status = kTfLiteOk; 24 | *registration = nullptr; 25 | auto builtin_code = opcode->builtin_code(); 26 | int version = opcode->version(); 27 | 28 | if (builtin_code > BuiltinOperator_MAX || 29 | builtin_code < BuiltinOperator_MIN) { 30 | error_reporter->Report( 31 | "Op builtin_code out of range: %d. Are you using old TFLite binary " 32 | "with newer model?", 33 | builtin_code); 34 | status = kTfLiteError; 35 | } else if (builtin_code != BuiltinOperator_CUSTOM) { 36 | *registration = op_resolver.FindOp(builtin_code, version); 37 | if (*registration == nullptr) { 38 | error_reporter->Report( 39 | "Didn't find op for builtin opcode '%s' version '%d'\n", 40 | EnumNameBuiltinOperator(builtin_code), version); 41 | status = kTfLiteError; 42 | } 43 | } else if (!opcode->custom_code()) { 44 | error_reporter->Report( 45 | "Operator with CUSTOM builtin_code has no custom_code.\n"); 46 | status = kTfLiteError; 47 | } else { 48 | const char* name = opcode->custom_code()->c_str(); 49 | *registration = op_resolver.FindOp(name, version); 50 | if (*registration == nullptr) { 51 | // Do not report error for unresolved custom op, we do the final check 52 | // while preparing ops. 53 | status = kTfLiteError; 54 | } 55 | } 56 | return status; 57 | } 58 | 59 | } // namespace tflite 60 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 
41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/tensor_utils.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 23 | if (!tensor->is_variable) { 24 | return kTfLiteOk; 25 | } 26 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 27 | // to the value of the buffer. 28 | int value = 0; 29 | if (tensor->type == kTfLiteInt8) { 30 | value = tensor->params.zero_point; 31 | } 32 | // TODO(b/139446230): Provide a platform header to better handle these 33 | // specific scenarios. 
34 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 35 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 36 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 37 | memset(tensor->data.raw, value, tensor->bytes); 38 | #else 39 | char* raw_ptr = tensor->data.raw; 40 | for (int i = 0; i < tensor->bytes; ++i) { 41 | *raw_ptr = value; 42 | raw_ptr++; 43 | } 44 | #endif 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace tflite 49 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/op_macros.h" 21 | 22 | #ifndef TFLITE_DCHECK 23 | #define TFLITE_DCHECK(condition) (condition) ? (void)0 : TFLITE_ASSERT_FALSE 24 | #endif 25 | 26 | #ifndef TFLITE_DCHECK_EQ 27 | #define TFLITE_DCHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ASSERT_FALSE 28 | #endif 29 | 30 | #ifndef TFLITE_DCHECK_NE 31 | #define TFLITE_DCHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ASSERT_FALSE 32 | #endif 33 | 34 | #ifndef TFLITE_DCHECK_GE 35 | #define TFLITE_DCHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ASSERT_FALSE 36 | #endif 37 | 38 | #ifndef TFLITE_DCHECK_GT 39 | #define TFLITE_DCHECK_GT(x, y) ((x) > (y)) ? 
(void)0 : TFLITE_ASSERT_FALSE 40 | #endif 41 | 42 | #ifndef TFLITE_DCHECK_LE 43 | #define TFLITE_DCHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ASSERT_FALSE 44 | #endif 45 | 46 | #ifndef TFLITE_DCHECK_LT 47 | #define TFLITE_DCHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ASSERT_FALSE 48 | #endif 49 | 50 | // TODO(ahentz): Clean up: We should stick to the DCHECK versions. 51 | #ifndef TFLITE_CHECK 52 | #define TFLITE_CHECK(condition) (condition) ? (void)0 : TFLITE_ABORT 53 | #endif 54 | 55 | #ifndef TFLITE_CHECK_EQ 56 | #define TFLITE_CHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ABORT 57 | #endif 58 | 59 | #ifndef TFLITE_CHECK_NE 60 | #define TFLITE_CHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ABORT 61 | #endif 62 | 63 | #ifndef TFLITE_CHECK_GE 64 | #define TFLITE_CHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ABORT 65 | #endif 66 | 67 | #ifndef TFLITE_CHECK_GT 68 | #define TFLITE_CHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ABORT 69 | #endif 70 | 71 | #ifndef TFLITE_CHECK_LE 72 | #define TFLITE_CHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ABORT 73 | #endif 74 | 75 | #ifndef TFLITE_CHECK_LT 76 | #define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT 77 | #endif 78 | 79 | // TODO(ahentz): Clean up. 80 | using int8 = std::int8_t; 81 | using uint8 = std::uint8_t; 82 | using int16 = std::int16_t; 83 | using uint16 = std::uint16_t; 84 | using int32 = std::int32_t; 85 | using uint32 = std::uint32_t; 86 | 87 | // TFLITE_DEPRECATED() 88 | // 89 | // Duplicated from absl/base/macros.h to avoid pulling in that library. 90 | // Marks a deprecated class, struct, enum, function, method and variable 91 | // declarations. The macro argument is used as a custom diagnostic message (e.g. 92 | // suggestion of a better alternative). 
93 | // 94 | // Example: 95 | // 96 | // class TFLITE_DEPRECATED("Use Bar instead") Foo {...}; 97 | // TFLITE_DEPRECATED("Use Baz instead") void Bar() {...} 98 | // 99 | // Every usage of a deprecated entity will trigger a warning when compiled with 100 | // clang's `-Wdeprecated-declarations` option. This option is turned off by 101 | // default, but the warnings will be reported by clang-tidy. 102 | #if defined(__clang__) && __cplusplus >= 201103L 103 | #define TFLITE_DEPRECATED(message) __attribute__((deprecated(message))) 104 | #endif 105 | 106 | #ifndef TFLITE_DEPRECATED 107 | #define TFLITE_DEPRECATED(message) 108 | #endif 109 | 110 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ 111 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #pragma GCC diagnostic push 26 | #pragma GCC diagnostic ignored "-Wdeprecated-declarations" 27 | #pragma GCC diagnostic ignored "-Wattributes" 28 | #pragma GCC diagnostic ignored "-Wnarrowing" 29 | #pragma GCC diagnostic ignored "-Wsequence-point" 30 | #include "NEON_2_SSE.h" 31 | #pragma GCC diagnostic pop 32 | #endif 33 | 34 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 35 | // defined, PortableSomeFunc(args) otherwise. 36 | #ifdef USE_NEON 37 | // Always use Neon code 38 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 39 | 40 | #else 41 | // No NEON available: Use Portable code 42 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) 43 | 44 | #endif // defined(USE_NEON) 45 | 46 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 47 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, 26 | const T3* input2_data, const RuntimeShape& output_shape, 27 | T2* output_data, const Cmp& cmp) { 28 | TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0); 29 | TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1, 30 | output_shape.DimensionsCount()); 31 | int axis = input2_data[0]; 32 | if (axis < 0) { 33 | axis += input1_shape.DimensionsCount(); 34 | } 35 | const int axis_size = input1_shape.Dims(axis); 36 | 37 | int outer_size = 1; 38 | for (int i = 0; i < axis; ++i) { 39 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i)); 40 | outer_size *= input1_shape.Dims(i); 41 | } 42 | 43 | int inner_size = 1; 44 | const int dims_count = input1_shape.DimensionsCount(); 45 | for (int i = axis + 1; i < dims_count; ++i) { 46 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1)); 47 | inner_size *= input1_shape.Dims(i); 48 | } 49 | for (int outer = 0; outer < outer_size; ++outer) { 50 | for (int inner = 0; inner < inner_size; ++inner) { 51 | auto min_max_value = input1_data[outer * axis_size * inner_size + inner]; 52 | T2 min_max_index = 0; 53 | for (int i = 1; i < axis_size; ++i) { 54 
| const auto& curr_value = 55 | input1_data[(outer * axis_size + i) * inner_size + inner]; 56 | if (cmp(curr_value, min_max_value)) { 57 | min_max_value = curr_value; 58 | min_max_index = static_cast(i); 59 | } 60 | } 61 | output_data[outer * inner_size + inner] = min_max_index; 62 | } 63 | } 64 | } 65 | } // namespace reference_ops 66 | } // namespace tflite 67 | 68 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 69 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/binary_function.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/compatibility.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | // TODO(ycling): Refactoring. Remove BroadcastLogical and use the more 27 | // generalized and efficient BroadcastBinaryFunction. 
28 | // 29 | // Also appears to duplicte MinimumMaximum. 30 | // 31 | // R: Result type. T1: Input 1 type. T2: Input 2 type. 32 | template 33 | inline void BroadcastBinaryFunction4DSlow( 34 | const RuntimeShape& unextended_input1_shape, const T1* input1_data, 35 | const RuntimeShape& unextended_input2_shape, const T2* input2_data, 36 | const RuntimeShape& unextended_output_shape, R* output_data, 37 | R (*func)(T1, T2)) { 38 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); 39 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); 40 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); 41 | const RuntimeShape output_shape = 42 | RuntimeShape::ExtendedShape(4, unextended_output_shape); 43 | 44 | NdArrayDesc<4> desc1; 45 | NdArrayDesc<4> desc2; 46 | NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, 47 | unextended_input2_shape, &desc1, &desc2); 48 | 49 | for (int b = 0; b < output_shape.Dims(0); ++b) { 50 | for (int y = 0; y < output_shape.Dims(1); ++y) { 51 | for (int x = 0; x < output_shape.Dims(2); ++x) { 52 | for (int c = 0; c < output_shape.Dims(3); ++c) { 53 | auto out_idx = Offset(output_shape, b, y, x, c); 54 | auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); 55 | auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); 56 | auto in1_val = input1_data[in1_idx]; 57 | auto in2_val = input2_data[in2_idx]; 58 | output_data[out_idx] = func(in1_val, in2_val); 59 | } 60 | } 61 | } 62 | } 63 | } 64 | 65 | // R: Result type. T1: Input 1 type. T2: Input 2 type. 66 | // TODO(renjieliu): Refactor other binary functions to use this one. 
67 | template 68 | inline void BinaryFunction(const RuntimeShape& input1_shape, 69 | const T1* input1_data, 70 | const RuntimeShape& input2_shape, 71 | const T2* input2_data, 72 | const RuntimeShape& output_shape, R* output_data, 73 | R (*func)(T1, T2)) { 74 | const int flat_size = 75 | MatchingFlatSize(input1_shape, input2_shape, output_shape); 76 | for (int i = 0; i < flat_size; ++i) { 77 | output_data[i] = func(input1_data[i], input2_data[i]); 78 | } 79 | } 80 | 81 | } // namespace reference_ops 82 | } // namespace tflite 83 | 84 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 85 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/ceil.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/dequantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | namespace reference_ops { 24 | 25 | template 26 | inline void Dequantize(const tflite::DequantizationParams& op_params, 27 | const RuntimeShape& input_shape, const T* input_data, 28 | const RuntimeShape& output_shape, float* output_data) { 29 | int32 zero_point = op_params.zero_point; 30 | const double scale = op_params.scale; 31 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 32 | 33 | for (int i = 0; i < flat_size; i++) { 34 | const int32 val = input_data[i]; 35 | const float result = static_cast(scale * (val - zero_point)); 36 | output_data[i] = result; 37 | } 38 | } 39 | 40 | } // namespace reference_ops 41 | 42 | } // namespace tflite 43 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 44 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | 20 | namespace tflite { 21 | namespace reference_integer_ops { 22 | 23 | inline void FullyConnected( 24 | const FullyConnectedParams& params, const RuntimeShape& input_shape, 25 | const int8_t* input_data, const RuntimeShape& filter_shape, 26 | const int8_t* filter_data, const RuntimeShape& bias_shape, 27 | const int32* bias_data, const RuntimeShape& output_shape, 28 | int8_t* output_data) { 29 | const int32 input_offset = params.input_offset; 30 | const int32 filter_offset = params.weights_offset; 31 | const int32 output_offset = params.output_offset; 32 | const int32 output_multiplier = params.output_multiplier; 33 | const int output_shift = params.output_shift; 34 | const int32 output_activation_min = params.quantized_activation_min; 35 | const int32 output_activation_max = params.quantized_activation_max; 36 | TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); 37 | TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); 38 | 39 | TFLITE_DCHECK_LE(output_activation_min, output_activation_max); 40 | const int filter_dim_count = filter_shape.DimensionsCount(); 41 | const int batches = output_shape.Dims(0); 42 | const int output_depth = output_shape.Dims(1); 43 | TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); 44 | const int accum_depth = filter_shape.Dims(filter_dim_count - 1); 45 | for (int b = 0; b < batches; ++b) { 46 | for (int out_c = 0; out_c < output_depth; ++out_c) { 47 | int32 acc = 0; 48 | for (int d = 0; d < accum_depth; ++d) { 49 | int32 input_val = input_data[b * accum_depth + d]; 50 | int32 filter_val = filter_data[out_c * accum_depth + d]; 51 | acc += (filter_val + filter_offset) * (input_val + 
input_offset); 52 | } 53 | if (bias_data) { 54 | acc += bias_data[out_c]; 55 | } 56 | acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); 57 | acc += output_offset; 58 | acc = std::max(acc, output_activation_min); 59 | acc = std::min(acc, output_activation_max); 60 | output_data[out_c + output_depth * b] = static_cast(acc); 61 | } 62 | } 63 | } 64 | 65 | } // namespace reference_integer_ops 66 | } // namespace tflite 67 | 68 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ 69 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/integer_ops/softmax.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | 20 | namespace tflite { 21 | namespace reference_integer_ops { 22 | 23 | // Quantized softmax with int8 input and output. 
24 | inline void Softmax(const SoftmaxParams& params, 25 | const RuntimeShape& input_shape, const int8* input_data, 26 | const RuntimeShape& output_shape, int8* output_data) { 27 | const int32 input_beta_multiplier = params.input_multiplier; 28 | const int32 input_beta_left_shift = params.input_left_shift; 29 | const int diff_min = params.diff_min; 30 | // The representation chosen for the input to the exp() function is Q5.26. 31 | // We need to leave extra space since values that we skip might be as large as 32 | // -32 before multiplying by input_beta_multiplier, and therefore as large as 33 | // -16 afterwards. Note that exp(-8) is definitely not insignificant to 34 | // accumulation, but exp(-16) definitely is. 35 | static const int kScaledDiffIntegerBits = 5; 36 | static const int kAccumulationIntegerBits = 12; 37 | using FixedPointScaledDiff = 38 | gemmlowp::FixedPoint; 39 | using FixedPointAccum = gemmlowp::FixedPoint; 40 | using FixedPoint0 = gemmlowp::FixedPoint; 41 | 42 | const int trailing_dim = input_shape.DimensionsCount() - 1; 43 | const int outer_size = 44 | MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); 45 | const int depth = 46 | MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); 47 | 48 | for (int i = 0; i < outer_size; ++i) { 49 | int8 max_in_row = -128; 50 | for (int c = 0; c < depth; ++c) { 51 | max_in_row = std::max(max_in_row, input_data[i * depth + c]); 52 | } 53 | 54 | FixedPointAccum sum_of_exps = FixedPointAccum::Zero(); 55 | for (int c = 0; c < depth; ++c) { 56 | int32 input_diff = 57 | static_cast(input_data[i * depth + c]) - max_in_row; 58 | if (input_diff >= diff_min) { 59 | const int32 input_diff_rescaled = 60 | MultiplyByQuantizedMultiplierGreaterThanOne( 61 | input_diff, input_beta_multiplier, input_beta_left_shift); 62 | const FixedPointScaledDiff scaled_diff_f8 = 63 | FixedPointScaledDiff::FromRaw(input_diff_rescaled); 64 | sum_of_exps = sum_of_exps + gemmlowp::Rescale( 65 | 
exp_on_negative_values(scaled_diff_f8)); 66 | } 67 | } 68 | 69 | int num_bits_over_unit; 70 | FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal( 71 | sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit)); 72 | 73 | for (int c = 0; c < depth; ++c) { 74 | int32 input_diff = 75 | static_cast(input_data[i * depth + c]) - max_in_row; 76 | if (input_diff >= diff_min) { 77 | const int32 input_diff_rescaled = 78 | MultiplyByQuantizedMultiplierGreaterThanOne( 79 | input_diff, input_beta_multiplier, input_beta_left_shift); 80 | const FixedPointScaledDiff scaled_diff_f8 = 81 | FixedPointScaledDiff::FromRaw(input_diff_rescaled); 82 | 83 | FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8); 84 | const int32 unsat_output = gemmlowp::RoundingDivideByPOT( 85 | (shifted_scale * exp_in_0).raw(), num_bits_over_unit + 31 - 8); 86 | const int32 shifted_output = unsat_output - 128; 87 | 88 | output_data[i * depth + c] = static_cast( 89 | std::max(std::min(shifted_output, static_cast(127)), 90 | static_cast(-128))); 91 | 92 | } else { 93 | output_data[i * depth + c] = -128; 94 | } 95 | } 96 | } 97 | } 98 | 99 | } // namespace reference_integer_ops 100 | } // namespace tflite 101 | 102 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_ 103 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/logistic.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ 17 | 18 | #include "third_party/gemmlowp/fixedpoint/fixedpoint.h" 19 | #include "tensorflow/lite/kernels/internal/common.h" 20 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 21 | #include "tensorflow/lite/kernels/internal/round.h" 22 | #include "tensorflow/lite/kernels/internal/types.h" 23 | #include "tensorflow/lite/kernels/op_macros.h" 24 | 25 | namespace tflite { 26 | namespace reference_ops { 27 | 28 | inline void Logistic(const RuntimeShape& input_shape, const float* input_data, 29 | const RuntimeShape& output_shape, float* output_data) { 30 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 31 | 32 | for (int i = 0; i < flat_size; i++) { 33 | float val = input_data[i]; 34 | float result = 1.f / (1.f + std::exp(-val)); 35 | output_data[i] = result; 36 | } 37 | } 38 | 39 | // Convenience version that allows, for example, generated-code calls to be 40 | // uniform between data types. 41 | inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape, 42 | const float* input_data, const RuntimeShape& output_shape, 43 | float* output_data) { 44 | // Drop params: not needed. 
45 | Logistic(input_shape, input_data, output_shape, output_data); 46 | } 47 | 48 | inline void Logistic(const LogisticParams& params, 49 | const RuntimeShape& input_shape, const int16* input_data, 50 | const RuntimeShape& output_shape, int16* output_data) { 51 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 52 | 53 | for (int i = 0; i < flat_size; i++) { 54 | // F0 uses 0 integer bits, range [-1, 1]. 55 | // This is the return type of math functions such as tanh, logistic, 56 | // whose range is in [-1, 1]. 57 | using F0 = gemmlowp::FixedPoint; 58 | // F3 uses 3 integer bits, range [-8, 8], the input range expected here. 59 | using F3 = gemmlowp::FixedPoint; 60 | 61 | const F3 input = F3::FromRaw(input_data[i]); 62 | F0 output = gemmlowp::logistic(input); 63 | output_data[i] = output.raw(); 64 | } 65 | } 66 | 67 | } // namespace reference_ops 68 | } // namespace tflite 69 | 70 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ 71 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/maximum_minimum.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | namespace reference_ops { 23 | 24 | template 25 | void MaximumMinimumBroadcast4DSlow(const RuntimeShape& unextended_input1_shape, 26 | const T* input1_data, 27 | const RuntimeShape& unextended_input2_shape, 28 | const T* input2_data, 29 | const RuntimeShape& unextended_output_shape, 30 | T* output_data, Op op) { 31 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); 32 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); 33 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); 34 | const RuntimeShape output_shape = 35 | RuntimeShape::ExtendedShape(4, unextended_output_shape); 36 | 37 | NdArrayDesc<4> desc1; 38 | NdArrayDesc<4> desc2; 39 | NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, 40 | unextended_input2_shape, &desc1, &desc2); 41 | 42 | for (int b = 0; b < output_shape.Dims(0); ++b) { 43 | for (int y = 0; y < output_shape.Dims(1); ++y) { 44 | for (int x = 0; x < output_shape.Dims(2); ++x) { 45 | for (int c = 0; c < output_shape.Dims(3); ++c) { 46 | auto out_idx = Offset(output_shape, b, y, x, c); 47 | auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); 48 | auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); 49 | auto in1_val = input1_data[in1_idx]; 50 | auto in2_val = input2_data[in2_idx]; 51 | output_data[out_idx] = op(in1_val, in2_val); 52 | } 53 | } 54 | } 55 | } 56 | } 57 | 58 | } // namespace reference_ops 59 | } // namespace tflite 60 | 61 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 62 | -------------------------------------------------------------------------------- 
/lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/prelu.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/compatibility.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | // Broadcast prelu to output_shape for quantized uint8 data. 
27 | inline void BroadcastPrelu4DSlow(const PreluParams& params, 28 | const RuntimeShape& input_shape, 29 | const uint8* input_data, 30 | const RuntimeShape& alpha_shape, 31 | const uint8* alpha_data, 32 | const RuntimeShape& output_shape, 33 | uint8* output_data) { 34 | TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4); 35 | TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4); 36 | TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 4); 37 | const RuntimeShape extended_output_shape = 38 | RuntimeShape::ExtendedShape(4, output_shape); 39 | NdArrayDesc<4> desc1; 40 | NdArrayDesc<4> desc2; 41 | NdArrayDescsForElementwiseBroadcast(input_shape, alpha_shape, &desc1, &desc2); 42 | 43 | for (int b = 0; b < extended_output_shape.Dims(0); ++b) { 44 | for (int y = 0; y < extended_output_shape.Dims(1); ++y) { 45 | for (int x = 0; x < extended_output_shape.Dims(2); ++x) { 46 | for (int c = 0; c < extended_output_shape.Dims(3); ++c) { 47 | int output_index = Offset(extended_output_shape, b, y, x, c); 48 | int input_index = SubscriptToIndex(desc1, b, y, x, c); 49 | const int32 input_value = 50 | params.input_offset + input_data[input_index]; 51 | if (input_value >= 0) { 52 | output_data[output_index] = input_data[input_index]; 53 | } else { 54 | auto alpha_index = SubscriptToIndex(desc2, b, y, x, c); 55 | const int32 alpha_value = 56 | params.alpha_offset + alpha_data[alpha_index]; 57 | const int32 unclamped_output = 58 | params.output_offset + 59 | MultiplyByQuantizedMultiplierSmallerThanOneExp( 60 | input_value * alpha_value, params.output_multiplier, 61 | params.output_shift); 62 | const int32 quantized_min = std::numeric_limits::min(); 63 | const int32 quantized_max = std::numeric_limits::max(); 64 | const int32 clamped_output = std::min( 65 | quantized_max, std::max(quantized_min, unclamped_output)); 66 | output_data[output_index] = static_cast(clamped_output); 67 | } 68 | } 69 | } 70 | } 71 | } 72 | } 73 | 74 | } // namespace reference_ops 75 | } // namespace tflite 76 | 77 | 
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 78 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/quantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/round.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | template 27 | inline void AffineQuantize(const tflite::QuantizationParams& op_params, 28 | const RuntimeShape& input_shape, 29 | const float* input_data, 30 | const RuntimeShape& output_shape, T* output_data) { 31 | const int32 zero_point = op_params.zero_point; 32 | const double scale = static_cast(op_params.scale); 33 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 34 | static constexpr int32 min_val = std::numeric_limits::min(); 35 | static constexpr int32 max_val = std::numeric_limits::max(); 36 | 37 | for (int i = 0; i < flat_size; i++) { 
38 | const float val = input_data[i]; 39 | int32 unclamped = static_cast(TfLiteRound(val / scale)) + zero_point; 40 | int32 clamped = std::min(std::max(unclamped, min_val), max_val); 41 | output_data[i] = clamped; 42 | } 43 | } 44 | 45 | } // namespace reference_ops 46 | 47 | } // namespace tflite 48 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 49 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/reference/strided_slice.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/strided_slice_logic.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | template 26 | inline void StridedSlice(const tflite::StridedSliceParams& op_params, 27 | const RuntimeShape& unextended_input_shape, 28 | const T* input_data, 29 | const RuntimeShape& unextended_output_shape, 30 | T* output_data) { 31 | // Note that the output_shape is not used herein. 32 | tflite::StridedSliceParams params_copy = op_params; 33 | 34 | TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); 35 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); 36 | const RuntimeShape input_shape = 37 | RuntimeShape::ExtendedShape(4, unextended_input_shape); 38 | const RuntimeShape output_shape = 39 | RuntimeShape::ExtendedShape(4, unextended_output_shape); 40 | 41 | // Reverse and pad to 4 dimensions because that is what the runtime code 42 | // requires (ie. all shapes must be 4D and are given backwards). 
43 | strided_slice::StridedSlicePadIndices(¶ms_copy, 4); 44 | 45 | const int start_b = strided_slice::StartForAxis(params_copy, input_shape, 0); 46 | const int stop_b = 47 | strided_slice::StopForAxis(params_copy, input_shape, 0, start_b); 48 | const int start_h = strided_slice::StartForAxis(params_copy, input_shape, 1); 49 | const int stop_h = 50 | strided_slice::StopForAxis(params_copy, input_shape, 1, start_h); 51 | const int start_w = strided_slice::StartForAxis(params_copy, input_shape, 2); 52 | const int stop_w = 53 | strided_slice::StopForAxis(params_copy, input_shape, 2, start_w); 54 | const int start_d = strided_slice::StartForAxis(params_copy, input_shape, 3); 55 | const int stop_d = 56 | strided_slice::StopForAxis(params_copy, input_shape, 3, start_d); 57 | 58 | T* out_ptr = output_data; 59 | for (int in_b = start_b; 60 | !strided_slice::LoopCondition(in_b, stop_b, params_copy.strides[0]); 61 | in_b += params_copy.strides[0]) { 62 | for (int in_h = start_h; 63 | !strided_slice::LoopCondition(in_h, stop_h, params_copy.strides[1]); 64 | in_h += params_copy.strides[1]) { 65 | for (int in_w = start_w; 66 | !strided_slice::LoopCondition(in_w, stop_w, params_copy.strides[2]); 67 | in_w += params_copy.strides[2]) { 68 | for (int in_d = start_d; !strided_slice::LoopCondition( 69 | in_d, stop_d, params_copy.strides[3]); 70 | in_d += params_copy.strides[3]) { 71 | *out_ptr++ = input_data[Offset(input_shape, in_b, in_h, in_w, in_d)]; 72 | } 73 | } 74 | } 75 | } 76 | } 77 | } // namespace reference_ops 78 | } // namespace tflite 79 | 80 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ 81 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/internal/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
// Restored header name (the dump had a bare `#include`): std::round and
// ::round both come from <cmath>.
#include <cmath>

// TODO(aselle): See if we can do this only on jdk. Also mikecase, check
// if you need this for java host build.
// On Android (pre-NDK-major) and Arduino, std::round is unreliable or
// unavailable, so fall back to the global C round().
#if defined(TF_LITE_USE_GLOBAL_ROUND) || \
    (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO)
// Fix: the dump showed a bare `template` before this overload; a template
// parameter would be undeducible from a plain float argument, so these are
// plain overloads.
inline float TfLiteRound(const float x) { return ::round(x); }
inline double TfLiteRound(const double x) { return ::round(x); }
#else
// Restored template header (stripped in the dump).
template <class T>
inline T TfLiteRound(const T x) {
  return std::round(x);
}
#endif
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_SCOPED_PROFILING_LABEL_WRAPPER_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_SCOPED_PROFILING_LABEL_WRAPPER_H_ 17 | 18 | // gemmlowp itself defines an empty class for ScopedProfilingLabel when 19 | // GEMMLOWP_PROFILING is not defined. However, that does not work for embedded 20 | // builds because instrumentation.h depends on pthread and defines a few Mutex 21 | // classes independent of GEMMLOWP_PROFILING. 22 | // 23 | // As a result, we are using GEMMLOWP_PROFILING to either pull in the 24 | // gemmlowp implementation or use our own empty class. 25 | // 26 | // The downside with this approach is that we are using a gemmlowp macro from 27 | // the TFLite codebase. The upside is that it is much simpler than the 28 | // alternatives (see history of this file). 
#ifdef GEMMLOWP_PROFILING

#include "profiling/instrumentation.h"

namespace tflite {
// RAII wrapper that forwards to gemmlowp's profiling label when profiling
// is enabled at build time.
class ScopedProfilingLabelWrapper {
 public:
  explicit ScopedProfilingLabelWrapper(const char* label) : label_(label) {}

 private:
  gemmlowp::ScopedProfilingLabel label_;
};
}  // namespace tflite

#else  // GEMMLOWP_PROFILING

namespace tflite {
// No-op stand-in used when profiling is disabled; construction has no effect.
class ScopedProfilingLabelWrapper {
 public:
  explicit ScopedProfilingLabelWrapper(const char* /*label*/) {}
};
}  // namespace tflite

#endif  // GEMMLOWP_PROFILING
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
// If we're on a platform without standard IO functions, fall back to a
// non-portable function.
#ifdef TF_LITE_MCU_DEBUG_LOG

#include "tensorflow/lite/micro/micro_error_reporter.h"

#define DEBUG_LOG(x) \
  do {               \
    DebugLog(x);     \
  } while (0)

// On MCU targets there is no abort(); spin forever after logging.
inline void InfiniteLoop() {
  DEBUG_LOG("HALTED\n");
  while (1) {
  }
}
#define TFLITE_ASSERT_FALSE InfiniteLoop();
#define TFLITE_ABORT InfiniteLoop();

#else  // TF_LITE_MCU_DEBUG_LOG

// Restored header names (the dump had three bare `#include` lines):
// fprintf needs <cstdio>, abort needs <cstdlib>, and <cassert> backs the
// assert-style macros. NOTE(review): restoration inferred from usage below.
#include <cassert>
#include <cstdio>
#include <cstdlib>

#define DEBUG_LOG(x)            \
  do {                          \
    fprintf(stderr, "%s", (x)); \
  } while (0)

#define TFLITE_ABORT abort()

#ifdef NDEBUG
#define TFLITE_ASSERT_FALSE (static_cast<void>(0))
#else
#define TFLITE_ASSERT_FALSE TFLITE_ABORT
#endif

#endif  // TF_LITE_MCU_DEBUG_LOG

// Logs the message and terminates.
#define TF_LITE_FATAL(msg)  \
  do {                      \
    DEBUG_LOG(msg);         \
    DEBUG_LOG("\nFATAL\n"); \
    TFLITE_ABORT;           \
  } while (0)

// Always-on assertions (independent of NDEBUG).
#define TF_LITE_ASSERT(x)        \
  do {                           \
    if (!(x)) TF_LITE_FATAL(#x); \
  } while (0)

#define TF_LITE_ASSERT_EQ(x, y)                            \
  do {                                                     \
    if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
  } while (0)
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_ 16 | #define TENSORFLOW_LITE_KERNELS_PADDING_H_ 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | 20 | namespace tflite { 21 | 22 | // TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover. 23 | inline int ComputePadding(int stride, int dilation_rate, int in_size, 24 | int filter_size, int out_size) { 25 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 26 | int padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2; 27 | return padding > 0 ? padding : 0; 28 | } 29 | 30 | // It's not guaranteed that padding is symmetric. It's important to keep 31 | // offset for algorithms need all paddings. 32 | inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, 33 | int filter_size, int out_size, 34 | int* offset) { 35 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 36 | int total_padding = 37 | ((out_size - 1) * stride + effective_filter_size - in_size); 38 | total_padding = total_padding > 0 ? total_padding : 0; 39 | *offset = total_padding % 2; 40 | return total_padding / 2; 41 | } 42 | 43 | // Matching GetWindowedOutputSize in TensorFlow. 
44 | inline int ComputeOutSize(TfLitePadding padding, int image_size, 45 | int filter_size, int stride, int dilation_rate = 1) { 46 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 47 | switch (padding) { 48 | case kTfLitePaddingSame: 49 | return (image_size + stride - 1) / stride; 50 | case kTfLitePaddingValid: 51 | return (image_size + stride - effective_filter_size) / stride; 52 | default: 53 | return 0; 54 | } 55 | } 56 | 57 | inline TfLitePaddingValues ComputePaddingHeightWidth( 58 | int stride_height, int stride_width, int dilation_rate_height, 59 | int dilation_rate_width, int in_height, int in_width, int filter_height, 60 | int filter_width, TfLitePadding padding, int* out_height, int* out_width) { 61 | *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, 62 | dilation_rate_width); 63 | *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, 64 | dilation_rate_height); 65 | 66 | TfLitePaddingValues padding_values; 67 | int offset = 0; 68 | padding_values.height = 69 | ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, 70 | filter_height, *out_height, &offset); 71 | padding_values.height_offset = offset; 72 | padding_values.width = 73 | ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, 74 | filter_width, *out_width, &offset); 75 | padding_values.width_offset = offset; 76 | return padding_values; 77 | } 78 | } // namespace tflite 79 | 80 | #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ 81 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/arduino/debug_log.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/debug_log.h" 17 | 18 | #include "Arduino.h" 19 | 20 | // The Arduino DUE uses a different object for the default serial port shown in 21 | // the monitor than most other models, so make sure we pick the right one. See 22 | // https://github.com/arduino/Arduino/issues/3088#issuecomment-406655244 23 | #if defined(__SAM3X8E__) 24 | #define DEBUG_SERIAL_OBJECT (SerialUSB) 25 | #else 26 | #define DEBUG_SERIAL_OBJECT (Serial) 27 | #endif 28 | 29 | // On Arduino platforms, we set up a serial port and write to it for debug 30 | // logging. 31 | extern "C" void DebugLog(const char* s) { 32 | static bool is_initialized = false; 33 | if (!is_initialized) { 34 | DEBUG_SERIAL_OBJECT.begin(115200); 35 | is_initialized = true; 36 | } 37 | DEBUG_SERIAL_OBJECT.print(s); 38 | } 39 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/debug_log_numbers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_NUMBERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_NUMBERS_H_ 17 | 18 | #include 19 | 20 | // Output numbers to the debug logging stream. 21 | extern "C" { 22 | void DebugLogInt32(int32_t i); 23 | void DebugLogUInt32(uint32_t i); 24 | void DebugLogHex(uint32_t i); 25 | void DebugLogFloat(float i); 26 | } 27 | 28 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_NUMBERS_H_ 29 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include "tensorflow/lite/c/builtin_op_data.h" 24 | 25 | namespace tflite { 26 | namespace ops { 27 | namespace micro { 28 | 29 | // Returns the floating point value for a fused activation: 30 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 31 | switch (act) { 32 | case kTfLiteActNone: 33 | return a; 34 | case kTfLiteActRelu: 35 | return a < 0.f ? 0.f : a; 36 | case kTfLiteActRelu1: 37 | return a < 0.f ? 0.f : ((a > 1.f) ? 1.f : a); 38 | case kTfLiteActRelu6: 39 | return a < 0.f ? 0.f : ((a > 6.f) ? 6.f : a); 40 | case kTfLiteActTanh: 41 | return (expf(a) - expf(-a)) / (expf(a) + expf(-a)); 42 | case kTfLiteActSignBit: 43 | return std::signbit(a); 44 | case kTfLiteActSigmoid: 45 | return 1.f / (1.f + expf(-a)); 46 | default: 47 | return a; 48 | } 49 | } 50 | 51 | } // namespace micro 52 | } // namespace ops 53 | } // namespace tflite 54 | 55 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 56 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/all_ops_resolver.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | #include "tensorflow/lite/micro/kernels/all_ops_resolver.h" 14 | 15 | #include "tensorflow/lite/micro/kernels/micro_ops.h" 16 | 17 | namespace tflite { 18 | namespace ops { 19 | namespace micro { 20 | 21 | // Register each supported op with: 22 | // AddBuiltin(, , [min version], [max version]) 23 | AllOpsResolver::AllOpsResolver() { 24 | AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(), 1, 4); 25 | AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D()); 26 | AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX()); 27 | AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC()); 28 | AddBuiltin(BuiltinOperator_SVDF, Register_SVDF()); 29 | AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), 1, 3); 30 | AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(), 1, 3); 31 | AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(), 1, 32 | 3); 33 | AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D()); 34 | AddBuiltin(BuiltinOperator_ABS, Register_ABS()); 35 | AddBuiltin(BuiltinOperator_SIN, Register_SIN()); 36 | AddBuiltin(BuiltinOperator_COS, Register_COS()); 37 | AddBuiltin(BuiltinOperator_LOG, Register_LOG()); 38 | AddBuiltin(BuiltinOperator_SQRT, Register_SQRT()); 39 | AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT()); 40 | AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE()); 41 | AddBuiltin(BuiltinOperator_PRELU, Register_PRELU()); 42 | AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR()); 43 | AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM()); 44 | AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM()); 45 | AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX()); 46 | AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN()); 47 | AddBuiltin(BuiltinOperator_LOGICAL_OR, 
Register_LOGICAL_OR()); 48 | AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND()); 49 | AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT()); 50 | AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE()); 51 | AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL()); 52 | AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL()); 53 | AddBuiltin(BuiltinOperator_GREATER, Register_GREATER()); 54 | AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL()); 55 | AddBuiltin(BuiltinOperator_LESS, Register_LESS()); 56 | AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL()); 57 | AddBuiltin(BuiltinOperator_CEIL, Register_CEIL()); 58 | AddBuiltin(BuiltinOperator_ROUND, Register_ROUND()); 59 | AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE()); 60 | AddBuiltin(BuiltinOperator_PACK, Register_PACK()); 61 | AddBuiltin(BuiltinOperator_PAD, Register_PAD()); 62 | AddBuiltin(BuiltinOperator_PADV2, Register_PADV2()); 63 | AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(), 1, 3); 64 | AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK()); 65 | AddBuiltin(BuiltinOperator_NEG, Register_NEG()); 66 | AddBuiltin(BuiltinOperator_ADD, Register_ADD()); 67 | AddBuiltin(BuiltinOperator_MUL, Register_MUL()); 68 | AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE()); 69 | AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(), 1, 2); 70 | AddBuiltin(BuiltinOperator_RELU, Register_RELU()); 71 | AddBuiltin(BuiltinOperator_RELU6, Register_RELU6()); 72 | } 73 | 74 | } // namespace micro 75 | } // namespace ops 76 | } // namespace tflite 77 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | namespace ops { 20 | namespace micro { 21 | 22 | class AllOpsResolver : public MicroMutableOpResolver { 23 | public: 24 | AllOpsResolver(); 25 | 26 | private: 27 | TF_LITE_REMOVE_VIRTUAL_DELETE 28 | }; 29 | 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | 34 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 35 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/arg_min_max.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 21 | #include "tensorflow/lite/kernels/kernel_util.h" 22 | #include "tensorflow/lite/micro/kernels/micro_utils.h" 23 | 24 | namespace tflite { 25 | namespace ops { 26 | namespace micro { 27 | namespace arg_min_max { 28 | 29 | constexpr int kInputTensor = 0; 30 | constexpr int kAxis = 1; 31 | constexpr int kOutputTensor = 0; 32 | 33 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 34 | return kTfLiteOk; 35 | } 36 | 37 | template 38 | inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, 39 | const T1* input1_data, const T3* input2_data, 40 | const RuntimeShape& output_shape, T2* output_data, 41 | bool is_arg_max) { 42 | if (is_arg_max) { 43 | reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, 44 | output_shape, output_data, micro::Greater()); 45 | } else { 46 | reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, 47 | output_shape, output_data, micro::Less()); 48 | } 49 | } 50 | 51 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { 52 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 53 | const TfLiteTensor* axis = GetInput(context, node, kAxis); 54 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 55 | 56 | 
#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ 57 | ArgMinMaxHelper(GetTensorShape(input), GetTensorData(input), \ 58 | GetTensorData(axis), GetTensorShape(output), \ 59 | GetTensorData(output), is_arg_max) 60 | if (axis->type == kTfLiteInt32) { 61 | if (output->type == kTfLiteInt32) { 62 | switch (input->type) { 63 | case kTfLiteFloat32: 64 | TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); 65 | break; 66 | case kTfLiteUInt8: 67 | TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); 68 | break; 69 | case kTfLiteInt8: 70 | TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); 71 | break; 72 | default: 73 | context->ReportError(context, 74 | "Only float32, uint8 and int8 are " 75 | "supported currently, got %s.", 76 | TfLiteTypeGetName(input->type)); 77 | return kTfLiteError; 78 | } 79 | } else { 80 | context->ReportError(context, 81 | "Only int32 are supported currently, got %s.", 82 | TfLiteTypeGetName(output->type)); 83 | return kTfLiteError; 84 | } 85 | } else { 86 | context->ReportError(context, "Only int32 are supported currently, got %s.", 87 | TfLiteTypeGetName(axis->type)); 88 | return kTfLiteError; 89 | } 90 | 91 | #undef TF_LITE_ARG_MIN_MAX 92 | 93 | return kTfLiteOk; 94 | } 95 | 96 | TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { 97 | return Eval(context, node, false); 98 | } 99 | 100 | TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { 101 | return Eval(context, node, true); 102 | } 103 | 104 | } // namespace arg_min_max 105 | 106 | TfLiteRegistration* Register_ARG_MAX() { 107 | static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, 108 | arg_min_max::ArgMaxEval}; 109 | return &r; 110 | } 111 | 112 | TfLiteRegistration* Register_ARG_MIN() { 113 | static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, 114 | arg_min_max::ArgMinEval}; 115 | return &r; 116 | } 117 | 118 | } // namespace micro 119 | } // namespace ops 120 | } // namespace tflite 121 | 
-------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/ceil.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/ceil.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace ceil { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) 
{ 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Ceil(GetTensorShape(input), GetTensorData(input), 50 | GetTensorShape(output), GetTensorData(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace ceil 55 | 56 | TfLiteRegistration* Register_CEIL() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, ceil::Prepare, ceil::Eval}; 59 | return &r; 60 | } 61 | 62 | } // namespace micro 63 | } // namespace ops 64 | } // namespace tflite 65 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/dequantize.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/dequantize.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 21 | #include "tensorflow/lite/kernels/kernel_util.h" 22 | 23 | namespace tflite { 24 | namespace ops { 25 | namespace micro { 26 | namespace dequantize { 27 | 28 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 29 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 30 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 31 | 32 | // TODO(b/140515557): Add cached dequant to improve hybrid model performance. 33 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 34 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 35 | 36 | TF_LITE_ENSURE(context, 37 | input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); 38 | TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); 39 | 40 | return kTfLiteOk; 41 | } 42 | 43 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 44 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 45 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 46 | 47 | tflite::DequantizationParams op_params; 48 | op_params.zero_point = input->params.zero_point; 49 | op_params.scale = input->params.scale; 50 | switch (input->type) { 51 | case kTfLiteUInt8: 52 | reference_ops::Dequantize( 53 | op_params, GetTensorShape(input), GetTensorData(input), 54 | GetTensorShape(output), GetTensorData(output)); 55 | break; 56 | case kTfLiteInt8: 57 | reference_ops::Dequantize( 58 | op_params, GetTensorShape(input), GetTensorData(input), 59 | GetTensorShape(output), GetTensorData(output)); 60 | break; 61 | default: 62 | context->ReportError(context, "Type %s (%d) not supported.", 63 | TfLiteTypeGetName(input->type), input->type); 64 | return kTfLiteError; 65 
| } 66 | 67 | return kTfLiteOk; 68 | } 69 | 70 | } // namespace dequantize 71 | 72 | TfLiteRegistration* Register_DEQUANTIZE() { 73 | static TfLiteRegistration r = {nullptr, nullptr, dequantize::Prepare, 74 | dequantize::Eval}; 75 | return &r; 76 | } 77 | 78 | } // namespace micro 79 | } // namespace ops 80 | } // namespace tflite 81 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/floor.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/common.h" 17 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace floor { 25 | 26 | constexpr int kInputTensor = 0; 27 | constexpr int kOutputTensor = 0; 28 | 29 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 30 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 31 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | reference_ops::Floor(GetTensorShape(input), GetTensorData(input), 34 | GetTensorShape(output), GetTensorData(output)); 35 | return kTfLiteOk; 36 | } 37 | } // namespace floor 38 | 39 | TfLiteRegistration* Register_FLOOR() { 40 | static TfLiteRegistration r = {/*init=*/nullptr, 41 | /*free=*/nullptr, /*prepare=*/nullptr, 42 | floor::Eval}; 43 | return &r; 44 | } 45 | 46 | } // namespace micro 47 | } // namespace ops 48 | } // namespace tflite 49 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/logical.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/c/common.h" 16 | #include "tensorflow/lite/kernels/internal/reference/binary_function.h" 17 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 18 | #include "tensorflow/lite/kernels/kernel_util.h" 19 | #include "tensorflow/lite/kernels/op_macros.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace logical { 25 | namespace { 26 | 27 | // Input/output tensor index. 28 | constexpr int kInputTensor1 = 0; 29 | constexpr int kInputTensor2 = 1; 30 | constexpr int kOutputTensor = 0; 31 | 32 | TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, 33 | bool (*func)(bool, bool)) { 34 | const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); 35 | const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); 36 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 37 | 38 | if (HaveSameShapes(input1, input2)) { 39 | reference_ops::BinaryFunction( 40 | GetTensorShape(input1), GetTensorData(input1), 41 | GetTensorShape(input2), GetTensorData(input2), 42 | GetTensorShape(output), GetTensorData(output), func); 43 | } else { 44 | reference_ops::BroadcastBinaryFunction4DSlow( 45 | GetTensorShape(input1), GetTensorData(input1), 46 | GetTensorShape(input2), GetTensorData(input2), 47 | GetTensorShape(output), GetTensorData(output), func); 48 | } 49 | 50 | return kTfLiteOk; 51 | } 52 | 53 | bool LogicalOr(bool x, bool y) { return x || y; } 54 | 55 | TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { 56 | return LogicalImpl(context, node, LogicalOr); 57 | } 58 | 59 | bool LogicalAnd(bool x, bool y) { return x && y; } 60 | 61 | TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { 62 | return LogicalImpl(context, node, LogicalAnd); 63 | } 
64 | 65 | } // namespace 66 | } // namespace logical 67 | 68 | TfLiteRegistration* Register_LOGICAL_OR() { 69 | // Init, Free, Prepare, Eval are satisfying the Interface required by 70 | // TfLiteRegistration. 71 | static TfLiteRegistration r = {/* init */ nullptr, /* free */ nullptr, 72 | /* prepare */ nullptr, logical::LogicalOrEval}; 73 | return &r; 74 | } 75 | 76 | TfLiteRegistration* Register_LOGICAL_AND() { 77 | // Init, Free, Prepare, Eval are satisfying the Interface required by 78 | // TfLiteRegistration. 79 | static TfLiteRegistration r = {/* init */ nullptr, /* free */ nullptr, 80 | /* prepare */ nullptr, 81 | logical::LogicalAndEval}; 82 | return &r; 83 | } 84 | 85 | } // namespace micro 86 | } // namespace ops 87 | } // namespace tflite 88 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/logistic.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/logistic.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/kernels/internal/common.h" 21 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 22 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 23 | #include "tensorflow/lite/kernels/kernel_util.h" 24 | #include "tensorflow/lite/kernels/op_macros.h" 25 | 26 | namespace tflite { 27 | namespace ops { 28 | namespace micro { 29 | namespace activations { 30 | 31 | constexpr int kInputTensor = 0; 32 | constexpr int kOutputTensor = 0; 33 | 34 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 35 | return kTfLiteOk; 36 | } 37 | 38 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 39 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 40 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 41 | 42 | switch (input->type) { 43 | case kTfLiteFloat32: { 44 | reference_ops::Logistic( 45 | GetTensorShape(input), GetTensorData(input), 46 | GetTensorShape(output), GetTensorData(output)); 47 | return kTfLiteOk; 48 | } 49 | default: { 50 | // TODO(b/141211002): Also support other data types once we have supported 51 | // temporary tensors in TFLM. 
52 | context->ReportError(context, 53 | "Only float32 is supported currently, got %s", 54 | TfLiteTypeGetName(input->type)); 55 | return kTfLiteError; 56 | } 57 | } 58 | } 59 | 60 | } // namespace activations 61 | 62 | TfLiteRegistration* Register_LOGISTIC() { 63 | static TfLiteRegistration r = {/*init=*/nullptr, 64 | /*free=*/nullptr, activations::Prepare, 65 | activations::Eval}; 66 | return &r; 67 | } 68 | } // namespace micro 69 | } // namespace ops 70 | } // namespace tflite 71 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/micro_ops.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 16 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | 20 | namespace tflite { 21 | namespace ops { 22 | namespace micro { 23 | 24 | // Forward declaration of all micro op kernel registration methods. These 25 | // registrations are included with the standard `BuiltinOpResolver`. 26 | // 27 | // This header is particularly useful in cases where only a subset of ops are 28 | // needed. 
In such cases, the client can selectively add only the registrations 29 | // their model requires, using a custom `(Micro)MutableOpResolver`. Selective 30 | // registration in turn allows the linker to strip unused kernels. 31 | 32 | TfLiteRegistration* Register_ABS(); 33 | TfLiteRegistration* Register_ADD(); 34 | TfLiteRegistration* Register_ARG_MAX(); 35 | TfLiteRegistration* Register_ARG_MIN(); 36 | TfLiteRegistration* Register_AVERAGE_POOL_2D(); 37 | TfLiteRegistration* Register_CEIL(); 38 | TfLiteRegistration* Register_CONV_2D(); 39 | TfLiteRegistration* Register_CONCATENATION(); 40 | TfLiteRegistration* Register_COS(); 41 | TfLiteRegistration* Register_DEPTHWISE_CONV_2D(); 42 | TfLiteRegistration* Register_DEQUANTIZE(); 43 | TfLiteRegistration* Register_EQUAL(); 44 | TfLiteRegistration* Register_FLOOR(); 45 | TfLiteRegistration* Register_FULLY_CONNECTED(); 46 | TfLiteRegistration* Register_GREATER(); 47 | TfLiteRegistration* Register_GREATER_EQUAL(); 48 | TfLiteRegistration* Register_LESS(); 49 | TfLiteRegistration* Register_LESS_EQUAL(); 50 | TfLiteRegistration* Register_LOG(); 51 | TfLiteRegistration* Register_LOGICAL_AND(); 52 | TfLiteRegistration* Register_LOGICAL_NOT(); 53 | TfLiteRegistration* Register_LOGICAL_OR(); 54 | TfLiteRegistration* Register_LOGISTIC(); 55 | TfLiteRegistration* Register_MAXIMUM(); 56 | TfLiteRegistration* Register_MAX_POOL_2D(); 57 | TfLiteRegistration* Register_MINIMUM(); 58 | TfLiteRegistration* Register_MUL(); 59 | TfLiteRegistration* Register_NEG(); 60 | TfLiteRegistration* Register_NOT_EQUAL(); 61 | TfLiteRegistration* Register_PACK(); 62 | TfLiteRegistration* Register_PAD(); 63 | TfLiteRegistration* Register_PADV2(); 64 | TfLiteRegistration* Register_PRELU(); 65 | TfLiteRegistration* Register_QUANTIZE(); 66 | TfLiteRegistration* Register_RELU(); 67 | TfLiteRegistration* Register_RELU6(); 68 | TfLiteRegistration* Register_RESHAPE(); 69 | TfLiteRegistration* Register_ROUND(); 70 | TfLiteRegistration* Register_RSQRT(); 71 | 
TfLiteRegistration* Register_SIN(); 72 | TfLiteRegistration* Register_SOFTMAX(); 73 | TfLiteRegistration* Register_SPLIT(); 74 | TfLiteRegistration* Register_SQRT(); 75 | TfLiteRegistration* Register_SQUARE(); 76 | TfLiteRegistration* Register_STRIDED_SLICE(); 77 | TfLiteRegistration* Register_SVDF(); 78 | TfLiteRegistration* Register_UNPACK(); 79 | 80 | } // namespace micro 81 | } // namespace ops 82 | } // namespace tflite 83 | 84 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 85 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 
20 | struct Greater { 21 | template 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/neg.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/neg.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace neg { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | switch (input->type) { 34 | // TODO(wangtz): handle for kTfLiteInt8 35 | case kTfLiteFloat32: 36 | reference_ops::Negate(GetTensorShape(input), GetTensorData(input), 37 | GetTensorShape(output), 38 | GetTensorData(output)); 39 | break; 40 | default: 41 | context->ReportError( 42 | context, "Neg only currently supports float32, got %d.", input->type); 43 | return kTfLiteError; 44 | } 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace neg 49 | 50 | TfLiteRegistration* Register_NEG() { 51 | static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr, 52 | /*prepare=*/nullptr, neg::Eval}; 53 | return &r; 54 | } 55 | 56 | } // namespace micro 57 | } // namespace ops 58 | } // namespace tflite 59 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/pack.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace pack { 25 | namespace { 26 | 27 | constexpr int kOutputTensor = 0; 28 | 29 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 30 | return kTfLiteOk; 31 | } 32 | 33 | template 34 | TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node, 35 | TfLiteTensor* output, int values_count, int axis) { 36 | const int dimensions = output->dims->size; 37 | const TfLiteTensor* input0 = &context->tensors[node->inputs->data[0]]; 38 | const TfLiteIntArray* input_dims = input0->dims; 39 | const TfLiteIntArray* output_dims = output->dims; 40 | 41 | if (axis < 0) { 42 | axis += dimensions; 43 | } 44 | 45 | int outer_size = 1; 46 | for (int i = 0; i < axis; ++i) { 47 | outer_size *= output_dims->data[i]; 48 | } 49 | int copy_size = 1; 50 | for (int i = axis + 1; i < dimensions; ++i) { 51 | copy_size *= output_dims->data[i]; 52 | } 53 | int input_size = 1; 54 | for (int i = 0; i < input_dims->size; ++i) { 55 | input_size *= input_dims->data[i]; 56 | } 57 | TFLITE_DCHECK_EQ(input_size, copy_size * outer_size); 58 | 59 | T* output_data = GetTensorData(output); 60 | 61 | for (int i = 0; i < values_count; ++i) { 62 | TfLiteTensor* t = 
&context->tensors[node->inputs->data[i]]; 63 | const T* input_data = GetTensorData(t); 64 | for (int k = 0; k < outer_size; ++k) { 65 | const T* input_ptr = input_data + copy_size * k; 66 | int loc = k * values_count * copy_size + i * copy_size; 67 | T* output_ptr = output_data + loc; 68 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 69 | } 70 | } 71 | 72 | return kTfLiteOk; 73 | } 74 | 75 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 76 | const TfLitePackParams* data = 77 | reinterpret_cast(node->builtin_data); 78 | 79 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 80 | 81 | switch (output->type) { 82 | case kTfLiteFloat32: { 83 | return PackImpl(context, node, output, data->values_count, 84 | data->axis); 85 | } 86 | case kTfLiteUInt8: { 87 | return PackImpl(context, node, output, data->values_count, 88 | data->axis); 89 | } 90 | case kTfLiteInt8: { 91 | return PackImpl(context, node, output, data->values_count, 92 | data->axis); 93 | } 94 | case kTfLiteInt32: { 95 | return PackImpl(context, node, output, data->values_count, 96 | data->axis); 97 | } 98 | case kTfLiteInt64: { 99 | return PackImpl(context, node, output, data->values_count, 100 | data->axis); 101 | } 102 | default: { 103 | context->ReportError(context, "Type '%s' is not supported by pack.", 104 | TfLiteTypeGetName(output->type)); 105 | return kTfLiteError; 106 | } 107 | } 108 | 109 | return kTfLiteOk; 110 | } 111 | 112 | } // namespace 113 | } // namespace pack 114 | 115 | TfLiteRegistration* Register_PACK() { 116 | static TfLiteRegistration r = {nullptr, nullptr, pack::Prepare, pack::Eval}; 117 | return &r; 118 | } 119 | 120 | } // namespace micro 121 | } // namespace ops 122 | } // namespace tflite 123 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/quantize.cpp: -------------------------------------------------------------------------------- 1 
| /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/kernels/internal/reference/quantize.h" 16 | 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace quantize { 26 | 27 | void* Init(TfLiteContext* context, const char* buffer, size_t length) { 28 | return nullptr; 29 | } 30 | 31 | void Free(TfLiteContext* context, void* buffer) {} 32 | 33 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 34 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 36 | 37 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 38 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 39 | 40 | // TODO(b/128934713): Add support for fixed-point per-channel quantization. 41 | // Currently this only support affine per-layer quantization. 
42 | TF_LITE_ENSURE_EQ(context, output->quantization.type, 43 | kTfLiteAffineQuantization); 44 | const auto* affine_quantization = 45 | reinterpret_cast(output->quantization.params); 46 | TF_LITE_ENSURE(context, affine_quantization); 47 | TF_LITE_ENSURE(context, affine_quantization->scale); 48 | TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); 49 | 50 | TF_LITE_ENSURE(context, input->type == kTfLiteFloat32); 51 | TF_LITE_ENSURE(context, 52 | output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); 53 | 54 | return kTfLiteOk; 55 | } 56 | 57 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 58 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 59 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 60 | 61 | tflite::QuantizationParams op_params; 62 | op_params.zero_point = output->params.zero_point; 63 | op_params.scale = output->params.scale; 64 | switch (output->type) { 65 | case kTfLiteInt8: 66 | reference_ops::AffineQuantize( 67 | op_params, GetTensorShape(input), GetTensorData(input), 68 | GetTensorShape(output), GetTensorData(output)); 69 | break; 70 | case kTfLiteUInt8: 71 | reference_ops::AffineQuantize( 72 | op_params, GetTensorShape(input), GetTensorData(input), 73 | GetTensorShape(output), GetTensorData(output)); 74 | break; 75 | default: 76 | context->ReportError(context, "Output type %s (%d) not supported", 77 | TfLiteTypeGetName(input->type), output->type); 78 | return kTfLiteError; 79 | } 80 | 81 | return kTfLiteOk; 82 | } 83 | 84 | } // namespace quantize 85 | 86 | // This Op (QUANTIZE) quantizes the input and produces quantized output. 87 | // AffineQuantize takes scale and zero point and quantizes the float value to 88 | // quantized output, in int8 or uint8 format. 
89 | TfLiteRegistration* Register_QUANTIZE() { 90 | static TfLiteRegistration r = {quantize::Init, quantize::Free, 91 | quantize::Prepare, quantize::Eval}; 92 | return &r; 93 | } 94 | 95 | } // namespace micro 96 | } // namespace ops 97 | } // namespace tflite 98 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/reshape.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | #include "tensorflow/lite/kernels/op_macros.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace reshape { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kShapeTensor = 1; 29 | constexpr int kOutputTensor = 0; 30 | 31 | TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { 32 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | // Tensorflow's Reshape allows one of the shape components to have the 35 | // special -1 value, meaning it will be calculated automatically based on the 36 | // input. Here we calculate what that dimension should be so that the number 37 | // of output elements in the same as the number of input elements. 38 | int num_input_elements = NumElements(input); 39 | TfLiteIntArray* output_shape = output->dims; 40 | 41 | if (NumInputs(node) == 1 && // Legacy scalar supported with params. 42 | output_shape->size == 1 && output_shape->data[0] == 0) { 43 | // Legacy tflite models use a shape parameter of [0] to indicate scalars, 44 | // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during 45 | // toco conversion. 
46 | output_shape->size = 0; 47 | } 48 | 49 | int num_output_elements = 1; 50 | int stretch_dim = -1; 51 | for (int i = 0; i < output_shape->size; ++i) { 52 | int value = output_shape->data[i]; 53 | if (value == -1) { 54 | TF_LITE_ENSURE_EQ(context, stretch_dim, -1); 55 | stretch_dim = i; 56 | } else { 57 | num_output_elements *= value; 58 | } 59 | } 60 | if (stretch_dim != -1) { 61 | output_shape->data[stretch_dim] = num_input_elements / num_output_elements; 62 | num_output_elements *= output_shape->data[stretch_dim]; 63 | } 64 | 65 | TF_LITE_ENSURE_EQ(context, input->type, output->type); 66 | TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); 67 | return kTfLiteOk; 68 | } 69 | 70 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 71 | TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); 72 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 73 | return kTfLiteOk; 74 | } 75 | 76 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 77 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 78 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 79 | if (ReshapeOutput(context, node) != kTfLiteOk) { 80 | return kTfLiteError; 81 | } 82 | 83 | for (int i = 0; i < input->bytes; ++i) { 84 | output->data.raw[i] = input->data.raw[i]; 85 | } 86 | return kTfLiteOk; 87 | } 88 | 89 | } // namespace reshape 90 | 91 | TfLiteRegistration* Register_RESHAPE() { 92 | static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, 93 | reshape::Eval}; 94 | return &r; 95 | } 96 | 97 | } // namespace micro 98 | } // namespace ops 99 | } // namespace tflite 100 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/round.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/round.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace round { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) { 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = 
GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Round(GetTensorShape(input), GetTensorData(input), 50 | GetTensorShape(output), GetTensorData(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace round 55 | 56 | TfLiteRegistration* Register_ROUND() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, round::Prepare, round::Eval}; 59 | return &r; 60 | } 61 | 62 | } // namespace micro 63 | } // namespace ops 64 | } // namespace tflite 65 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/split.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace split { 25 | 26 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 27 | return kTfLiteOk; 28 | } 29 | 30 | template 31 | TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, 32 | const TfLiteTensor* input, int axis_value) { 33 | const int output_count = NumOutputs(node); 34 | const TfLiteIntArray* input_dims = input->dims; 35 | const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]]; 36 | const TfLiteIntArray* output_dims = output0->dims; 37 | 38 | const int split_dimensions = input_dims->size; 39 | int axis = axis_value < 0 ? axis_value + split_dimensions : axis_value; 40 | 41 | TFLITE_DCHECK_LT(axis, split_dimensions); 42 | TFLITE_DCHECK_EQ(output_dims->size, split_dimensions); 43 | 44 | int64_t split_size = output_dims->data[axis] * output_count; 45 | 46 | TFLITE_DCHECK_EQ(split_size, input_dims->data[axis]); 47 | int64_t outer_size = 1; 48 | for (int i = 0; i < axis; ++i) { 49 | outer_size *= input_dims->data[i]; 50 | } 51 | 52 | int64_t base_inner_size = 1; 53 | for (int i = axis + 1; i < split_dimensions; ++i) { 54 | base_inner_size *= input_dims->data[i]; 55 | } 56 | 57 | const T* input_ptr = GetTensorData(input); 58 | for (int k = 0; k < outer_size; ++k) { 59 | for (int i = 0; i < output_count; ++i) { 60 | TfLiteTensor* t = &context->tensors[node->outputs->data[i]]; 61 | T* output_data = GetTensorData(t); 62 | const int copy_size = output_dims->data[axis] * base_inner_size; 63 | T* output_ptr = output_data + k * copy_size; 64 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 65 | input_ptr += copy_size; 66 | } 
67 | } 68 | 69 | return kTfLiteOk; 70 | } 71 | 72 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 73 | const TfLiteTensor* axis = GetInput(context, node, 0); 74 | const TfLiteTensor* input = GetInput(context, node, 1); 75 | 76 | // Dynamic output tensors are needed if axis tensor is not constant. 77 | // But Micro doesn't support dynamic memeory allocation, so we only support 78 | // constant axis tensor for now. 79 | TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), 80 | "Non constant axis tensor not supported"); 81 | 82 | int axis_value = GetTensorData(axis)[0]; 83 | if (axis_value < 0) { 84 | axis_value += NumDimensions(input); 85 | } 86 | 87 | TF_LITE_ENSURE(context, axis_value >= 0); 88 | TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); 89 | 90 | switch (input->type) { 91 | case kTfLiteFloat32: { 92 | return SplitImpl(context, node, input, axis_value); 93 | } 94 | case kTfLiteUInt8: { 95 | return SplitImpl(context, node, input, axis_value); 96 | } 97 | case kTfLiteInt8: { 98 | return SplitImpl(context, node, input, axis_value); 99 | } 100 | case kTfLiteInt16: { 101 | return SplitImpl(context, node, input, axis_value); 102 | } 103 | case kTfLiteInt32: { 104 | return SplitImpl(context, node, input, axis_value); 105 | } 106 | default: 107 | context->ReportError(context, "Type %s currently not supported.", 108 | TfLiteTypeGetName(input->type)); 109 | return kTfLiteError; 110 | } 111 | #undef TF_LITE_SPLIT 112 | 113 | return kTfLiteOk; 114 | } 115 | 116 | } // namespace split 117 | 118 | TfLiteRegistration* Register_SPLIT() { 119 | static TfLiteRegistration r = {nullptr, nullptr, split::Prepare, split::Eval}; 120 | return &r; 121 | } 122 | 123 | } // namespace micro 124 | } // namespace ops 125 | } // namespace tflite 126 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/kernels/unpack.cpp: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace unpack { 25 | namespace { 26 | 27 | constexpr int kInputTensor = 0; 28 | 29 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 30 | return kTfLiteOk; 31 | } 32 | 33 | template 34 | TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node, 35 | const TfLiteTensor* input, int output_count, int axis) { 36 | const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]]; 37 | const TfLiteIntArray* input_dims = input->dims; 38 | const TfLiteIntArray* output_dims = output0->dims; 39 | const int dimensions = input_dims->size; 40 | 41 | if (axis < 0) { 42 | axis += NumDimensions(input); 43 | } 44 | 45 | TFLITE_DCHECK_LT(axis, dimensions); 46 | 47 | int outer_size = 1; 48 | for (int i = 0; i < axis; ++i) { 49 | outer_size *= input_dims->data[i]; 50 | } 51 | int copy_size = 1; 52 | for (int i = axis + 1; i < dimensions; ++i) { 53 | copy_size *= 
input_dims->data[i]; 54 | } 55 | int output_size = 1; 56 | for (int i = 0; i < output_dims->size; ++i) { 57 | output_size *= output_dims->data[i]; 58 | } 59 | TFLITE_DCHECK_EQ(output_size, copy_size * outer_size); 60 | 61 | const T* input_data = GetTensorData(input); 62 | 63 | for (int i = 0; i < output_count; ++i) { 64 | TfLiteTensor* t = &context->tensors[node->outputs->data[i]]; 65 | T* output_data = GetTensorData(t); 66 | for (int k = 0; k < outer_size; ++k) { 67 | T* output_ptr = output_data + copy_size * k; 68 | int loc = k * output_count * copy_size + i * copy_size; 69 | const T* input_ptr = input_data + loc; 70 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 71 | } 72 | } 73 | 74 | return kTfLiteOk; 75 | } 76 | 77 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 78 | TfLiteUnpackParams* data = 79 | reinterpret_cast(node->builtin_data); 80 | 81 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 82 | 83 | switch (input->type) { 84 | case kTfLiteFloat32: { 85 | return UnpackImpl(context, node, input, data->num, data->axis); 86 | } 87 | case kTfLiteInt32: { 88 | return UnpackImpl(context, node, input, data->num, data->axis); 89 | } 90 | case kTfLiteUInt8: { 91 | return UnpackImpl(context, node, input, data->num, data->axis); 92 | } 93 | case kTfLiteInt8: { 94 | return UnpackImpl(context, node, input, data->num, data->axis); 95 | } 96 | default: { 97 | context->ReportError(context, "Type '%s' is not supported by unpack.", 98 | TfLiteTypeGetName(input->type)); 99 | return kTfLiteError; 100 | } 101 | } 102 | 103 | return kTfLiteOk; 104 | } 105 | } // namespace 106 | } // namespace unpack 107 | 108 | TfLiteRegistration* Register_UNPACK() { 109 | static TfLiteRegistration r = {nullptr, nullptr, unpack::Prepare, 110 | unpack::Eval}; 111 | return &r; 112 | } 113 | 114 | } // namespace micro 115 | } // namespace ops 116 | } // namespace tflite 117 | 
-------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/memory_helpers.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_helpers.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 21 | 22 | namespace tflite { 23 | 24 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) { 25 | std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); 26 | uint8_t* aligned_result = reinterpret_cast( 27 | ((data_as_uintptr_t + (alignment - 1)) / alignment) * alignment); 28 | return aligned_result; 29 | } 30 | 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) { 32 | std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); 33 | uint8_t* aligned_result = 34 | reinterpret_cast((data_as_uintptr_t / alignment) * alignment); 35 | return aligned_result; 36 | } 37 | 38 | size_t AlignSizeUp(size_t size, size_t alignment) { 39 | size_t aligned_size = (((size + (alignment - 1)) / alignment) * alignment); 40 | return aligned_size; 41 | } 42 | 43 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 44 | ErrorReporter* reporter) { 45 | switch (type) { 46 | 
case kTfLiteFloat32: 47 | *size = sizeof(float); 48 | break; 49 | case kTfLiteInt16: 50 | *size = sizeof(int16_t); 51 | break; 52 | case kTfLiteInt32: 53 | *size = sizeof(int32_t); 54 | break; 55 | case kTfLiteUInt8: 56 | *size = sizeof(uint8_t); 57 | break; 58 | case kTfLiteInt8: 59 | *size = sizeof(int8_t); 60 | break; 61 | case kTfLiteInt64: 62 | *size = sizeof(int64_t); 63 | break; 64 | case kTfLiteBool: 65 | *size = sizeof(bool); 66 | break; 67 | case kTfLiteComplex64: 68 | *size = sizeof(float) * 2; 69 | break; 70 | default: 71 | reporter->Report("Type %s (%d) not is not supported", 72 | TfLiteTypeGetName(type), type); 73 | return kTfLiteError; 74 | } 75 | return kTfLiteOk; 76 | } 77 | 78 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 79 | size_t* bytes, size_t* type_size, 80 | ErrorReporter* error_reporter) { 81 | int element_count = 1; 82 | for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) { 83 | element_count *= flatbuffer_tensor.shape()->Get(n); 84 | } 85 | 86 | TfLiteType tf_lite_type; 87 | TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), 88 | &tf_lite_type, error_reporter)); 89 | TF_LITE_ENSURE_STATUS( 90 | TfLiteTypeSizeOf(tf_lite_type, type_size, error_reporter)); 91 | *bytes = element_count * (*type_size); 92 | return kTfLiteOk; 93 | } 94 | 95 | } // namespace tflite 96 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | // Returns the next pointer address aligned to the given alignment. 25 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 26 | 27 | // Returns the previous pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 29 | 30 | // Returns an increased size that's a multiple of alignment. 31 | size_t AlignSizeUp(size_t size, size_t alignment); 32 | 33 | // Returns size in bytes for a given TfLiteType. 34 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 35 | ErrorReporter* reporter); 36 | 37 | // How many bytes are needed to hold a tensor's contents. 
38 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 39 | size_t* bytes, size_t* type_size, 40 | ErrorReporter* error_reporter); 41 | 42 | } // namespace tflite 43 | 44 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 45 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | error_reporter->Report("Too many buffers (max is %d)", kMaxBufferCount); 29 | return kTfLiteError; 30 | } 31 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 32 | next_free_offset_ += size; 33 | ++current_buffer_count_; 34 | return kTfLiteOk; 35 | } 36 | 37 | int LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 38 | 39 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 40 | 41 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 42 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 43 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 44 | error_reporter->Report("buffer index %d is outside range 0 to %d", 45 | buffer_index, current_buffer_count_); 46 | return kTfLiteError; 47 | } 48 | *offset = buffer_offsets_[buffer_index]; 49 | return kTfLiteOk; 50 | } 51 | 52 | } // namespace tflite 53 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 20 | 21 | namespace tflite { 22 | 23 | // The simplest possible memory planner that just lays out all buffers at 24 | // increasing offsets without trying to reuse memory. 25 | class LinearMemoryPlanner : public MemoryPlanner { 26 | public: 27 | LinearMemoryPlanner(); 28 | ~LinearMemoryPlanner() override; 29 | 30 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 31 | int first_time_used, int last_time_used) override; 32 | 33 | int GetMaximumMemorySize() override; 34 | int GetBufferCount() override; 35 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 36 | int buffer_index, int* offset) override; 37 | 38 | private: 39 | static constexpr int kMaxBufferCount = 1024; 40 | int buffer_offsets_[kMaxBufferCount]; 41 | int current_buffer_count_; 42 | int next_free_offset_; 43 | }; 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 48 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/memory_planner/memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. 
All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | 22 | namespace tflite { 23 | 24 | // Interface class for planning the layout of memory buffers during the 25 | // execution of a graph. 26 | // It's designed to be used by a client that iterates in any order through the 27 | // buffers it wants to lay out, and then calls the getter functions for 28 | // information about the calculated layout. 
For example: 29 | // 30 | // SomeMemoryPlanner planner; 31 | // planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 32 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 33 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 34 | // 35 | // int offset0; 36 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); 37 | // int offset1; 38 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); 39 | // int offset2; 40 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); 41 | // const int arena_size_needed = planner.GetMaximumMemorySize(); 42 | // 43 | // The goal is for applications to be able to experiment with different layout 44 | // strategies without changing their client code, by swapping out classes that 45 | // implement this interface.= 46 | class MemoryPlanner { 47 | public: 48 | MemoryPlanner() {} 49 | virtual ~MemoryPlanner() {} 50 | 51 | // Pass information about a buffer's size and lifetime to the layout 52 | // algorithm. The order this is called implicitly assigns an index to the 53 | // result, so the buffer information that's passed into the N-th call of 54 | // this method will be used as the buffer_index argument to 55 | // GetOffsetForBuffer(). 56 | virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, 57 | int size, int first_time_used, 58 | int last_time_used) = 0; 59 | 60 | // The largest contguous block of memory that's needed to hold the layout. 61 | virtual int GetMaximumMemorySize() = 0; 62 | // How many buffers have been added to the planner. 63 | virtual int GetBufferCount() = 0; 64 | // Calculated layout offset for the N-th buffer added to the planner. 
65 | virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 66 | int buffer_index, int* offset) = 0; 67 | }; 68 | 69 | } // namespace tflite 70 | 71 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 72 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_allocator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 21 | #include "tensorflow/lite/micro/simple_memory_allocator.h" 22 | #include "tensorflow/lite/schema/schema_generated.h" 23 | 24 | namespace tflite { 25 | 26 | typedef struct { 27 | TfLiteNode node; 28 | const TfLiteRegistration* registration; 29 | } NodeAndRegistration; 30 | 31 | // Allocator responsible for allocating memory for all intermediate tensors 32 | // necessary to invoke a model. 
33 | class MicroAllocator { 34 | public: 35 | // The lifetime of the model, tensor allocator and error reporter must be at 36 | // least as long as that of the allocator object, since the allocator needs 37 | // them to be accessible during its entire lifetime. 38 | MicroAllocator(TfLiteContext* context, const Model* model, 39 | uint8_t* tensor_arena, size_t arena_size, 40 | ErrorReporter* error_reporter); 41 | 42 | // Sets up all of the data structure members for a runtime tensor based on the 43 | // contents of a serialized tensor. 44 | TfLiteStatus InitializeRuntimeTensor( 45 | const tflite::Tensor& flatbuffer_tensor, 46 | const flatbuffers::Vector>* buffers, 47 | ErrorReporter* error_reporter, TfLiteTensor* result); 48 | 49 | // Runs through the model and allocates all necessary input, output and 50 | // intermediate tensors. 51 | // WARNING: doing any allocation after calling this method has the risk of 52 | // corrupting tensor data so this method should be the last method to be 53 | // called in this class. 54 | TfLiteStatus FinishTensorAllocation(); 55 | 56 | // Run through the model to allocate nodes and registrations. We need to keep 57 | // them for the entire life time of the model to allow persistent tensors. 58 | // This method needs to be called before FinishTensorAllocation method. 59 | TfLiteStatus AllocateNodeAndRegistrations( 60 | const OpResolver& op_resolver, 61 | NodeAndRegistration** node_and_registrations); 62 | 63 | private: 64 | const Model* model_; 65 | SimpleMemoryAllocator memory_allocator_; 66 | ErrorReporter* error_reporter_; 67 | TfLiteContext* context_; 68 | uint8_t* arena_; 69 | size_t arena_size_; 70 | // Indicating if the allocator is ready for allocation. 
71 | bool active_ = false; 72 | 73 | const SubGraph* subgraph_; 74 | const flatbuffers::Vector>* operators_; 75 | const flatbuffers::Vector>* tensors_; 76 | }; 77 | 78 | } // namespace tflite 79 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ 80 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_error_reporter.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | namespace tflite { 19 | namespace { 20 | void DebugLogPrintf(const char* format, va_list args) { 21 | const int output_cache_size = 64; 22 | char output_cache[output_cache_size + 1]; 23 | int output_cache_index = 0; 24 | const char* current = format; 25 | while (*current != 0) { 26 | if (*current == '%') { 27 | const char next = *(current + 1); 28 | if ((next == 'd') || (next == 's') || (next == 'f')) { 29 | current += 1; 30 | if (output_cache_index > 0) { 31 | output_cache[output_cache_index] = 0; 32 | DebugLog(output_cache); 33 | output_cache_index = 0; 34 | } 35 | if (next == 'd') { 36 | DebugLogInt32(va_arg(args, int)); 37 | } else if (next == 's') { 38 | DebugLog(va_arg(args, char*)); 39 | } else if (next == 'f') { 40 | DebugLogFloat(va_arg(args, double)); 41 | } 42 | } 43 | } else { 44 | output_cache[output_cache_index] = *current; 45 | output_cache_index += 1; 46 | } 47 | if (output_cache_index >= output_cache_size) { 48 | output_cache[output_cache_index] = 0; 49 | DebugLog(output_cache); 50 | output_cache_index = 0; 51 | } 52 | current += 1; 53 | } 54 | if (output_cache_index > 0) { 55 | output_cache[output_cache_index] = 0; 56 | DebugLog(output_cache); 57 | output_cache_index = 0; 58 | } 59 | DebugLog("\r\n"); 60 | } 61 | } // namespace 62 | 63 | int MicroErrorReporter::Report(const char* format, va_list args) { 64 | DebugLogPrintf(format, args); 65 | return 0; 66 | } 67 | 68 | } // namespace tflite 69 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include "tensorflow/lite/core/api/error_reporter.h" 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/debug_log.h" 21 | #include "tensorflow/lite/micro/debug_log_numbers.h" 22 | 23 | namespace tflite { 24 | 25 | class MicroErrorReporter : public ErrorReporter { 26 | public: 27 | ~MicroErrorReporter() {} 28 | int Report(const char* format, va_list args) override; 29 | 30 | private: 31 | TF_LITE_REMOVE_VIRTUAL_DELETE 32 | }; 33 | 34 | } // namespace tflite 35 | 36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 37 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_mutable_op_resolver.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | namespace { 21 | 22 | const int kDefaultOpVersions[] = {1}; 23 | 24 | } // namespace 25 | 26 | const TfLiteRegistration* MicroMutableOpResolver::FindOp( 27 | tflite::BuiltinOperator op, int version) const { 28 | for (int i = 0; i < registrations_len_; ++i) { 29 | const TfLiteRegistration& registration = registrations_[i]; 30 | if ((registration.builtin_code == op) && 31 | (registration.version == version)) { 32 | return ®istration; 33 | } 34 | } 35 | return nullptr; 36 | } 37 | 38 | const TfLiteRegistration* MicroMutableOpResolver::FindOp(const char* op, 39 | int version) const { 40 | for (int i = 0; i < registrations_len_; ++i) { 41 | const TfLiteRegistration& registration = registrations_[i]; 42 | if ((registration.builtin_code == BuiltinOperator_CUSTOM) && 43 | (strcmp(registration.custom_name, op) == 0) && 44 | (registration.version == version)) { 45 | return ®istration; 46 | } 47 | } 48 | return nullptr; 49 | } 50 | 51 | void MicroMutableOpResolver::AddBuiltin(tflite::BuiltinOperator op, 52 | TfLiteRegistration* registration, 53 | int min_version, int max_version) { 54 | for (int version = min_version; version <= max_version; ++version) { 55 | if (registrations_len_ >= TFLITE_REGISTRATIONS_MAX) { 56 | // TODO(petewarden) - Add error reporting hooks so we can report this! 
57 | return; 58 | } 59 | TfLiteRegistration* new_registration = ®istrations_[registrations_len_]; 60 | registrations_len_ += 1; 61 | 62 | *new_registration = *registration; 63 | new_registration->builtin_code = op; 64 | new_registration->version = version; 65 | } 66 | } 67 | 68 | void MicroMutableOpResolver::AddCustom(const char* name, 69 | TfLiteRegistration* registration, 70 | int min_version, int max_version) { 71 | for (int version = min_version; version <= max_version; ++version) { 72 | if (registrations_len_ >= TFLITE_REGISTRATIONS_MAX) { 73 | // TODO(petewarden) - Add error reporting hooks so we can report this! 74 | return; 75 | } 76 | TfLiteRegistration* new_registration = ®istrations_[registrations_len_]; 77 | registrations_len_ += 1; 78 | 79 | *new_registration = *registration; 80 | new_registration->builtin_code = BuiltinOperator_CUSTOM; 81 | new_registration->custom_name = name; 82 | new_registration->version = version; 83 | } 84 | } 85 | 86 | } // namespace tflite 87 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_mutable_op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/op_resolver.h" 20 | #include "tensorflow/lite/micro/compatibility.h" 21 | #include "tensorflow/lite/schema/schema_generated.h" 22 | 23 | #ifndef TFLITE_REGISTRATIONS_MAX 24 | #define TFLITE_REGISTRATIONS_MAX (128) 25 | #endif 26 | 27 | namespace tflite { 28 | 29 | // Op versions discussed in this file are enumerated here: 30 | // tensorflow/lite/tools/versioning/op_version.cc 31 | 32 | class MicroMutableOpResolver : public OpResolver { 33 | public: 34 | const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 35 | int version) const override; 36 | const TfLiteRegistration* FindOp(const char* op, int version) const override; 37 | void AddBuiltin(tflite::BuiltinOperator op, TfLiteRegistration* registration, 38 | int min_version = 1, int max_version = 1); 39 | void AddCustom(const char* name, TfLiteRegistration* registration, 40 | int min_version = 1, int max_version = 1); 41 | 42 | private: 43 | TfLiteRegistration registrations_[TFLITE_REGISTRATIONS_MAX]; 44 | int registrations_len_ = 0; 45 | 46 | TF_LITE_REMOVE_VIRTUAL_DELETE 47 | }; 48 | 49 | } // namespace tflite 50 | 51 | #endif // TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ 52 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_optional_debug_tools.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Optional debugging functionality. For small sized binaries, these are not 16 | // needed. 17 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 18 | #define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 19 | 20 | #include "tensorflow/lite/micro/micro_interpreter.h" 21 | 22 | namespace tflite { 23 | // Prints a dump of what tensors and what nodes are in the interpreter. 24 | class MicroInterpreter; 25 | void PrintInterpreterState(MicroInterpreter* interpreter); 26 | 27 | #ifdef __cplusplus 28 | extern "C" { 29 | #endif // __cplusplus 30 | struct pairTfLiteNodeAndRegistration { 31 | TfLiteNode node; 32 | const TfLiteRegistration* registration; 33 | }; 34 | #ifdef __cplusplus 35 | } // extern "C" 36 | #endif // __cplusplus 37 | 38 | } // namespace tflite 39 | 40 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 41 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | 23 | namespace tflite { 24 | 25 | // Returns number of elements in the shape array. 26 | 27 | int ElementCount(const TfLiteIntArray& dims); 28 | 29 | uint8_t FloatToAsymmetricQuantizedUInt8(const float value, const float scale, 30 | const int zero_point); 31 | 32 | uint8_t FloatToSymmetricQuantizedUInt8(const float value, const float scale); 33 | 34 | int8_t FloatToAsymmetricQuantizedInt8(const float value, const float scale, 35 | const int zero_point); 36 | 37 | int8_t FloatToSymmetricQuantizedInt8(const float value, const float scale); 38 | 39 | // Converts a float value into a signed thirty-two-bit quantized value. Note 40 | // that values close to max int and min int may see significant error due to 41 | // a lack of floating point granularity for large values. 42 | int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale); 43 | 44 | // Helper methods to quantize arrays of floats to the desired format. 
45 | // 46 | // There are several key flavors of quantization in TfLite: 47 | // asymmetric symmetric per channel 48 | // int8 | X | X | X | 49 | // uint8 | X | X | | 50 | // int32 | | X | X | 51 | // 52 | // The per-op quantizaiton spec can be found here: 53 | // https://www.tensorflow.org/lite/performance/quantization_spec 54 | 55 | void AsymmetricQuantize(const float* input, int8_t* output, int num_elements, 56 | float scale, int zero_point = 0); 57 | 58 | void AsymmetricQuantize(const float* input, uint8_t* output, int num_elements, 59 | float scale, int zero_point = 128); 60 | 61 | void SymmetricQuantize(const float* input, int32_t* output, int num_elements, 62 | float scale); 63 | 64 | void SymmetricPerChannelQuantize(const float* input, int32_t* output, 65 | int num_elements, int num_channels, 66 | float* scales); 67 | 68 | void SignedSymmetricPerChannelQuantize(const float* values, 69 | TfLiteIntArray* dims, 70 | int quantized_dimension, 71 | int8_t* quantized_values, 72 | float* scaling_factor); 73 | 74 | void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims, 75 | int8_t* quantized_values, float* scaling_factor); 76 | 77 | void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims, 78 | int16_t* quantized_values, float* scaling_factor); 79 | 80 | void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims, 81 | int32_t* quantized_values, float* scaling_factor); 82 | 83 | void SymmetricQuantize(const float* values, TfLiteIntArray* dims, 84 | uint8_t* quantized_values, float* scaling_factor); 85 | 86 | void SymmetricDequantize(const int8_t* values, const int size, 87 | const float dequantization_scale, 88 | float* dequantized_values); 89 | 90 | } // namespace tflite 91 | 92 | #endif // TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ 93 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/simple_memory_allocator.cpp: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/simple_memory_allocator.h" 17 | 18 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 19 | #include "tensorflow/lite/micro/memory_helpers.h" 20 | 21 | namespace tflite { 22 | 23 | uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, 24 | size_t alignment) { 25 | if (has_child_allocator_) { 26 | // TODO(wangtz): Add error reporting when the parent allocator is locked! 27 | return nullptr; 28 | } 29 | uint8_t* previous_free = (data_ + data_size_max_) - data_size_; 30 | uint8_t* current_data = previous_free - size; 31 | uint8_t* aligned_result = AlignPointerDown(current_data, alignment); 32 | size_t aligned_size = (previous_free - aligned_result); 33 | if ((data_size_ + aligned_size) > data_size_max_) { 34 | // TODO(petewarden): Add error reporting beyond returning null! 35 | return nullptr; 36 | } 37 | data_size_ += aligned_size; 38 | return aligned_result; 39 | } 40 | 41 | SimpleMemoryAllocator SimpleMemoryAllocator::CreateChildAllocator() { 42 | // Note that the parameterized constructor initializes data_size_ to 0 which 43 | // is not what we expected. 
44 | SimpleMemoryAllocator child = *this; 45 | child.parent_allocator_ = this; 46 | // With C++ copy elision, &child should be available after return. 47 | has_child_allocator_ = true; 48 | return child; 49 | } 50 | 51 | SimpleMemoryAllocator::~SimpleMemoryAllocator() { 52 | // Root allocator doesn't have a parent. 53 | if (nullptr != parent_allocator_) { 54 | parent_allocator_->has_child_allocator_ = false; 55 | } 56 | } 57 | 58 | } // namespace tflite 59 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/micro/simple_memory_allocator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 17 | #define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | #include "tensorflow/lite/schema/schema_generated.h" 22 | 23 | namespace tflite { 24 | 25 | // TODO(petewarden): This allocator never frees up or reuses any memory, even 26 | // though we have enough information about lifetimes of the tensors to do so. 
27 | // This makes it pretty wasteful, so we should use a more intelligent method. 28 | class SimpleMemoryAllocator { 29 | public: 30 | SimpleMemoryAllocator(uint8_t* buffer, size_t buffer_size) 31 | : data_size_max_(buffer_size), data_(buffer) {} 32 | 33 | // Allocates memory starting at the end of the arena (highest address and 34 | // moving downwards, so that tensor buffers can be allocated from the start 35 | // in ascending order). 36 | uint8_t* AllocateFromTail(size_t size, size_t alignment); 37 | 38 | int GetDataSize() const { return data_size_; } 39 | 40 | // Child allocator is something like a temporary allocator. Memory allocated 41 | // by the child allocator will be freed once the child allocator is 42 | // deallocated. Child allocator could be cascaded to have for example 43 | // grandchild allocator. But at any given time, only the latest child 44 | // allocator can be used. All its ancestors will be locked to avoid memory 45 | // corruption. Locked means that the allocator can't allocate memory. 46 | // WARNING: Parent allocator needs to live longer than the child allocator. 47 | SimpleMemoryAllocator CreateChildAllocator(); 48 | 49 | // Unlocks parent allocator when the child allocator is destructed. 50 | ~SimpleMemoryAllocator(); 51 | 52 | private: 53 | int data_size_ = 0; 54 | size_t data_size_max_; 55 | uint8_t* data_; 56 | SimpleMemoryAllocator* parent_allocator_ = nullptr; 57 | // The allocator is locked if it has a child. 58 | bool has_child_allocator_ = false; 59 | }; 60 | 61 | } // namespace tflite 62 | 63 | #endif // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 64 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/string_type.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Abstract string. We don't want even absl at this level. 16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_ 17 | #define TENSORFLOW_LITE_STRING_TYPE_H_ 18 | 19 | #include 20 | 21 | namespace tflite { 22 | 23 | using std::string; 24 | 25 | } // namespace tflite 26 | 27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_ 28 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/string_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Util methods to read and write String tensors. 
17 | // String tensors are considered to be char tensor with protocol. 18 | // [0, 3] 4 bytes: N, num of strings in the tensor in little endian. 19 | // [(i+1)*4, (i+1)*4+3] 4 bytes: offset of i-th string in little endian. 20 | // [(N+2)*4, (N+2)*4+3] 4 bytes: length of the whole char buffer. 21 | // [offset(i), offset(i+1) - 1] : content of i-th string. 22 | // Example of a string tensor: 23 | // [ 24 | // 2, 0, 0, 0, # 2 strings. 25 | // 16, 0, 0, 0, # 0-th string starts from index 12. 26 | // 18, 0, 0, 0, # 1-st string starts from index 18. 27 | // 18, 0, 0, 0, # total length of array. 28 | // 'A', 'B', # 0-th string [16..17]: "AB" 29 | // ] # 1-th string, empty 30 | // 31 | // A typical usage: 32 | // In op.Eval(context, node): 33 | // DynamicBuffer buf; 34 | // # Add string "AB" to tensor, string is stored in dynamic buffer. 35 | // buf.AddString("AB", 2); 36 | // # Write content of DynamicBuffer to tensor in format of string tensor 37 | // # described above. 38 | // buf.WriteToTensor(tensor, nullptr) 39 | 40 | #ifndef TENSORFLOW_LITE_STRING_UTIL_H_ 41 | #define TENSORFLOW_LITE_STRING_UTIL_H_ 42 | 43 | #include 44 | 45 | #include "tensorflow/lite/c/common.h" 46 | #include "tensorflow/lite/string_type.h" 47 | 48 | namespace tflite { 49 | 50 | // Convenient structure to store string pointer and length. 51 | typedef struct { 52 | const char* str; 53 | int len; 54 | } StringRef; 55 | 56 | // DynamicBuffer holds temporary buffer that will be used to create a dynamic 57 | // tensor. A typical usage is to initialize a DynamicBuffer object, fill in 58 | // content and call CreateStringTensor in op.Eval(). 59 | class DynamicBuffer { 60 | public: 61 | DynamicBuffer() : offset_({0}) {} 62 | 63 | // Add string to dynamic buffer by resizing the buffer and copying the data. 64 | void AddString(const StringRef& string); 65 | 66 | // Add string to dynamic buffer by resizing the buffer and copying the data. 
67 | void AddString(const char* str, size_t len); 68 | 69 | // Join a list of string with separator, and add as a single string to the 70 | // buffer. 71 | void AddJoinedString(const std::vector& strings, char separator); 72 | 73 | // Fill content into a buffer and returns the number of bytes stored. 74 | // The function allocates space for the buffer but does NOT take ownership. 75 | int WriteToBuffer(char** buffer); 76 | 77 | // Fill content into a string tensor, with the given new_shape. The new shape 78 | // must match the number of strings in this object. Caller relinquishes 79 | // ownership of new_shape. If 'new_shape' is nullptr, keep the tensor's 80 | // existing shape. 81 | void WriteToTensor(TfLiteTensor* tensor, TfLiteIntArray* new_shape); 82 | 83 | // Fill content into a string tensor. Set shape to {num_strings}. 84 | void WriteToTensorAsVector(TfLiteTensor* tensor); 85 | 86 | private: 87 | // Data buffer to store contents of strings, not including headers. 88 | std::vector data_; 89 | // Offset of the starting index of each string in data buffer. 90 | std::vector offset_; 91 | }; 92 | 93 | // Return num of strings in a String tensor. 94 | int GetStringCount(const void* raw_buffer); 95 | int GetStringCount(const TfLiteTensor* tensor); 96 | 97 | // Get String pointer and length of index-th string in tensor. 98 | // NOTE: This will not create a copy of string data. 99 | StringRef GetString(const void* raw_buffer, int string_index); 100 | StringRef GetString(const TfLiteTensor* tensor, int string_index); 101 | } // namespace tflite 102 | 103 | #endif // TENSORFLOW_LITE_STRING_UTIL_H_ 104 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/type_to_tflitetype.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 16 | #define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 17 | 18 | // Arduino build defines abs as a macro here. That is invalid C++, and breaks 19 | // libc++'s header, undefine it. 20 | #ifdef abs 21 | #undef abs 22 | #endif 23 | 24 | #include 25 | #include 26 | 27 | #include "tensorflow/lite/c/common.h" 28 | 29 | namespace tflite { 30 | 31 | // Map statically from a c++ type to a TfLiteType. Used in interpreter for safe 32 | // casts. 
33 | template 34 | constexpr TfLiteType typeToTfLiteType() { 35 | return kTfLiteNoType; 36 | } 37 | template <> 38 | constexpr TfLiteType typeToTfLiteType() { 39 | return kTfLiteInt32; 40 | } 41 | template <> 42 | constexpr TfLiteType typeToTfLiteType() { 43 | return kTfLiteInt16; 44 | } 45 | template <> 46 | constexpr TfLiteType typeToTfLiteType() { 47 | return kTfLiteInt64; 48 | } 49 | template <> 50 | constexpr TfLiteType typeToTfLiteType() { 51 | return kTfLiteFloat32; 52 | } 53 | template <> 54 | constexpr TfLiteType typeToTfLiteType() { 55 | return kTfLiteUInt8; 56 | } 57 | template <> 58 | constexpr TfLiteType typeToTfLiteType() { 59 | return kTfLiteInt8; 60 | } 61 | template <> 62 | constexpr TfLiteType typeToTfLiteType() { 63 | return kTfLiteBool; 64 | } 65 | template <> 66 | constexpr TfLiteType typeToTfLiteType>() { 67 | return kTfLiteComplex64; 68 | } 69 | template <> 70 | constexpr TfLiteType typeToTfLiteType() { 71 | return kTfLiteString; 72 | } 73 | template <> 74 | constexpr TfLiteType typeToTfLiteType() { 75 | return kTfLiteFloat16; 76 | } 77 | } // namespace tflite 78 | #endif // TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 79 | -------------------------------------------------------------------------------- /lib/Arduino_TensorFlowLite/src/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1. 23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /lib/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project specific (private) libraries. 3 | PlatformIO will compile them to static libraries and link them into the executable file. 4 | 5 | The source code of each library should be placed in its own separate directory 6 | ("lib/your_library_name/[here are source files]"). 7 | 8 | For example, see the structure of the following two libraries `Foo` and `Bar`: 9 | 10 | |--lib 11 | | | 12 | | |--Bar 13 | | | |--docs 14 | | | |--examples 15 | | | |--src 16 | | | |- Bar.c 17 | | | |- Bar.h 18 | | | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html 19 | | | 20 | | |--Foo 21 | | | |- Foo.c 22 | | | |- Foo.h 23 | | | 24 | | |- README --> THIS FILE 25 | | 26 | |- platformio.ini 27 | |--src 28 | |- main.c 29 | 30 | and a contents of `src/main.c`: 31 | ``` 32 | #include 33 | #include 34 | 35 | int main (void) 36 | { 37 | ... 
38 | } 39 | 40 | ``` 41 | 42 | PlatformIO Library Dependency Finder will find automatically dependent 43 | libraries scanning project source files. 44 | 45 | More information about PlatformIO Library Dependency Finder 46 | - https://docs.platformio.org/page/librarymanager/ldf.html 47 | -------------------------------------------------------------------------------- /platformio.ini: -------------------------------------------------------------------------------- 1 | ;PlatformIO Project Configuration File 2 | ; 3 | ; Build options: build flags, source filter 4 | ; Upload options: custom upload port, speed and extra flags 5 | ; Library options: dependencies, extra library storages 6 | ; Advanced options: extra scripting 7 | ; 8 | ; Please visit documentation for the other options and examples 9 | ; https://docs.platformio.org/page/projectconf.html 10 | 11 | [env:nano33ble] 12 | platform = https://github.com/platformio/platform-nordicnrf52.git 13 | board = nano33ble 14 | framework = arduino 15 | monitor_speed = 115200 -------------------------------------------------------------------------------- /src/accelerometer_handler.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Adapted by Andri Yadi. 3 | Copyright 2019 The TensorFlow Authors. All Rights Reserved. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | ==============================================================================*/ 17 | 18 | #include "accelerometer_handler.h" 19 | 20 | #include 21 | #include 22 | 23 | #include "constants.h" 24 | 25 | #define RING_BUFFER_SIZE 600 26 | 27 | // A buffer holding the last 200 sets of 3-channel values 28 | float save_data[RING_BUFFER_SIZE] = {0.0}; 29 | // Most recent position in the save_data buffer 30 | int begin_index = 0; 31 | // True if there is not yet enough data to run inference 32 | bool pending_initial_data = true; 33 | // How often we should save a measurement during downsampling 34 | int sample_every_n; 35 | // The number of measurements since we last saved one 36 | int sample_skip_counter = 1; 37 | 38 | TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) { 39 | 40 | // Switch on the IMU 41 | if (!IMU.begin()) { 42 | error_reporter->Report("Failed to initialize IMU"); 43 | return kTfLiteError; 44 | } 45 | 46 | // Determine how many measurements to keep in order to meet kTargetHz 47 | float sample_rate = IMU.accelerationSampleRate(); 48 | sample_every_n = static_cast(roundf(sample_rate / kTargetHz)); 49 | 50 | return kTfLiteOk; 51 | } 52 | 53 | bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input, 54 | int input_length, bool reset_buffer) { 55 | // Clear the buffer if required, e.g. 
after a successful prediction 56 | if (reset_buffer) { 57 | memset(save_data, 0, RING_BUFFER_SIZE * sizeof(float)); 58 | begin_index = 0; 59 | pending_initial_data = true; 60 | } 61 | // Keep track of whether we stored any new data 62 | bool new_data = false; 63 | // Loop through new samples and add to buffer 64 | while (IMU.accelerationAvailable()) { 65 | float x, y, z; 66 | // Read each sample, removing it from the device's FIFO buffer 67 | if (!IMU.readAcceleration(x, y, z)) { 68 | error_reporter->Report("Failed to read data"); 69 | break; 70 | } 71 | // Throw away this sample unless it's the nth 72 | if (sample_skip_counter != sample_every_n) { 73 | sample_skip_counter += 1; 74 | continue; 75 | } 76 | 77 | // Write samples to our buffer, converting to milli-Gs 78 | // Change board orientation (for my purpose) that's specific for 79 | // Arduino Nano BLE Sense, for compatibility with model 80 | // (sensor orientation is different on Arduino Nano BLE Sense 81 | // compared with SparkFun Edge) 82 | save_data[begin_index++] = -x * 1000; 83 | save_data[begin_index++] = y * 1000; 84 | save_data[begin_index++] = z * 1000; 85 | 86 | // Since we took a sample, reset the skip counter 87 | sample_skip_counter = 1; 88 | // If we reached the end of the circle buffer, reset 89 | if (begin_index >= RING_BUFFER_SIZE) { 90 | begin_index = 0; 91 | } 92 | new_data = true; 93 | } 94 | 95 | // Skip this round if data is not ready yet 96 | if (!new_data) { 97 | return false; 98 | } 99 | 100 | // Check if we are ready for prediction or still pending more initial data 101 | if (pending_initial_data && begin_index >= 200) { 102 | pending_initial_data = false; 103 | } 104 | 105 | // Return if we don't have enough data 106 | if (pending_initial_data) { 107 | return false; 108 | } 109 | 110 | // Copy the requested number of bytes to the provided input tensor 111 | for (int i = 0; i < input_length; ++i) { 112 | int ring_array_index = begin_index + i - input_length; 113 | if 
(ring_array_index < 0) { 114 | ring_array_index += 600; 115 | } 116 | input[i] = save_data[ring_array_index]; 117 | } 118 | 119 | return true; 120 | } 121 | -------------------------------------------------------------------------------- /src/accelerometer_handler.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ 18 | 19 | #define kChannelNumber 3 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/micro/micro_error_reporter.h" 23 | 24 | extern int begin_index; 25 | extern TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter); 26 | extern bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, 27 | float* input, int input_length, bool reset_buffer); 28 | 29 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ 30 | -------------------------------------------------------------------------------- /src/constants.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "constants.h" 17 | 18 | // The number of expected consecutive inferences for each gesture type. 19 | // Established with the Arduino Nano 33 BLE Sense. 20 | // const int kConsecutiveInferenceThresholds[3] = {8, 5, 4}; 21 | const int kConsecutiveInferenceThresholds[3] = {7, 6, 4}; -------------------------------------------------------------------------------- /src/constants.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ 18 | 19 | // The expected accelerometer data sample frequency 20 | const float kTargetHz = 25; 21 | 22 | // The number of expected consecutive inferences for each gesture type 23 | extern const int kConsecutiveInferenceThresholds[3]; 24 | 25 | // Inference result threshold to be considered 26 | const float kMinInferenceThreshold = 0.65; 27 | 28 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ 29 | -------------------------------------------------------------------------------- /src/gesture_predictor.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "gesture_predictor.h" 17 | 18 | #include "constants.h" 19 | #include "Arduino.h" 20 | 21 | // Set to 1 to display each inference results 22 | #define DEBUG_INF_RES 1 23 | 24 | String LABELS[3] = {"W", "O", "L"}; 25 | 26 | // How many times the most recent gesture has been matched in a row 27 | int continuous_count = 0; 28 | // The result of the last prediction 29 | int last_predict = -1; 30 | 31 | // Return the result of the last prediction 32 | // 0: wing, 1: ring, 2: slope, 3: unknown 33 | int PredictGesture(float* output) { 34 | // Find whichever output has a probability > 0.8 (they sum to 1) 35 | int this_predict = -1; 36 | for (int i = 0; i < 3; i++) { 37 | 38 | #if DEBUG_INF_RES 39 | //Using percentage to visualize inference result 40 | //Serial.print(LABELS[i]); Serial.print(": "); Serial.print(output[i]*100); Serial.print("%,\t"); 41 | 42 | //Using bar graph to visualize inference result 43 | Serial.print(LABELS[i]); Serial.print(": "); 44 | int barNum = static_cast(roundf(output[i]*10)); 45 | for(int k = 0; k < barNum; k++) { 46 | Serial.print("█"); 47 | } 48 | for(int k=barNum-1; k < 10; k++) { 49 | Serial.print(" "); 50 | } 51 | Serial.print("\t"); 52 | #endif 53 | 54 | if (output[i] > kMinInferenceThreshold) this_predict = i; 55 | } 56 | 57 | #if DEBUG_INF_RES 58 | Serial.println(); 59 | Serial.println(); 60 | #endif 61 | 62 | // No gesture was detected above the threshold 63 | if (this_predict == -1) { 64 | continuous_count = 0; 65 | last_predict = 3; 66 | return 3; 67 | } 68 | if (last_predict == this_predict) { 69 | continuous_count += 1; 70 | } else { 71 | continuous_count = 0; 72 | } 73 | last_predict = this_predict; 74 | // If we haven't yet had enough consecutive matches for this gesture, 75 | // report a negative result 76 | if (continuous_count < kConsecutiveInferenceThresholds[this_predict]) { 77 | return 3; 78 | } 79 | // Otherwise, we've 
seen a positive result, so clear all our variables 80 | // and report it 81 | continuous_count = 0; 82 | last_predict = -1; 83 | return this_predict; 84 | } 85 | -------------------------------------------------------------------------------- /src/gesture_predictor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ 18 | 19 | extern int PredictGesture(float* output); 20 | 21 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ 22 | -------------------------------------------------------------------------------- /src/magic_wand_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // This is a standard TensorFlow Lite model file that has been converted into a 17 | // C data array, so it can be easily compiled into a binary for devices that 18 | // don't have a file system. It was created using the command: 19 | // xxd -i magic_wand_model.tflite > magic_wand_model_data.cc 20 | 21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ 22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ 23 | 24 | extern const unsigned char g_magic_wand_model_data[]; 25 | extern const int g_magic_wand_model_data_len; 26 | 27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ 28 | -------------------------------------------------------------------------------- /src/output_handler.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Adapted by Andri Yadi. 3 | Copyright 2019 The TensorFlow Authors. All Rights Reserved. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "output_handler.h"

#include "Arduino.h"

// RGB LED pin numbers.
// NOTE(review): pins 22/23/24 presumably map to the board's on-board RGB
// LED (e.g. Arduino Nano 33 BLE) — confirm for the target board.
const int ledPinRed = 22;
const int ledPinGreen = 23;
const int ledPinBlue = 24;

// Set to 1 to enable the extended Christmas-demo serial output below.
#define XMAS_DEMO 0

#if XMAS_DEMO
#include "xmas_demo.hpp"
#endif

// Light one RGB LED channel according to the recognized gesture.
// kind: 0 = W (red), 1 = O (green), 2 = L (blue); any other value leaves
// all channels off.
// NOTE(review): every pin is first driven HIGH and only the selected one
// LOW, which suggests the RGB LED is wired active-low — confirm on hardware.
void LightUpRGB(int kind) {
  // Turn all channels off before selecting one.
  digitalWrite(ledPinRed, HIGH);
  digitalWrite(ledPinGreen, HIGH);
  digitalWrite(ledPinBlue, HIGH);

  switch (kind)
  {
  case 0/* W */:
    digitalWrite(ledPinRed, LOW);
    break;
  case 1/* O */:
    digitalWrite(ledPinGreen, LOW);
    break;
  case 2/* L */:
    digitalWrite(ledPinBlue, LOW);
    break;
  default:
    // Unknown gesture (e.g. kind == 3): leave all channels off.
    break;
  }
}

// Report one inference result: toggles the built-in LED each call, lights
// the RGB LED for the recognized gesture, and prints gesture art over the
// error reporter. kind follows PredictGesture's encoding (0-2 gestures,
// 3 unknown — unknown prints nothing).
void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
  // The first time this method runs, set up our LED
  static bool is_initialized = false;
  if (!is_initialized) {
    pinMode(LED_BUILTIN, OUTPUT);
    pinMode(ledPinRed, OUTPUT);
    pinMode(ledPinGreen, OUTPUT);
    pinMode(ledPinBlue, OUTPUT);
    is_initialized = true;
  }
  // Toggle the LED every time an inference is performed
  // (odd call count = on, even = off).
  static int count = 0;
  ++count;
  if (count & 1) {
    digitalWrite(LED_BUILTIN, HIGH);
  } else {
    digitalWrite(LED_BUILTIN, LOW);
  }

  LightUpRGB(kind);

  // Print some ASCII art for each gesture
  if (kind == 0) {
    // error_reporter->Report("\n\r█ Wingardium Leviosa █\n\r");
    error_reporter->Report(
        "\n\r* * *\n\r * * * "
        "*\n\r * * * *\n\r * * * *\n\r * * "
        "* *\n\r * *\n\r\n\r");
#if !XMAS_DEMO
    error_reporter->Report("\n\r");
    error_reporter->Report("╔══════════════════════╗");
    error_reporter->Report("║ Wingardium Leviosa ║");
    error_reporter->Report("╚══════════════════════╝\n\r");
#else
    // w_label etc. come from xmas_demo.hpp when XMAS_DEMO is enabled.
    error_reporter->Report(w_label.c_str());
    delay(2000);
#endif
  } else if (kind == 1) {
    // error_reporter->Report("\n\r█ Obliviate █\n\r");
    error_reporter->Report(
        "\n\r *\n\r * *\n\r * *\n\r "
        " * *\n\r * *\n\r * *\n\r "
        " *\n\r");
#if !XMAS_DEMO
    error_reporter->Report("\n\r");
    error_reporter->Report("╔══════════════════════╗");
    error_reporter->Report("║ Obliviate ║");
    error_reporter->Report("╚══════════════════════╝\n\r");
#else
    error_reporter->Report(o_label.c_str());
    delay(2000);
#endif
  } else if (kind == 2) {
    // error_reporter->Report("\n\r█ Lumos █\n\r");
    error_reporter->Report(
        "\n\r *\n\r *\n\r *\n\r *\n\r "
        "*\n\r *\n\r *\n\r * * * * * * * *\n\r");
#if !XMAS_DEMO
    error_reporter->Report("\n\r");
    error_reporter->Report("╔══════════════════════╗");
    error_reporter->Report("║ Lumos ║");
    error_reporter->Report("╚══════════════════════╝\n\r");
#else
    // Extended light-show sequence; delays pace the serial output.
    error_reporter->Report(l_label.c_str());
    error_reporter->Report("\r\nLet there be light\n\r");
    delay(3000);
    error_reporter->Report(dycodex_label.c_str());
    error_reporter->Report(asciiArt.c_str());
    delay(5000);
    error_reporter->Report(dycodex_logo.c_str());
    error_reporter->Report(tensorflow_logo.c_str());
    delay(8000);
#endif
  }
}
--------------------------------------------------------------------------------
/src/output_handler.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/micro/micro_error_reporter.h" 21 | 22 | void HandleOutput(tflite::ErrorReporter* error_reporter, int kind); 23 | 24 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ 25 | -------------------------------------------------------------------------------- /test/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for PIO Unit Testing and project tests. 3 | 4 | Unit Testing is a software testing method by which individual units of 5 | source code, sets of one or more MCU program modules together with associated 6 | control data, usage procedures, and operating procedures, are tested to 7 | determine whether they are fit for use. Unit testing finds problems early 8 | in the development cycle. 9 | 10 | More information about PIO Unit Testing: 11 | - https://docs.platformio.org/page/plus/unit-testing.html 12 | --------------------------------------------------------------------------------