├── .gitignore ├── .travis.yml ├── .vscode └── extensions.json ├── README.md ├── custom.csv ├── include └── README ├── lib ├── CMakeLists.txt ├── README ├── tfmicro │ ├── _kiss_fft_guts.h │ ├── fixedpoint │ │ ├── fixedpoint.h │ │ └── fixedpoint_sse.h │ ├── flatbuffers │ │ ├── base.h │ │ ├── flatbuffers.h │ │ └── stl_emulation.h │ ├── internal │ │ └── detect_platform.h │ ├── kiss_fft.h │ ├── kissfft │ │ ├── COPYING │ │ ├── _kiss_fft_guts.h │ │ ├── kiss_fft.h │ │ └── tools │ │ │ └── kiss_fftr.h │ ├── ruy │ │ └── profiler │ │ │ └── instrumentation.h │ ├── tensorflow │ │ ├── core │ │ │ └── public │ │ │ │ └── version.h │ │ └── lite │ │ │ ├── c │ │ │ ├── builtin_op_data.h │ │ │ ├── common.c │ │ │ └── common.h │ │ │ ├── core │ │ │ └── api │ │ │ │ ├── error_reporter.cc │ │ │ │ ├── error_reporter.h │ │ │ │ ├── flatbuffer_conversions.cc │ │ │ │ ├── flatbuffer_conversions.h │ │ │ │ ├── op_resolver.cc │ │ │ │ ├── op_resolver.h │ │ │ │ ├── tensor_utils.cc │ │ │ │ └── tensor_utils.h │ │ │ ├── experimental │ │ │ └── microfrontend │ │ │ │ └── lib │ │ │ │ ├── bits.h │ │ │ │ ├── fft.cc │ │ │ │ ├── fft.h │ │ │ │ ├── fft_util.cc │ │ │ │ ├── fft_util.h │ │ │ │ ├── filterbank.c │ │ │ │ ├── filterbank.h │ │ │ │ ├── filterbank_util.c │ │ │ │ ├── filterbank_util.h │ │ │ │ ├── frontend.c │ │ │ │ ├── frontend.h │ │ │ │ ├── frontend_util.c │ │ │ │ ├── frontend_util.h │ │ │ │ ├── log_lut.c │ │ │ │ ├── log_lut.h │ │ │ │ ├── log_scale.c │ │ │ │ ├── log_scale.h │ │ │ │ ├── log_scale_util.c │ │ │ │ ├── log_scale_util.h │ │ │ │ ├── noise_reduction.c │ │ │ │ ├── noise_reduction.h │ │ │ │ ├── noise_reduction_util.c │ │ │ │ ├── noise_reduction_util.h │ │ │ │ ├── pcan_gain_control.c │ │ │ │ ├── pcan_gain_control.h │ │ │ │ ├── pcan_gain_control_util.c │ │ │ │ ├── pcan_gain_control_util.h │ │ │ │ ├── window.c │ │ │ │ ├── window.h │ │ │ │ ├── window_util.c │ │ │ │ └── window_util.h │ │ │ ├── kernels │ │ │ ├── internal │ │ │ │ ├── common.h │ │ │ │ ├── compatibility.h │ │ │ │ ├── cppmath.h │ │ │ │ ├── 
optimized │ │ │ │ │ └── neon_check.h │ │ │ │ ├── quantization_util.cc │ │ │ │ ├── quantization_util.h │ │ │ │ ├── reference │ │ │ │ │ ├── add.h │ │ │ │ │ ├── arg_min_max.h │ │ │ │ │ ├── binary_function.h │ │ │ │ │ ├── ceil.h │ │ │ │ │ ├── comparisons.h │ │ │ │ │ ├── concatenation.h │ │ │ │ │ ├── conv.h │ │ │ │ │ ├── depthwiseconv_float.h │ │ │ │ │ ├── depthwiseconv_uint8.h │ │ │ │ │ ├── dequantize.h │ │ │ │ │ ├── floor.h │ │ │ │ │ ├── fully_connected.h │ │ │ │ │ ├── integer_ops │ │ │ │ │ │ ├── add.h │ │ │ │ │ │ ├── conv.h │ │ │ │ │ │ ├── depthwise_conv.h │ │ │ │ │ │ ├── fully_connected.h │ │ │ │ │ │ ├── l2normalization.h │ │ │ │ │ │ ├── logistic.h │ │ │ │ │ │ ├── mul.h │ │ │ │ │ │ └── pooling.h │ │ │ │ │ ├── l2normalization.h │ │ │ │ │ ├── logistic.h │ │ │ │ │ ├── maximum_minimum.h │ │ │ │ │ ├── mul.h │ │ │ │ │ ├── neg.h │ │ │ │ │ ├── pad.h │ │ │ │ │ ├── pooling.h │ │ │ │ │ ├── prelu.h │ │ │ │ │ ├── process_broadcast_shapes.h │ │ │ │ │ ├── quantize.h │ │ │ │ │ ├── reduce.h │ │ │ │ │ ├── requantize.h │ │ │ │ │ ├── resize_nearest_neighbor.h │ │ │ │ │ ├── round.h │ │ │ │ │ ├── softmax.h │ │ │ │ │ ├── strided_slice.h │ │ │ │ │ └── sub.h │ │ │ │ ├── strided_slice_logic.h │ │ │ │ ├── tensor.h │ │ │ │ ├── tensor_ctypes.h │ │ │ │ └── types.h │ │ │ ├── kernel_util.cc │ │ │ ├── kernel_util.h │ │ │ ├── op_macros.h │ │ │ └── padding.h │ │ │ ├── micro │ │ │ ├── compatibility.h │ │ │ ├── debug_log.cc │ │ │ ├── debug_log.h │ │ │ ├── kernels │ │ │ │ ├── activation_utils.h │ │ │ │ ├── activations.cc │ │ │ │ ├── add.cc │ │ │ │ ├── all_ops_resolver.cc │ │ │ │ ├── all_ops_resolver.h │ │ │ │ ├── arg_min_max.cc │ │ │ │ ├── ceil.cc │ │ │ │ ├── circular_buffer.cc │ │ │ │ ├── comparisons.cc │ │ │ │ ├── concatenation.cc │ │ │ │ ├── conv.cc │ │ │ │ ├── depthwise_conv.cc │ │ │ │ ├── dequantize.cc │ │ │ │ ├── elementwise.cc │ │ │ │ ├── floor.cc │ │ │ │ ├── fully_connected.cc │ │ │ │ ├── l2norm.cc │ │ │ │ ├── logical.cc │ │ │ │ ├── logistic.cc │ │ │ │ ├── maximum_minimum.cc │ │ │ │ ├── 
micro_ops.h │ │ │ │ ├── micro_utils.h │ │ │ │ ├── mul.cc │ │ │ │ ├── neg.cc │ │ │ │ ├── pack.cc │ │ │ │ ├── pad.cc │ │ │ │ ├── pooling.cc │ │ │ │ ├── prelu.cc │ │ │ │ ├── quantize.cc │ │ │ │ ├── reduce.cc │ │ │ │ ├── reshape.cc │ │ │ │ ├── resize_nearest_neighbor.cc │ │ │ │ ├── round.cc │ │ │ │ ├── softmax.cc │ │ │ │ ├── split.cc │ │ │ │ ├── strided_slice.cc │ │ │ │ ├── sub.cc │ │ │ │ ├── svdf.cc │ │ │ │ └── unpack.cc │ │ │ ├── memory_helpers.cc │ │ │ ├── memory_helpers.h │ │ │ ├── memory_planner │ │ │ │ ├── greedy_memory_planner.cc │ │ │ │ ├── greedy_memory_planner.h │ │ │ │ ├── linear_memory_planner.cc │ │ │ │ ├── linear_memory_planner.h │ │ │ │ └── memory_planner.h │ │ │ ├── micro_allocator.cc │ │ │ ├── micro_allocator.h │ │ │ ├── micro_error_reporter.cc │ │ │ ├── micro_error_reporter.h │ │ │ ├── micro_interpreter.cc │ │ │ ├── micro_interpreter.h │ │ │ ├── micro_mutable_op_resolver.h │ │ │ ├── micro_optional_debug_tools.cc │ │ │ ├── micro_optional_debug_tools.h │ │ │ ├── micro_string.cc │ │ │ ├── micro_string.h │ │ │ ├── micro_time.cc │ │ │ ├── micro_time.h │ │ │ ├── micro_utils.cc │ │ │ ├── micro_utils.h │ │ │ ├── simple_memory_allocator.cc │ │ │ ├── simple_memory_allocator.h │ │ │ ├── test_helpers.cc │ │ │ ├── test_helpers.h │ │ │ ├── testing │ │ │ │ ├── micro_benchmark.h │ │ │ │ ├── micro_test.h │ │ │ │ ├── test_utils.cc │ │ │ │ └── test_utils.h │ │ │ └── tools │ │ │ │ └── make │ │ │ │ └── downloads │ │ │ │ └── kissfft │ │ │ │ ├── COPYING │ │ │ │ ├── _kiss_fft_guts.h │ │ │ │ ├── kiss_fft.c │ │ │ │ ├── kiss_fft.h │ │ │ │ └── tools │ │ │ │ ├── kiss_fftr.c │ │ │ │ └── kiss_fftr.h │ │ │ ├── schema │ │ │ └── schema_generated.h │ │ │ ├── string_type.h │ │ │ ├── string_util.h │ │ │ ├── type_to_tflitetype.h │ │ │ └── version.h │ └── tools │ │ └── kiss_fftr.h └── third_party │ ├── flatbuffers │ └── LICENSE.txt │ └── gemmlowp │ └── LICENSE ├── platformio.ini ├── src ├── audio_provider.h ├── command_responder.cc ├── command_responder.h ├── esp │ ├── audio_provider.cc │ 
├── ringbuf.c │ └── ringbuf.h ├── feature_provider.cc ├── feature_provider.h ├── main.cc ├── micro_features │ ├── micro_features_generator.cc │ ├── micro_features_generator.h │ ├── micro_model_settings.cc │ ├── micro_model_settings.h │ ├── model.cc │ ├── model.h │ ├── no_micro_features_data.cc │ ├── no_micro_features_data.h │ ├── yes_micro_features_data.cc │ └── yes_micro_features_data.h ├── recognize_commands.cc └── recognize_commands.h └── test └── README /.gitignore: -------------------------------------------------------------------------------- 1 | .pio 2 | .vscode/.browse.c_cpp.db* 3 | .vscode/c_cpp_properties.json 4 | .vscode/launch.json 5 | .vscode/ipch 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Continuous Integration (CI) is the practice, in software 2 | # engineering, of merging all developer working copies with a shared mainline 3 | # several times a day < https://docs.platformio.org/page/ci/index.html > 4 | # 5 | # Documentation: 6 | # 7 | # * Travis CI Embedded Builds with PlatformIO 8 | # < https://docs.travis-ci.com/user/integration/platformio/ > 9 | # 10 | # * PlatformIO integration with Travis CI 11 | # < https://docs.platformio.org/page/ci/travis.html > 12 | # 13 | # * User Guide for `platformio ci` command 14 | # < https://docs.platformio.org/page/userguide/cmd_ci.html > 15 | # 16 | # 17 | # Please choose one of the following templates (proposed below) and uncomment 18 | # it (remove "# " before each line) or use own configuration according to the 19 | # Travis CI documentation (see above). 20 | # 21 | 22 | 23 | # 24 | # Template #1: General project. Test it using existing `platformio.ini`. 
25 | # 26 | 27 | # language: python 28 | # python: 29 | # - "2.7" 30 | # 31 | # sudo: false 32 | # cache: 33 | # directories: 34 | # - "~/.platformio" 35 | # 36 | # install: 37 | # - pip install -U platformio 38 | # - platformio update 39 | # 40 | # script: 41 | # - platformio run 42 | 43 | 44 | # 45 | # Template #2: The project is intended to be used as a library with examples. 46 | # 47 | 48 | # language: python 49 | # python: 50 | # - "2.7" 51 | # 52 | # sudo: false 53 | # cache: 54 | # directories: 55 | # - "~/.platformio" 56 | # 57 | # env: 58 | # - PLATFORMIO_CI_SRC=path/to/test/file.c 59 | # - PLATFORMIO_CI_SRC=examples/file.ino 60 | # - PLATFORMIO_CI_SRC=path/to/test/directory 61 | # 62 | # install: 63 | # - pip install -U platformio 64 | # - platformio update 65 | # 66 | # script: 67 | # - platformio ci --lib="." --board=ID_1 --board=ID_2 --board=ID_N 68 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | // See http://go.microsoft.com/fwlink/?LinkId=827846 3 | // for the documentation about the extensions.json format 4 | "recommendations": [ 5 | "platformio.platformio-ide" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # esp32-tensorflow-microspeech 2 | 3 | ESP32 wake word detection with TensorFlow. This project demonstrates how signals from an I2S microphone can be 4 | processed to detect keywords such as "yes" and "no". 5 | 6 | ## Hardware 7 | 8 | This project is running on the [LilyGO TTGO T-Camera](https://www.banggood.com/custlink/vKDDuGnnRQ) and nearly reaches 9 | 5 inferences per second. 10 | 11 | ## Running the code 12 | 13 | Download or clone this repository and open it with PlatformIO in VSCode. Compile it and flash it to your ESP32.
The board 14 | requires a microphone to properly work. 15 | 16 | ## Credits 17 | 18 | This sample was created based on the [TinyML](https://www.oreilly.com/library/view/tinyml/9781492052036/) book by 19 | Pete Warden, Daniel Situnayake. Getting the code to run in platformio for ESP32 and the Arduino platform was done with the 20 | help of Wezley Sherman's [Medium Blog Post](https://towardsdatascience.com/tensorflow-meet-the-esp32-3ac36d7f32c7) 21 | -------------------------------------------------------------------------------- /custom.csv: -------------------------------------------------------------------------------- 1 | # Name, Type, SubType, Offset, Size, Flags 2 | nvs, data, nvs, 0x9000, 20K, 3 | otadata, data, ota, 0xe000, 8K, 4 | firm, app, ota_0, , 3400K, 5 | eeprom, data, 0x99, , 4K, 6 | spiffs, data, spiffs, , 444K, -------------------------------------------------------------------------------- /include/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project header files. 3 | 4 | A header file is a file containing C declarations and macro definitions 5 | to be shared between several project source files. You request the use of a 6 | header file in your project source file (C, C++, etc) located in `src` folder 7 | by including it, with the C preprocessing directive `#include'. 8 | 9 | ```src/main.c 10 | 11 | #include "header.h" 12 | 13 | int main (void) 14 | { 15 | ... 16 | } 17 | ``` 18 | 19 | Including a header file produces the same results as copying the header file 20 | into each source file that needs it. Such copying would be time-consuming 21 | and error-prone. With a header file, the related declarations appear 22 | in only one place. If they need to be changed, they can be changed in one 23 | place, and programs that include the header file will automatically use the 24 | new version when next recompiled. 
The header file eliminates the labor of 25 | finding and changing all the copies as well as the risk that a failure to 26 | find one copy will result in inconsistencies within a program. 27 | 28 | In C, the usual convention is to give header files names that end with `.h'. 29 | It is most portable to use only letters, digits, dashes, and underscores in 30 | header file names, and at most one dot. 31 | 32 | Read more about using header files in official GCC documentation: 33 | 34 | * Include Syntax 35 | * Include Operation 36 | * Once-Only Headers 37 | * Computed Includes 38 | 39 | https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html 40 | -------------------------------------------------------------------------------- /lib/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project specific (private) libraries. 3 | PlatformIO will compile them to static libraries and link into executable file. 4 | 5 | The source code of each library should be placed in its own separate directory 6 | ("lib/your_library_name/[here are source files]"). 7 | 8 | For example, see a structure of the following two libraries `Foo` and `Bar`: 9 | 10 | |--lib 11 | | | 12 | | |--Bar 13 | | | |--docs 14 | | | |--examples 15 | | | |--src 16 | | | |- Bar.c 17 | | | |- Bar.h 18 | | | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html 19 | | | 20 | | |--Foo 21 | | | |- Foo.c 22 | | | |- Foo.h 23 | | | 24 | | |- README --> THIS FILE 25 | | 26 | |- platformio.ini 27 | |--src 28 | |- main.c 29 | 30 | and the contents of `src/main.c`: 31 | ``` 32 | #include 33 | #include 34 | 35 | int main (void) 36 | { 37 | ... 38 | } 39 | 40 | ``` 41 | 42 | PlatformIO Library Dependency Finder will automatically find dependent 43 | libraries by scanning project source files.
44 | 45 | More information about PlatformIO Library Dependency Finder 46 | - https://docs.platformio.org/page/librarymanager/ldf.html 47 | -------------------------------------------------------------------------------- /lib/tfmicro/kissfft/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2003-2010 Mark Borgerding 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
12 | -------------------------------------------------------------------------------- /lib/tfmicro/kissfft/tools/kiss_fftr.h: -------------------------------------------------------------------------------- 1 | //#ifndef KISS_FTR_H 2 | #define KISS_FTR_H 3 | 4 | #include "kiss_fft.h" 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | 10 | /* 11 | 12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq. 13 | 14 | 15 | 16 | */ 17 | 18 | typedef struct kiss_fftr_state *kiss_fftr_cfg; 19 | 20 | 21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); 22 | /* 23 | nfft must be even 24 | 25 | If you don't care to allocate space, use mem = lenmem = NULL 26 | */ 27 | 28 | 29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); 30 | /* 31 | input timedata has nfft scalar points 32 | output freqdata has nfft/2+1 complex points 33 | */ 34 | 35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); 36 | /* 37 | input freqdata has nfft/2+1 complex points 38 | output timedata has nfft scalar points 39 | */ 40 | 41 | #define kiss_fftr_free free 42 | 43 | #ifdef __cplusplus 44 | } 45 | #endif 46 | //#endif 47 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | /// A functor that reports error to supporting system. Invoked similar to 23 | /// printf. 24 | /// 25 | /// Usage: 26 | /// ErrorReporter foo; 27 | /// foo.Report("test %d", 5); 28 | /// or 29 | /// va_list args; 30 | /// foo.Report("test %d", args); // where args is va_list 31 | /// 32 | /// Subclass ErrorReporter to provide another reporting destination. 33 | /// For example, if you have a GUI program, you might redirect to a buffer 34 | /// that drives a GUI error log box. 35 | class ErrorReporter { 36 | public: 37 | virtual ~ErrorReporter() {} 38 | virtual int Report(const char* format, va_list args) = 0; 39 | int Report(const char* format, ...); 40 | int ReportError(void*, const char* format, ...); 41 | }; 42 | 43 | } // namespace tflite 44 | 45 | // You should not make bare calls to the error reporter, instead use the 46 | // TF_LITE_REPORT_ERROR macro, since this allows message strings to be 47 | // stripped when the binary size has to be optimized. If you are looking to 48 | // reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and 49 | // every call will be stubbed out, taking no memory. 50 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 51 | #define TF_LITE_REPORT_ERROR(reporter, ...) \ 52 | do { \ 53 | static_cast(reporter)->Report(__VA_ARGS__); \ 54 | } while (false) 55 | #else // TF_LITE_STRIP_ERROR_STRINGS 56 | #define TF_LITE_REPORT_ERROR(reporter, ...) 
57 | #endif // TF_LITE_STRIP_ERROR_STRINGS 58 | 59 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 60 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 16 | #define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 17 | 18 | // These functions transform codes and data structures that are defined in the 19 | // flatbuffer serialization format into in-memory values that are used by the 20 | // runtime API and interpreter. 21 | 22 | #include "tensorflow/lite/c/common.h" 23 | #include "tensorflow/lite/core/api/error_reporter.h" 24 | #include "tensorflow/lite/core/api/op_resolver.h" 25 | #include "tensorflow/lite/schema/schema_generated.h" 26 | 27 | namespace tflite { 28 | 29 | // Interface class for builtin data allocations. 30 | class BuiltinDataAllocator { 31 | public: 32 | virtual void* Allocate(size_t size, size_t alignment_hint) = 0; 33 | virtual void Deallocate(void* data) = 0; 34 | 35 | // Allocate a structure, but make sure it is a POD structure that doesn't 36 | // require constructors to run. 
The reason we do this, is that Interpreter's C 37 | // extension part will take ownership so destructors will not be run during 38 | // deallocation. 39 | template 40 | T* AllocatePOD() { 41 | // TODO(b/154346074): Change this to is_trivially_destructible when all 42 | // platform targets support that properly. 43 | static_assert(std::is_pod::value, "Builtin data structure must be POD."); 44 | void* allocated_memory = this->Allocate(sizeof(T), alignof(T)); 45 | return new (allocated_memory) T; 46 | } 47 | 48 | virtual ~BuiltinDataAllocator() {} 49 | }; 50 | 51 | // Parse the appropriate data out of the op. 52 | // 53 | // This handles builtin data explicitly as there are flatbuffer schemas. 54 | // If it returns kTfLiteOk, it passes the data out with `builtin_data`. The 55 | // calling function has to pass in an allocator object, and this allocator 56 | // will be called to reserve space for the output data. If the calling 57 | // function's allocator reserves memory on the heap, then it's the calling 58 | // function's responsibility to free it. 59 | // If it returns kTfLiteError, `builtin_data` will be `nullptr`. 60 | TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, 61 | ErrorReporter* error_reporter, 62 | BuiltinDataAllocator* allocator, void** builtin_data); 63 | 64 | // Converts the tensor data type used in the flat buffer to the representation 65 | // used by the runtime. 66 | TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, 67 | ErrorReporter* error_reporter); 68 | 69 | } // namespace tflite 70 | 71 | #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 72 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | TfLiteStatus GetRegistrationFromOpCode( 21 | const OperatorCode* opcode, const OpResolver& op_resolver, 22 | ErrorReporter* error_reporter, const TfLiteRegistration** registration) { 23 | TfLiteStatus status = kTfLiteOk; 24 | *registration = nullptr; 25 | auto builtin_code = opcode->builtin_code(); 26 | int version = opcode->version(); 27 | 28 | if (builtin_code > BuiltinOperator_MAX || 29 | builtin_code < BuiltinOperator_MIN) { 30 | TF_LITE_REPORT_ERROR( 31 | error_reporter, 32 | "Op builtin_code out of range: %d. 
Are you using old TFLite binary " 33 | "with newer model?", 34 | builtin_code); 35 | status = kTfLiteError; 36 | } else if (builtin_code != BuiltinOperator_CUSTOM) { 37 | *registration = op_resolver.FindOp(builtin_code, version); 38 | if (*registration == nullptr) { 39 | TF_LITE_REPORT_ERROR( 40 | error_reporter, 41 | "Didn't find op for builtin opcode '%s' version '%d'\n", 42 | EnumNameBuiltinOperator(builtin_code), version); 43 | status = kTfLiteError; 44 | } 45 | } else if (!opcode->custom_code()) { 46 | TF_LITE_REPORT_ERROR( 47 | error_reporter, 48 | "Operator with CUSTOM builtin_code has no custom_code.\n"); 49 | status = kTfLiteError; 50 | } else { 51 | const char* name = opcode->custom_code()->c_str(); 52 | *registration = op_resolver.FindOp(name, version); 53 | if (*registration == nullptr) { 54 | // Do not report error for unresolved custom op, we do the final check 55 | // while preparing ops. 56 | status = kTfLiteError; 57 | } 58 | } 59 | return status; 60 | } 61 | 62 | } // namespace tflite 63 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 23 | if (!tensor->is_variable) { 24 | return kTfLiteOk; 25 | } 26 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 27 | // to the value of the buffer. 28 | int value = 0; 29 | if (tensor->type == kTfLiteInt8) { 30 | value = tensor->params.zero_point; 31 | } 32 | // TODO(b/139446230): Provide a platform header to better handle these 33 | // specific scenarios. 34 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 35 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 36 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 37 | memset(tensor->data.raw, value, tensor->bytes); 38 | #else 39 | char* raw_ptr = tensor->data.raw; 40 | for (size_t i = 0; i < tensor->bytes; ++i) { 41 | *raw_ptr = value; 42 | raw_ptr++; 43 | } 44 | #endif 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace tflite 49 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/bits.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ 17 | 18 | #ifdef __cplusplus 19 | #include 20 | 21 | extern "C" { 22 | #endif 23 | 24 | static inline int CountLeadingZeros32Slow(uint64_t n) { 25 | int zeroes = 28; 26 | if (n >> 16) zeroes -= 16, n >>= 16; 27 | if (n >> 8) zeroes -= 8, n >>= 8; 28 | if (n >> 4) zeroes -= 4, n >>= 4; 29 | return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; 30 | } 31 | 32 | static inline int CountLeadingZeros32(uint32_t n) { 33 | #if defined(_MSC_VER) 34 | unsigned long result = 0; // NOLINT(runtime/int) 35 | if (_BitScanReverse(&result, n)) { 36 | return 31 - result; 37 | } 38 | return 32; 39 | #elif defined(__GNUC__) 40 | 41 | // Handle 0 as a special case because __builtin_clz(0) is undefined. 42 | if (n == 0) { 43 | return 32; 44 | } 45 | return __builtin_clz(n); 46 | #else 47 | return CountLeadingZeros32Slow(n); 48 | #endif 49 | } 50 | 51 | static inline int MostSignificantBit32(uint32_t n) { 52 | return 32 - CountLeadingZeros32(n); 53 | } 54 | 55 | static inline int CountLeadingZeros64Slow(uint64_t n) { 56 | int zeroes = 60; 57 | if (n >> 32) zeroes -= 32, n >>= 32; 58 | if (n >> 16) zeroes -= 16, n >>= 16; 59 | if (n >> 8) zeroes -= 8, n >>= 8; 60 | if (n >> 4) zeroes -= 4, n >>= 4; 61 | return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; 62 | } 63 | 64 | static inline int CountLeadingZeros64(uint64_t n) { 65 | #if defined(_MSC_VER) && defined(_M_X64) 66 | // MSVC does not have __builtin_clzll. Use _BitScanReverse64. 67 | unsigned long result = 0; // NOLINT(runtime/int) 68 | if (_BitScanReverse64(&result, n)) { 69 | return 63 - result; 70 | } 71 | return 64; 72 | #elif defined(_MSC_VER) 73 | // MSVC does not have __builtin_clzll. 
Compose two calls to _BitScanReverse 74 | unsigned long result = 0; // NOLINT(runtime/int) 75 | if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { 76 | return 31 - result; 77 | } 78 | if (_BitScanReverse(&result, n)) { 79 | return 63 - result; 80 | } 81 | return 64; 82 | #elif defined(__GNUC__) 83 | 84 | // Handle 0 as a special case because __builtin_clzll(0) is undefined. 85 | if (n == 0) { 86 | return 64; 87 | } 88 | return __builtin_clzll(n); 89 | #else 90 | return CountLeadingZeros64Slow(n); 91 | #endif 92 | } 93 | 94 | static inline int MostSignificantBit64(uint64_t n) { 95 | return 64 - CountLeadingZeros64(n); 96 | } 97 | 98 | #ifdef __cplusplus 99 | } // extern "C" 100 | #endif 101 | 102 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ 103 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/fft.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h" 16 | 17 | #include 18 | 19 | #define FIXED_POINT 16 20 | #include "kiss_fft.h" 21 | #include "tools/kiss_fftr.h" 22 | 23 | void FftCompute(struct FftState* state, const int16_t* input, 24 | int input_scale_shift) { 25 | const size_t input_size = state->input_size; 26 | const size_t fft_size = state->fft_size; 27 | 28 | int16_t* fft_input = state->input; 29 | // First, scale the input by the given shift. 30 | int i; 31 | for (i = 0; i < input_size; ++i) { 32 | fft_input[i] = static_cast(static_cast(input[i]) 33 | << input_scale_shift); 34 | } 35 | // Zero out whatever else remains in the top part of the input. 36 | for (; i < fft_size; ++i) { 37 | fft_input[i] = 0; 38 | } 39 | 40 | // Apply the FFT. 41 | kiss_fftr( 42 | reinterpret_cast(state->scratch), 43 | state->input, 44 | reinterpret_cast(state->output)); 45 | } 46 | 47 | void FftInit(struct FftState* state) { 48 | // All the initialization is done in FftPopulateState() 49 | } 50 | 51 | void FftReset(struct FftState* state) { 52 | memset(state->input, 0, state->fft_size * sizeof(*state->input)); 53 | memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output)); 54 | } 55 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/fft.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ 17 | 18 | #include 19 | #include 20 | 21 | #ifdef __cplusplus 22 | extern "C" { 23 | #endif 24 | 25 | struct complex_int16_t { 26 | int16_t real; 27 | int16_t imag; 28 | }; 29 | 30 | struct FftState { 31 | int16_t* input; 32 | struct complex_int16_t* output; 33 | size_t fft_size; 34 | size_t input_size; 35 | void* scratch; 36 | size_t scratch_size; 37 | }; 38 | 39 | void FftCompute(struct FftState* state, const int16_t* input, 40 | int input_scale_shift); 41 | 42 | void FftInit(struct FftState* state); 43 | 44 | void FftReset(struct FftState* state); 45 | 46 | #ifdef __cplusplus 47 | } // extern "C" 48 | #endif 49 | 50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ 51 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h" 16 | 17 | #include 18 | 19 | #define FIXED_POINT 16 20 | #include "kiss_fft.h" 21 | #include "tools/kiss_fftr.h" 22 | 23 | int FftPopulateState(struct FftState* state, size_t input_size) { 24 | state->input_size = input_size; 25 | state->fft_size = 1; 26 | while (state->fft_size < state->input_size) { 27 | state->fft_size <<= 1; 28 | } 29 | 30 | state->input = reinterpret_cast( 31 | malloc(state->fft_size * sizeof(*state->input))); 32 | if (state->input == nullptr) { 33 | fprintf(stderr, "Failed to alloc fft input buffer\n"); 34 | return 0; 35 | } 36 | 37 | state->output = reinterpret_cast( 38 | malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2)); 39 | if (state->output == nullptr) { 40 | fprintf(stderr, "Failed to alloc fft output buffer\n"); 41 | return 0; 42 | } 43 | 44 | // Ask kissfft how much memory it wants. 
45 | size_t scratch_size = 0; 46 | kiss_fftr_cfg kfft_cfg = kiss_fftr_alloc( 47 | state->fft_size, 0, nullptr, &scratch_size); 48 | if (kfft_cfg != nullptr) { 49 | fprintf(stderr, "Kiss memory sizing failed.\n"); 50 | return 0; 51 | } 52 | state->scratch = malloc(scratch_size); 53 | if (state->scratch == nullptr) { 54 | fprintf(stderr, "Failed to alloc fft scratch buffer\n"); 55 | return 0; 56 | } 57 | state->scratch_size = scratch_size; 58 | // Let kissfft configure the scratch space we just allocated 59 | kfft_cfg = kiss_fftr_alloc(state->fft_size, 0, 60 | state->scratch, &scratch_size); 61 | if (kfft_cfg != state->scratch) { 62 | fprintf(stderr, "Kiss memory preallocation strategy failed.\n"); 63 | return 0; 64 | } 65 | return 1; 66 | } 67 | 68 | void FftFreeStateContents(struct FftState* state) { 69 | free(state->input); 70 | free(state->output); 71 | free(state->scratch); 72 | } 73 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_

#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"

#ifdef __cplusplus
extern "C" {
#endif

// Prepares an FFT for the given input size (allocates buffers and kissfft
// scratch; rounds the FFT length up to a power of two). Returns 1 on
// success, 0 on failure.
int FftPopulateState(struct FftState* state, size_t input_size);

// Frees any allocated buffers.
void FftFreeStateContents(struct FftState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h" 22 | 23 | #define kFilterbankBits 12 24 | 25 | #ifdef __cplusplus 26 | extern "C" { 27 | #endif 28 | 29 | struct FilterbankState { 30 | int num_channels; 31 | int start_index; 32 | int end_index; 33 | int16_t* channel_frequency_starts; 34 | int16_t* channel_weight_starts; 35 | int16_t* channel_widths; 36 | int16_t* weights; 37 | int16_t* unweights; 38 | uint64_t* work; 39 | }; 40 | 41 | // Converts the relevant complex values of an FFT output into energy (the 42 | // square magnitude). 43 | void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state, 44 | struct complex_int16_t* fft_output, 45 | int32_t* energy); 46 | 47 | // Computes the mel-scale filterbank on the given energy array. Output is cached 48 | // internally - to fetch it, you need to call FilterbankSqrt. 49 | void FilterbankAccumulateChannels(struct FilterbankState* state, 50 | const int32_t* energy); 51 | 52 | // Applies an integer square root to the 64 bit intermediate values of the 53 | // filterbank, and returns a pointer to them. Memory will be invalidated the 54 | // next time FilterbankAccumulateChannels is called. 
55 | uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift); 56 | 57 | void FilterbankReset(struct FilterbankState* state); 58 | 59 | #ifdef __cplusplus 60 | } // extern "C" 61 | #endif 62 | 63 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ 64 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_ 17 | 18 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h" 19 | 20 | #ifdef __cplusplus 21 | extern "C" { 22 | #endif 23 | 24 | struct FilterbankConfig { 25 | // number of frequency channel buckets for filterbank 26 | int num_channels; 27 | // maximum frequency to include 28 | float upper_band_limit; 29 | // minimum frequency to include 30 | float lower_band_limit; 31 | // unused 32 | int output_scale_shift; 33 | }; 34 | 35 | // Fills the frontendConfig with "sane" defaults. 
void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);

// Allocates any buffers.
int FilterbankPopulateState(const struct FilterbankConfig* config,
                            struct FilterbankState* state, int sample_rate,
                            int spectrum_size);

// Frees any allocated buffers.
void FilterbankFreeStateContents(struct FilterbankState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"

#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"

// Runs the full feature-extraction pipeline over the incoming samples:
// window -> FFT -> energy -> mel filterbank -> sqrt -> noise reduction ->
// (optional) PCAN gain control -> log scaling. Updates *num_samples_read with
// how many samples were consumed. If the window is not yet full, returns an
// output with size 0 and values NULL; otherwise output.values points into
// internal buffers that are overwritten on the next call.
struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
                                             const int16_t* samples,
                                             size_t num_samples,
                                             size_t* num_samples_read) {
  struct FrontendOutput output;
  output.values = NULL;
  output.size = 0;

  // Try to apply the window - if it fails, return and wait for more data.
  if (!WindowProcessSamples(&state->window, samples, num_samples,
                            num_samples_read)) {
    return output;
  }

  // Apply the FFT to the window's output (and scale it so that the fixed point
  // FFT can have as much resolution as possible).
  int input_shift =
      15 - MostSignificantBit32(state->window.max_abs_output_value);
  FftCompute(&state->fft, state->window.output, input_shift);

  // We can re-use the fft's output buffer to hold the energy.
  int32_t* energy = (int32_t*)state->fft.output;

  FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output,
                                      energy);

  FilterbankAccumulateChannels(&state->filterbank, energy);
  // Undo the pre-FFT scaling while taking the square root of the accumulators.
  uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift);

  // Apply noise reduction.
  NoiseReductionApply(&state->noise_reduction, scaled_filterbank);

  if (state->pcan_gain_control.enable_pcan) {
    PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank);
  }

  // Apply the log and scale. correction_bits compensates for the FFT length
  // and the filterbank's fixed-point weight scale.
  int correction_bits =
      MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
  uint16_t* logged_filterbank =
      LogScaleApply(&state->log_scale, scaled_filterbank,
                    state->filterbank.num_channels, correction_bits);

  output.size = state->filterbank.num_channels;
  output.values = logged_filterbank;
  return output;
}

// Resets the stateful pipeline stages back to their initial condition.
// NOTE(review): pcan_gain_control and log_scale are not reset here —
// presumably they hold only configuration, no per-frame state; confirm
// against their implementations.
void FrontendReset(struct FrontendState* state) {
  WindowReset(&state->window);
  FftReset(&state->fft);
  FilterbankReset(&state->filterbank);
  NoiseReductionReset(&state->noise_reduction);
}
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h" 22 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h" 23 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" 24 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" 25 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" 26 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h" 27 | 28 | #ifdef __cplusplus 29 | extern "C" { 30 | #endif 31 | 32 | struct FrontendState { 33 | struct WindowState window; 34 | struct FftState fft; 35 | struct FilterbankState filterbank; 36 | struct NoiseReductionState noise_reduction; 37 | struct PcanGainControlState pcan_gain_control; 38 | struct LogScaleState log_scale; 39 | }; 40 | 41 | struct FrontendOutput { 42 | const uint16_t* values; 43 | size_t size; 44 | }; 45 | 46 | // Main entry point to processing frontend samples. Updates num_samples_read to 47 | // contain the number of samples that have been consumed from the input array. 48 | // Returns a struct containing the generated output. 
If not enough samples were 49 | // added to generate a feature vector, the returned size will be 0 and the 50 | // values pointer will be NULL. Note that the output pointer will be invalidated 51 | // as soon as FrontendProcessSamples is called again, so copy the contents 52 | // elsewhere if you need to use them later. 53 | struct FrontendOutput FrontendProcessSamples(struct FrontendState* state, 54 | const int16_t* samples, 55 | size_t num_samples, 56 | size_t* num_samples_read); 57 | 58 | void FrontendReset(struct FrontendState* state); 59 | 60 | #ifdef __cplusplus 61 | } // extern "C" 62 | #endif 63 | 64 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ 65 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h" 16 | 17 | #include 18 | #include 19 | 20 | #include "tensorflow/lite/experimental/microfrontend/lib/bits.h" 21 | 22 | void FrontendFillConfigWithDefaults(struct FrontendConfig* config) { 23 | WindowFillConfigWithDefaults(&config->window); 24 | FilterbankFillConfigWithDefaults(&config->filterbank); 25 | NoiseReductionFillConfigWithDefaults(&config->noise_reduction); 26 | PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control); 27 | LogScaleFillConfigWithDefaults(&config->log_scale); 28 | } 29 | 30 | int FrontendPopulateState(const struct FrontendConfig* config, 31 | struct FrontendState* state, int sample_rate) { 32 | memset(state, 0, sizeof(*state)); 33 | 34 | if (!WindowPopulateState(&config->window, &state->window, sample_rate)) { 35 | fprintf(stderr, "Failed to populate window state\n"); 36 | return 0; 37 | } 38 | 39 | if (!FftPopulateState(&state->fft, state->window.size)) { 40 | fprintf(stderr, "Failed to populate fft state\n"); 41 | return 0; 42 | } 43 | FftInit(&state->fft); 44 | 45 | if (!FilterbankPopulateState(&config->filterbank, &state->filterbank, 46 | sample_rate, state->fft.fft_size / 2 + 1)) { 47 | fprintf(stderr, "Failed to populate filterbank state\n"); 48 | return 0; 49 | } 50 | 51 | if (!NoiseReductionPopulateState(&config->noise_reduction, 52 | &state->noise_reduction, 53 | state->filterbank.num_channels)) { 54 | fprintf(stderr, "Failed to populate noise reduction state\n"); 55 | return 0; 56 | } 57 | 58 | int input_correction_bits = 59 | MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2); 60 | if (!PcanGainControlPopulateState( 61 | &config->pcan_gain_control, &state->pcan_gain_control, 62 | state->noise_reduction.estimate, state->filterbank.num_channels, 63 | state->noise_reduction.smoothing_bits, input_correction_bits)) { 64 | fprintf(stderr, "Failed 
to populate pcan gain control state\n"); 65 | return 0; 66 | } 67 | 68 | if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) { 69 | fprintf(stderr, "Failed to populate log scale state\n"); 70 | return 0; 71 | } 72 | 73 | FrontendReset(state); 74 | 75 | // All good, return a true value. 76 | return 1; 77 | } 78 | 79 | void FrontendFreeStateContents(struct FrontendState* state) { 80 | WindowFreeStateContents(&state->window); 81 | FftFreeStateContents(&state->fft); 82 | FilterbankFreeStateContents(&state->filterbank); 83 | NoiseReductionFreeStateContents(&state->noise_reduction); 84 | PcanGainControlFreeStateContents(&state->pcan_gain_control); 85 | } 86 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_

#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"

#ifdef __cplusplus
extern "C" {
#endif

// Per-stage configuration for building a FrontendState.
struct FrontendConfig {
  struct WindowConfig window;
  struct FilterbankConfig filterbank;
  struct NoiseReductionConfig noise_reduction;
  struct PcanGainControlConfig pcan_gain_control;
  struct LogScaleConfig log_scale;
};

// Fills the FrontendConfig with "sane" defaults.
void FrontendFillConfigWithDefaults(struct FrontendConfig* config);

// Allocates any buffers. Returns 1 on success, 0 on failure.
int FrontendPopulateState(const struct FrontendConfig* config,
                          struct FrontendState* state, int sample_rate);

// Frees any allocated buffers.
void FrontendFreeStateContents(struct FrontendState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
// Piecewise-linear correction table used by the log2 approximation in
// log_scale.c (kLogSegments + 1 = 129 entries plus one padding entry; see
// log_lut.h for the segment/scale constants). Aligned to 4 bytes so paired
// 16-bit loads stay efficient on word-aligned targets.
const uint16_t kLogLut[]
#ifndef _MSC_VER
    __attribute__((aligned(4)))
#endif  // _MSC_VER
    = {0,    224,  442,  654,  861,  1063, 1259, 1450, 1636, 1817, 1992, 2163,
       2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
       3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
       5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
       5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
       5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
       5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
       4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
       3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
       2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
       1094, 963,  830,  695,  559,  421,  282,  142,  0,    0};
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ 17 | 18 | #include 19 | 20 | #ifdef __cplusplus 21 | extern "C" { 22 | #endif 23 | 24 | // Number of segments in the log lookup table. The table will be kLogSegments+1 25 | // in length (with some padding). 26 | #define kLogSegments 128 27 | #define kLogSegmentsLog2 7 28 | 29 | // Scale used by lookup table. 30 | #define kLogScale 65536 31 | #define kLogScaleLog2 16 32 | #define kLogCoeff 45426 33 | 34 | extern const uint16_t kLogLut[]; 35 | 36 | #ifdef __cplusplus 37 | } // extern "C" 38 | #endif 39 | 40 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ 41 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" 16 | 17 | #include "tensorflow/lite/experimental/microfrontend/lib/bits.h" 18 | #include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h" 19 | 20 | #define kuint16max 0x0000FFFF 21 | 22 | // The following functions implement integer logarithms of various sizes. The 23 | // approximation is calculated according to method described in 24 | // www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/ 25 | // publicaciones/SPL2007/Log10-spl07.pdf 26 | // It first calculates log2 of the input and then converts it to natural 27 | // logarithm. 
28 | 29 | static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) { 30 | // Part 1 31 | int32_t frac = x - (1LL << log2x); 32 | if (log2x < kLogScaleLog2) { 33 | frac <<= kLogScaleLog2 - log2x; 34 | } else { 35 | frac >>= log2x - kLogScaleLog2; 36 | } 37 | // Part 2 38 | const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2); 39 | const uint32_t seg_unit = 40 | (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2; 41 | 42 | const int32_t c0 = kLogLut[base_seg]; 43 | const int32_t c1 = kLogLut[base_seg + 1]; 44 | const int32_t seg_base = seg_unit * base_seg; 45 | const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2; 46 | return frac + c0 + rel_pos; 47 | } 48 | 49 | static uint32_t Log(const uint32_t x, const uint32_t scale_shift) { 50 | const uint32_t integer = MostSignificantBit32(x) - 1; 51 | const uint32_t fraction = Log2FractionPart(x, integer); 52 | const uint32_t log2 = (integer << kLogScaleLog2) + fraction; 53 | const uint32_t round = kLogScale / 2; 54 | const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2; 55 | // Finally scale to our output scale 56 | const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2; 57 | return loge_scaled; 58 | } 59 | 60 | uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal, 61 | int signal_size, int correction_bits) { 62 | const int scale_shift = state->scale_shift; 63 | uint16_t* output = (uint16_t*)signal; 64 | uint16_t* ret = output; 65 | int i; 66 | for (i = 0; i < signal_size; ++i) { 67 | uint32_t value = *signal++; 68 | if (state->enable_log) { 69 | if (correction_bits < 0) { 70 | value >>= -correction_bits; 71 | } else { 72 | value <<= correction_bits; 73 | } 74 | if (value > 1) { 75 | value = Log(value, scale_shift); 76 | } else { 77 | value = 0; 78 | } 79 | } 80 | *output++ = (value < kuint16max) ? 
value : kuint16max; 81 | } 82 | return ret; 83 | } 84 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ 17 | 18 | #include <stdint.h> 19 | #include <stdlib.h> 20 | 21 | #ifdef __cplusplus 22 | extern "C" { 23 | #endif 24 | 25 | struct LogScaleState { 26 | int enable_log; 27 | int scale_shift; 28 | }; 29 | 30 | // Applies a fixed point logarithm to the signal and converts it to 16 bit. Note 31 | // that the signal array will be modified.
32 | uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal, 33 | int signal_size, int correction_bits); 34 | 35 | #ifdef __cplusplus 36 | } // extern "C" 37 | #endif 38 | 39 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ 40 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h" 16 | 17 | void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) { 18 | config->enable_log = 1; 19 | config->scale_shift = 6; 20 | } 21 | 22 | int LogScalePopulateState(const struct LogScaleConfig* config, 23 | struct LogScaleState* state) { 24 | state->enable_log = config->enable_log; 25 | state->scale_shift = config->scale_shift; 26 | return 1; 27 | } 28 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ 17 | 18 | #include <stdint.h> 19 | #include <stdlib.h> 20 | 21 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" 22 | 23 | #ifdef __cplusplus 24 | extern "C" { 25 | #endif 26 | 27 | struct LogScaleConfig { 28 | // set to false (0) to disable this module 29 | int enable_log; 30 | // scale results by 2^(scale_shift) 31 | int scale_shift; 32 | }; 33 | 34 | // Populates the LogScaleConfig with "sane" default values. 35 | void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config); 36 | 37 | // Allocates any buffers. 38 | int LogScalePopulateState(const struct LogScaleConfig* config, 39 | struct LogScaleState* state); 40 | 41 | #ifdef __cplusplus 42 | } // extern "C" 43 | #endif 44 | 45 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ 46 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" 16 | 17 | #include <string.h> 18 | 19 | void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) { 20 | int i; 21 | for (i = 0; i < state->num_channels; ++i) { 22 | const uint32_t smoothing = 23 | ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing; 24 | const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing; 25 | 26 | // Update the estimate of the noise. 27 | const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits; 28 | uint32_t estimate = 29 | (((uint64_t)signal_scaled_up * smoothing) + 30 | ((uint64_t)state->estimate[i] * one_minus_smoothing)) >> 31 | kNoiseReductionBits; 32 | state->estimate[i] = estimate; 33 | 34 | // Make sure that we can't get a negative value for the signal - estimate. 35 | if (estimate > signal_scaled_up) { 36 | estimate = signal_scaled_up; 37 | } 38 | 39 | const uint32_t floor = 40 | ((uint64_t)signal[i] * state->min_signal_remaining) >> 41 | kNoiseReductionBits; 42 | const uint32_t subtracted = 43 | (signal_scaled_up - estimate) >> state->smoothing_bits; 44 | const uint32_t output = subtracted > floor ?
subtracted : floor; 45 | signal[i] = output; 46 | } 47 | } 48 | 49 | void NoiseReductionReset(struct NoiseReductionState* state) { 50 | memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels); 51 | } 52 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ 17 | 18 | #define kNoiseReductionBits 14 19 | 20 | #include <stdint.h> 21 | #include <stdlib.h> 22 | 23 | #ifdef __cplusplus 24 | extern "C" { 25 | #endif 26 | 27 | struct NoiseReductionState { 28 | int smoothing_bits; 29 | uint16_t even_smoothing; 30 | uint16_t odd_smoothing; 31 | uint16_t min_signal_remaining; 32 | int num_channels; 33 | uint32_t* estimate; 34 | }; 35 | 36 | // Removes stationary noise from each channel of the signal using a low pass 37 | // filter.
38 | void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal); 39 | 40 | void NoiseReductionReset(struct NoiseReductionState* state); 41 | 42 | #ifdef __cplusplus 43 | } // extern "C" 44 | #endif 45 | 46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ 47 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h" 16 | 17 | #include <stdio.h> 18 | 19 | void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) { 20 | config->smoothing_bits = 10; 21 | config->even_smoothing = 0.025; 22 | config->odd_smoothing = 0.06; 23 | config->min_signal_remaining = 0.05; 24 | } 25 | 26 | int NoiseReductionPopulateState(const struct NoiseReductionConfig* config, 27 | struct NoiseReductionState* state, 28 | int num_channels) { 29 | state->smoothing_bits = config->smoothing_bits; 30 | state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits); 31 | state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits); 32 | state->min_signal_remaining = 33 | config->min_signal_remaining * (1 << kNoiseReductionBits); 34 | state->num_channels = num_channels; 35 | state->estimate = calloc(state->num_channels, sizeof(*state->estimate)); 36 | if (state->estimate == NULL) { 37 | fprintf(stderr, "Failed to alloc estimate buffer\n"); 38 | return 0; 39 | } 40 | return 1; 41 | } 42 | 43 | void NoiseReductionFreeStateContents(struct NoiseReductionState* state) { 44 | free(state->estimate); 45 | } 46 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ 17 | 18 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" 19 | 20 | #ifdef __cplusplus 21 | extern "C" { 22 | #endif 23 | 24 | struct NoiseReductionConfig { 25 | // scale the signal up by 2^(smoothing_bits) before reduction 26 | int smoothing_bits; 27 | // smoothing coefficient for even-numbered channels 28 | float even_smoothing; 29 | // smoothing coefficient for odd-numbered channels 30 | float odd_smoothing; 31 | // fraction of signal to preserve (1.0 disables this module) 32 | float min_signal_remaining; 33 | }; 34 | 35 | // Populates the NoiseReductionConfig with "sane" default values. 36 | void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config); 37 | 38 | // Allocates any buffers. 39 | int NoiseReductionPopulateState(const struct NoiseReductionConfig* config, 40 | struct NoiseReductionState* state, 41 | int num_channels); 42 | 43 | // Frees any allocated buffers. 
44 | void NoiseReductionFreeStateContents(struct NoiseReductionState* state); 45 | 46 | #ifdef __cplusplus 47 | } // extern "C" 48 | #endif 49 | 50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ 51 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" 16 | 17 | #include "tensorflow/lite/experimental/microfrontend/lib/bits.h" 18 | 19 | int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) { 20 | if (x <= 2) { 21 | return lut[x]; 22 | } 23 | 24 | const int16_t interval = MostSignificantBit32(x); 25 | lut += 4 * interval - 6; 26 | 27 | const int16_t frac = 28 | ((interval < 11) ? 
(x << (11 - interval)) : (x >> (interval - 11))) & 29 | 0x3FF; 30 | 31 | int32_t result = ((int32_t)lut[2] * frac) >> 5; 32 | result += (int32_t)((uint32_t)lut[1] << 5); 33 | result *= frac; 34 | result = (result + (1 << 14)) >> 15; 35 | result += lut[0]; 36 | return (int16_t)result; 37 | } 38 | 39 | uint32_t PcanShrink(const uint32_t x) { 40 | if (x < (2 << kPcanSnrBits)) { 41 | return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits); 42 | } else { 43 | return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits); 44 | } 45 | } 46 | 47 | void PcanGainControlApply(struct PcanGainControlState* state, 48 | uint32_t* signal) { 49 | int i; 50 | for (i = 0; i < state->num_channels; ++i) { 51 | const uint32_t gain = 52 | WideDynamicFunction(state->noise_estimate[i], state->gain_lut); 53 | const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift; 54 | signal[i] = PcanShrink(snr); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ 17 | 18 | #include <stdint.h> 19 | #include <stdlib.h> 20 | 21 | #define kPcanSnrBits 12 22 | #define kPcanOutputBits 6 23 | 24 | #ifdef __cplusplus 25 | extern "C" { 26 | #endif 27 | 28 | // Details at https://research.google/pubs/pub45911.pdf 29 | struct PcanGainControlState { 30 | int enable_pcan; 31 | uint32_t* noise_estimate; 32 | int num_channels; 33 | int16_t* gain_lut; 34 | int32_t snr_shift; 35 | }; 36 | 37 | int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut); 38 | 39 | uint32_t PcanShrink(const uint32_t x); 40 | 41 | void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal); 42 | 43 | #ifdef __cplusplus 44 | } // extern "C" 45 | #endif 46 | 47 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ 48 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h" 16 | 17 | #include <math.h> 18 | #include <stdio.h> 19 | 20 | #define kint16max 0x00007FFF 21 | 22 | void PcanGainControlFillConfigWithDefaults( 23 | struct PcanGainControlConfig* config) { 24 | config->enable_pcan = 0; 25 | config->strength = 0.95; 26 | config->offset = 80.0; 27 | config->gain_bits = 21; 28 | } 29 | 30 | int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config, 31 | int32_t input_bits, uint32_t x) { 32 | const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits); 33 | const float gain_as_float = 34 | ((uint32_t)1 << config->gain_bits) * 35 | powf(x_as_float + config->offset, -config->strength); 36 | 37 | if (gain_as_float > kint16max) { 38 | return kint16max; 39 | } 40 | return (int16_t)(gain_as_float + 0.5f); 41 | } 42 | 43 | int PcanGainControlPopulateState(const struct PcanGainControlConfig* config, 44 | struct PcanGainControlState* state, 45 | uint32_t* noise_estimate, 46 | const int num_channels, 47 | const uint16_t smoothing_bits, 48 | const int32_t input_correction_bits) { 49 | state->enable_pcan = config->enable_pcan; 50 | if (!state->enable_pcan) { 51 | return 1; 52 | } 53 | state->noise_estimate = noise_estimate; 54 | state->num_channels = num_channels; 55 | state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t)); 56 | if (state->gain_lut == NULL) { 57 | fprintf(stderr, "Failed to allocate gain LUT\n"); 58 | return 0; 59 | } 60 | state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits; 61 | 62 | const int32_t input_bits = smoothing_bits - input_correction_bits; 63 | state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0); 64 | state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1); 65 | state->gain_lut -= 6; 66 | int interval; 67 | for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) { 68
| const uint32_t x0 = (uint32_t)1 << (interval - 1); 69 | const uint32_t x1 = x0 + (x0 >> 1); 70 | const uint32_t x2 = 71 | (interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0; 72 | 73 | const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0); 74 | const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1); 75 | const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2); 76 | 77 | const int32_t diff1 = (int32_t)y1 - y0; 78 | const int32_t diff2 = (int32_t)y2 - y0; 79 | const int32_t a1 = 4 * diff1 - diff2; 80 | const int32_t a2 = diff2 - a1; 81 | 82 | state->gain_lut[4 * interval] = y0; 83 | state->gain_lut[4 * interval + 1] = (int16_t)a1; 84 | state->gain_lut[4 * interval + 2] = (int16_t)a2; 85 | } 86 | state->gain_lut += 6; 87 | return 1; 88 | } 89 | 90 | void PcanGainControlFreeStateContents(struct PcanGainControlState* state) { 91 | free(state->gain_lut); 92 | } 93 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ 17 | 18 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" 19 | 20 | #define kWideDynamicFunctionBits 32 21 | #define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3) 22 | 23 | #ifdef __cplusplus 24 | extern "C" { 25 | #endif 26 | 27 | struct PcanGainControlConfig { 28 | // set to false (0) to disable this module 29 | int enable_pcan; 30 | // gain normalization exponent (0.0 disables, 1.0 full strength) 31 | float strength; 32 | // positive value added in the normalization denominator 33 | float offset; 34 | // number of fractional bits in the gain 35 | int gain_bits; 36 | }; 37 | 38 | void PcanGainControlFillConfigWithDefaults( 39 | struct PcanGainControlConfig* config); 40 | 41 | int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config, 42 | int32_t input_bits, uint32_t x); 43 | 44 | int PcanGainControlPopulateState(const struct PcanGainControlConfig* config, 45 | struct PcanGainControlState* state, 46 | uint32_t* noise_estimate, 47 | const int num_channels, 48 | const uint16_t smoothing_bits, 49 | const int32_t input_correction_bits); 50 | 51 | void PcanGainControlFreeStateContents(struct PcanGainControlState* state); 52 | 53 | #ifdef __cplusplus 54 | } // extern "C" 55 | #endif 56 | 57 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ 58 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/window.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h" 16 | 17 | #include <string.h> 18 | 19 | int WindowProcessSamples(struct WindowState* state, const int16_t* samples, 20 | size_t num_samples, size_t* num_samples_read) { 21 | const int size = state->size; 22 | 23 | // Copy samples from the samples buffer over to our local input. 24 | size_t max_samples_to_copy = state->size - state->input_used; 25 | if (max_samples_to_copy > num_samples) { 26 | max_samples_to_copy = num_samples; 27 | } 28 | memcpy(state->input + state->input_used, samples, 29 | max_samples_to_copy * sizeof(*samples)); 30 | *num_samples_read = max_samples_to_copy; 31 | state->input_used += max_samples_to_copy; 32 | 33 | if (state->input_used < state->size) { 34 | // We don't have enough samples to compute a window. 35 | return 0; 36 | } 37 | 38 | // Apply the window to the input.
39 | const int16_t* coefficients = state->coefficients; 40 | const int16_t* input = state->input; 41 | int16_t* output = state->output; 42 | int i; 43 | int16_t max_abs_output_value = 0; 44 | for (i = 0; i < size; ++i) { 45 | int16_t new_value = 46 | (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits; 47 | *output++ = new_value; 48 | if (new_value < 0) { 49 | new_value = -new_value; 50 | } 51 | if (new_value > max_abs_output_value) { 52 | max_abs_output_value = new_value; 53 | } 54 | } 55 | // Shuffle the input down by the step size, and update how much we have used. 56 | memmove(state->input, state->input + state->step, 57 | sizeof(*state->input) * (state->size - state->step)); 58 | state->input_used -= state->step; 59 | state->max_abs_output_value = max_abs_output_value; 60 | 61 | // Indicate that the output buffer is valid for the next stage. 62 | return 1; 63 | } 64 | 65 | void WindowReset(struct WindowState* state) { 66 | memset(state->input, 0, state->size * sizeof(*state->input)); 67 | memset(state->output, 0, state->size * sizeof(*state->output)); 68 | state->input_used = 0; 69 | state->max_abs_output_value = 0; 70 | } 71 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/window.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ 17 | 18 | #include <stdint.h> 19 | #include <stdlib.h> 20 | 21 | #define kFrontendWindowBits 12 22 | 23 | #ifdef __cplusplus 24 | extern "C" { 25 | #endif 26 | 27 | struct WindowState { 28 | size_t size; 29 | int16_t* coefficients; 30 | size_t step; 31 | 32 | int16_t* input; 33 | size_t input_used; 34 | int16_t* output; 35 | int16_t max_abs_output_value; 36 | }; 37 | 38 | // Applies a window to the samples coming in, stepping forward at the given 39 | // rate. 40 | int WindowProcessSamples(struct WindowState* state, const int16_t* samples, 41 | size_t num_samples, size_t* num_samples_read); 42 | 43 | void WindowReset(struct WindowState* state); 44 | 45 | #ifdef __cplusplus 46 | } // extern "C" 47 | #endif 48 | 49 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ 50 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/window_util.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/experimental/microfrontend/lib/window_util.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | // Some platforms don't have M_PI 23 | #ifndef M_PI 24 | #define M_PI 3.14159265358979323846 25 | #endif 26 | 27 | void WindowFillConfigWithDefaults(struct WindowConfig* config) { 28 | config->size_ms = 25; 29 | config->step_size_ms = 10; 30 | } 31 | 32 | int WindowPopulateState(const struct WindowConfig* config, 33 | struct WindowState* state, int sample_rate) { 34 | state->size = config->size_ms * sample_rate / 1000; 35 | state->step = config->step_size_ms * sample_rate / 1000; 36 | 37 | state->coefficients = malloc(state->size * sizeof(*state->coefficients)); 38 | if (state->coefficients == NULL) { 39 | fprintf(stderr, "Failed to allocate window coefficients\n"); 40 | return 0; 41 | } 42 | 43 | // Populate the window values. 44 | const float arg = M_PI * 2.0 / ((float)state->size); 45 | int i; 46 | for (i = 0; i < state->size; ++i) { 47 | float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5))); 48 | // Scale it to fixed point and round it. 
49 | state->coefficients[i] = 50 | floor(float_value * (1 << kFrontendWindowBits) + 0.5); 51 | } 52 | 53 | state->input_used = 0; 54 | state->input = malloc(state->size * sizeof(*state->input)); 55 | if (state->input == NULL) { 56 | fprintf(stderr, "Failed to allocate window input\n"); 57 | return 0; 58 | } 59 | 60 | state->output = malloc(state->size * sizeof(*state->output)); 61 | if (state->output == NULL) { 62 | fprintf(stderr, "Failed to allocate window output\n"); 63 | return 0; 64 | } 65 | 66 | return 1; 67 | } 68 | 69 | void WindowFreeStateContents(struct WindowState* state) { 70 | free(state->coefficients); 71 | free(state->input); 72 | free(state->output); 73 | } 74 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/microfrontend/lib/window_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_

#include "tensorflow/lite/experimental/microfrontend/lib/window.h"

#ifdef __cplusplus
extern "C" {
#endif

struct WindowConfig {
  // length of window frame in milliseconds
  size_t size_ms;
  // length of step for next frame in milliseconds
  size_t step_size_ms;
};

// Populates the WindowConfig with "sane" default values.
void WindowFillConfigWithDefaults(struct WindowConfig* config);

// Allocates any buffers.  Returns 1 on success, 0 if an allocation fails.
int WindowPopulateState(const struct WindowConfig* config,
                        struct WindowState* state, int sample_rate);

// Frees any allocated buffers.
void WindowFreeStateContents(struct WindowState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/cppmath.h:
--------------------------------------------------------------------------------
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ 23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) 24 | #define TF_LITE_GLOBAL_STD_PREFIX 25 | #else 26 | #define TF_LITE_GLOBAL_STD_PREFIX std 27 | #endif 28 | 29 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ 30 | template \ 31 | inline T tf_name(const T x) { \ 32 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ 33 | } 34 | 35 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); 36 | 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 40 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #include "NEON_2_SSE.h" 26 | #endif 27 | 28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 29 | // defined, PortableSomeFunc(args) otherwise. 30 | #ifdef USE_NEON 31 | // Always use Neon code 32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 33 | 34 | #else 35 | // No NEON available: Use Portable code 36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) 37 | 38 | #endif // defined(USE_NEON) 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 41 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, 26 | const T3* input2_data, const RuntimeShape& output_shape, 27 | T2* output_data, const Cmp& cmp) { 28 | TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0); 29 | TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1, 30 | output_shape.DimensionsCount()); 31 | int axis = input2_data[0]; 32 | if (axis < 0) { 33 | axis += input1_shape.DimensionsCount(); 34 | } 35 | const int axis_size = input1_shape.Dims(axis); 36 | 37 | int outer_size = 1; 38 | for (int i = 0; i < axis; ++i) { 39 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i)); 40 | outer_size *= input1_shape.Dims(i); 41 | } 42 | 43 | int inner_size = 1; 44 | const int dims_count = input1_shape.DimensionsCount(); 45 | for (int i = axis + 1; i < dims_count; ++i) { 46 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1)); 47 | inner_size *= input1_shape.Dims(i); 48 | } 49 | for (int outer = 0; outer < outer_size; ++outer) { 50 | for (int inner = 0; inner < inner_size; ++inner) { 51 | auto min_max_value = input1_data[outer * axis_size * inner_size + inner]; 52 | T2 min_max_index = 0; 53 | for (int i = 1; i < axis_size; ++i) { 54 | const auto& curr_value = 55 | input1_data[(outer * axis_size + i) * inner_size + inner]; 56 | if (cmp(curr_value, min_max_value)) { 57 | min_max_value = curr_value; 58 | min_max_index = static_cast(i); 59 | } 60 | } 61 | output_data[outer * inner_size + inner] = min_max_index; 62 | } 63 | } 64 | } 65 | } // namespace reference_ops 66 | } // namespace tflite 67 | 68 | #endif // 
TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 69 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/compatibility.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | // TODO(ycling): Refactoring. Remove BroadcastLogical and use the more 27 | // generalized and efficient BroadcastBinaryFunction. 28 | // 29 | // Also appears to duplicate MinimumMaximum. 30 | // 31 | // R: Result type. T1: Input 1 type. T2: Input 2 type. 
32 | template 33 | inline void BroadcastBinaryFunction4DSlow( 34 | const RuntimeShape& unextended_input1_shape, const T1* input1_data, 35 | const RuntimeShape& unextended_input2_shape, const T2* input2_data, 36 | const RuntimeShape& unextended_output_shape, R* output_data, 37 | R (*func)(T1, T2)) { 38 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); 39 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); 40 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); 41 | const RuntimeShape output_shape = 42 | RuntimeShape::ExtendedShape(4, unextended_output_shape); 43 | 44 | NdArrayDesc<4> desc1; 45 | NdArrayDesc<4> desc2; 46 | NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, 47 | unextended_input2_shape, &desc1, &desc2); 48 | 49 | for (int b = 0; b < output_shape.Dims(0); ++b) { 50 | for (int y = 0; y < output_shape.Dims(1); ++y) { 51 | for (int x = 0; x < output_shape.Dims(2); ++x) { 52 | for (int c = 0; c < output_shape.Dims(3); ++c) { 53 | auto out_idx = Offset(output_shape, b, y, x, c); 54 | auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); 55 | auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); 56 | auto in1_val = input1_data[in1_idx]; 57 | auto in2_val = input2_data[in2_idx]; 58 | output_data[out_idx] = func(in1_val, in2_val); 59 | } 60 | } 61 | } 62 | } 63 | } 64 | 65 | // R: Result type. T1: Input 1 type. T2: Input 2 type. 66 | // TODO(renjieliu): Refactor other binary functions to use this one. 
67 | template 68 | inline void BinaryFunction(const RuntimeShape& input1_shape, 69 | const T1* input1_data, 70 | const RuntimeShape& input2_shape, 71 | const T2* input2_data, 72 | const RuntimeShape& output_shape, R* output_data, 73 | R (*func)(T1, T2)) { 74 | const int flat_size = 75 | MatchingFlatSize(input1_shape, input2_shape, output_shape); 76 | for (int i = 0; i < flat_size; ++i) { 77 | output_data[i] = func(input1_data[i], input2_data[i]); 78 | } 79 | } 80 | 81 | } // namespace reference_ops 82 | } // namespace tflite 83 | 84 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 85 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/dequantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 17 | 18 | #include 19 | 20 | #include 21 | 22 | #include "tensorflow/lite/kernels/internal/common.h" 23 | #include "tensorflow/lite/kernels/internal/types.h" 24 | 25 | namespace tflite { 26 | 27 | namespace reference_ops { 28 | 29 | // Dequantizes into a float without rounding. 30 | template 31 | inline void Dequantize(const tflite::DequantizationParams& op_params, 32 | const RuntimeShape& input_shape, 33 | const InputT* input_data, 34 | const RuntimeShape& output_shape, OutputT* output_data) { 35 | int32 zero_point = op_params.zero_point; 36 | const double scale = op_params.scale; 37 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 38 | 39 | for (int i = 0; i < flat_size; i++) { 40 | const int32 val = input_data[i]; 41 | const OutputT result = static_cast(scale * (val - zero_point)); 42 | output_data[i] = result; 43 | } 44 | } 45 | 46 | // Dequantizes per-channel quantized tensor to float. 47 | template 48 | inline void PerChannelDequantize( 49 | const tflite::PerChannelDequantizationParams& op_params, 50 | const RuntimeShape& input_shape, const T* input_data, 51 | const RuntimeShape& output_shape, float* output_data) { 52 | // Ensure flat size is same. 
53 | MatchingFlatSize(input_shape, output_shape); 54 | 55 | const int32* zero_point = op_params.zero_point; 56 | const float* scale = op_params.scale; 57 | const int32 quantized_dimension = op_params.quantized_dimension; 58 | const int32 num_dims = input_shape.DimensionsCount(); 59 | const int32* dims_data = input_shape.DimsData(); 60 | std::vector current_dim(num_dims, 0); 61 | 62 | do { 63 | size_t offset = 64 | ReducedOutputOffset(num_dims, reinterpret_cast(dims_data), 65 | current_dim.data(), 0, nullptr); 66 | const int channel = current_dim[quantized_dimension]; 67 | const int32 val = input_data[offset]; 68 | const float result = 69 | static_cast(scale[channel] * (val - zero_point[channel])); 70 | output_data[offset] = result; 71 | } while (NextIndex(num_dims, reinterpret_cast(dims_data), 72 | current_dim.data())); 73 | } 74 | 75 | } // namespace reference_ops 76 | 77 | } // namespace tflite 78 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 79 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | 20 | namespace tflite { 21 | namespace reference_integer_ops { 22 | 23 | inline void L2Normalization(int32_t input_zero_point, int32_t outer_size, 24 | int32_t depth, const int8* input_data, 25 | int8* output_data) { 26 | static constexpr int8_t kMinInt8 = std::numeric_limits::min(); 27 | static constexpr int8_t kMaxInt8 = std::numeric_limits::max(); 28 | // The output scale must be in sync with Prepare(). 29 | // Output is in 1/128 scale so the actual output range is nudged from [-1, 1] 30 | // to [-1, 127/128]. 31 | static constexpr int32_t kOutputScale = 7; 32 | for (int outer_index = 0; outer_index < outer_size; ++outer_index) { 33 | // int32 = (int8 - int8) ^ 2. 34 | // ([-128, 127] - [-128, 127]) ^ 2 = [0, (2^8 - 1)^2] so the accumulator is 35 | // safe from overflowing in at least 2^16 steps. 36 | int32_t acc = 0; 37 | for (int inner_index = 0; inner_index < depth; ++inner_index) { 38 | int32_t input = 39 | input_data[depth * outer_index + inner_index] - input_zero_point; 40 | acc += input * input; 41 | } 42 | int32_t inv_l2norm_multiplier; 43 | int inv_l2norm_shift; 44 | GetInvSqrtQuantizedMultiplierExp(acc, kReverseShift, &inv_l2norm_multiplier, 45 | &inv_l2norm_shift); 46 | 47 | for (int inner_index = 0; inner_index < depth; ++inner_index) { 48 | int32_t input = 49 | input_data[depth * outer_index + inner_index] - input_zero_point; 50 | 51 | // Rescale and downcast. Rescale is folded into the division. 
52 | int32_t output_in_q24 = MultiplyByQuantizedMultiplier( 53 | input, inv_l2norm_multiplier, inv_l2norm_shift + kOutputScale); 54 | output_in_q24 = 55 | std::min(static_cast(kMaxInt8), 56 | std::max(static_cast(kMinInt8), output_in_q24)); 57 | output_data[depth * outer_index + inner_index] = 58 | static_cast(output_in_q24); 59 | } 60 | } 61 | } 62 | } // namespace reference_integer_ops 63 | } // namespace tflite 64 | 65 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 66 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | namespace reference_ops { 23 | 24 | template 25 | void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape, 26 | const T* input1_data, 27 | const RuntimeShape& unextended_input2_shape, 28 | const T* input2_data, 29 | const RuntimeShape& unextended_output_shape, 30 | T* output_data, Op op) { 31 | // Uses element-wise calculation if broadcast is not required. 32 | if (unextended_input1_shape == unextended_input2_shape) { 33 | const int flat_size = 34 | MatchingElementsSize(unextended_input1_shape, unextended_input2_shape, 35 | unextended_output_shape); 36 | for (int i = 0; i < flat_size; ++i) { 37 | output_data[i] = op(input1_data[i], input2_data[i]); 38 | } 39 | } else { 40 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); 41 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); 42 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); 43 | 44 | NdArrayDesc desc1; 45 | NdArrayDesc desc2; 46 | NdArrayDesc output_desc; 47 | NdArrayDescsForElementwiseBroadcast( 48 | unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); 49 | CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), 50 | &output_desc); 51 | 52 | auto maxmin_func = [&](int indexes[N]) { 53 | output_data[SubscriptToIndex(output_desc, indexes)] = 54 | op(input1_data[SubscriptToIndex(desc1, indexes)], 55 | input2_data[SubscriptToIndex(desc2, indexes)]); 56 | }; 57 | NDOpsHelper(output_desc, maxmin_func); 58 | } 59 | } 60 | 61 | } // namespace reference_ops 62 | } // namespace tflite 63 | 64 | #endif // 
TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 65 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/prelu.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow 
Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/compatibility.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | // Broadcast prelu to output_shape for quantized uint8/int8 data. 
27 | template 28 | inline void BroadcastPrelu4DSlow( 29 | const PreluParams& params, const RuntimeShape& input_shape, 30 | const T* input_data, const RuntimeShape& alpha_shape, const T* alpha_data, 31 | const RuntimeShape& output_shape, T* output_data) { 32 | TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4); 33 | TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4); 34 | TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 4); 35 | const RuntimeShape extended_output_shape = 36 | RuntimeShape::ExtendedShape(4, output_shape); 37 | NdArrayDesc<4> desc1; 38 | NdArrayDesc<4> desc2; 39 | NdArrayDescsForElementwiseBroadcast(input_shape, alpha_shape, &desc1, &desc2); 40 | 41 | for (int b = 0; b < extended_output_shape.Dims(0); ++b) { 42 | for (int y = 0; y < extended_output_shape.Dims(1); ++y) { 43 | for (int x = 0; x < extended_output_shape.Dims(2); ++x) { 44 | for (int c = 0; c < extended_output_shape.Dims(3); ++c) { 45 | int output_index = Offset(extended_output_shape, b, y, x, c); 46 | int input_index = SubscriptToIndex(desc1, b, y, x, c); 47 | const int32 input_value = 48 | params.input_offset + input_data[input_index]; 49 | int32 output_value; 50 | if (input_value >= 0) { 51 | output_value = MultiplyByQuantizedMultiplier( 52 | input_value, params.output_multiplier_1, params.output_shift_1); 53 | } else { 54 | auto alpha_index = SubscriptToIndex(desc2, b, y, x, c); 55 | const int32 alpha_value = 56 | params.alpha_offset + alpha_data[alpha_index]; 57 | 58 | output_value = MultiplyByQuantizedMultiplier( 59 | input_value * alpha_value, params.output_multiplier_2, 60 | params.output_shift_2); 61 | } 62 | output_value += params.output_offset; 63 | 64 | const int32 quantized_min = std::numeric_limits::min(); 65 | const int32 quantized_max = std::numeric_limits::max(); 66 | const int32 clamped_output = 67 | std::min(quantized_max, std::max(quantized_min, output_value)); 68 | output_data[output_index] = static_cast(clamped_output); 69 | } 70 | } 71 | } 72 | } 73 | } 74 | 75 | } 
// namespace reference_ops 76 | } // namespace tflite 77 | 78 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ 79 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/cppmath.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | template 27 | inline void AffineQuantize(const tflite::QuantizationParams& op_params, 28 | const RuntimeShape& input_shape, 29 | const InputT* input_data, 30 | const RuntimeShape& output_shape, 31 | OutputT* output_data) { 32 | const int32 zero_point = op_params.zero_point; 33 | const double scale = op_params.scale; 34 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 35 | static constexpr int32 min_val = std::numeric_limits::min(); 36 | static constexpr int32 max_val = std::numeric_limits::max(); 37 
| 38 | for (int i = 0; i < flat_size; i++) { 39 | const InputT val = input_data[i]; 40 | int32 unclamped = 41 | static_cast(TfLiteRound(val / static_cast(scale))) + 42 | zero_point; 43 | int32 clamped = std::min(std::max(unclamped, min_val), max_val); 44 | output_data[i] = clamped; 45 | } 46 | } 47 | 48 | } // namespace reference_ops 49 | 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 52 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 17 | 18 | #include "ruy/profiler/instrumentation.h" // from @ruy 19 | #include "tensorflow/lite/kernels/internal/common.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | namespace reference_ops { 24 | 25 | template 26 | inline void Requantize(const input_type* input_data, int32_t size, 27 | int32_t effective_scale_multiplier, 28 | int32_t effective_scale_shift, int32_t input_zeropoint, 29 | int32_t output_zeropoint, output_type* output_data) { 30 | ruy::profiler::ScopeLabel label("Requantize"); 31 | const bool same_scale = 32 | (effective_scale_multiplier == 1 << 30 && effective_scale_shift == 1); 33 | if (same_scale) { 34 | const bool mixed_type_int8_uint8 = 35 | std::is_same::value && 36 | std::is_same::value; 37 | const bool mixed_type_uint8_int8 = 38 | std::is_same::value && 39 | std::is_same::value; 40 | const int32_t zero_point_diff = input_zeropoint - output_zeropoint; 41 | // Fast path to do requantization for the case when just a shift of 128 is 42 | // needed. 
43 | if ((mixed_type_int8_uint8 && zero_point_diff == -128) || 44 | (mixed_type_uint8_int8 && zero_point_diff == 128)) { 45 | for (int i = 0; i < size; ++i) { 46 | output_data[i] = input_data[i] ^ 0x80; 47 | } 48 | } 49 | } 50 | static constexpr int32_t kMinOutput = std::numeric_limits::min(); 51 | static constexpr int32_t kMaxOutput = std::numeric_limits::max(); 52 | for (int i = 0; i < size; ++i) { 53 | const int32_t input = input_data[i] - input_zeropoint; 54 | const int32_t output = 55 | MultiplyByQuantizedMultiplier(input, effective_scale_multiplier, 56 | effective_scale_shift) + 57 | output_zeropoint; 58 | const int32_t clamped_output = 59 | std::max(std::min(output, kMaxOutput), kMinOutput); 60 | output_data[i] = static_cast(clamped_output); 61 | } 62 | } 63 | 64 | } // namespace reference_ops 65 | } // namespace tflite 66 | 67 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 68 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 17 | 18 | // If we're on a platform without standard IO functions, fall back to a 19 | // non-portable function. 20 | #ifdef TF_LITE_MCU_DEBUG_LOG 21 | 22 | #include "tensorflow/lite/micro/micro_error_reporter.h" 23 | 24 | #define DEBUG_LOG(x) \ 25 | do { \ 26 | DebugLog(x); \ 27 | } while (0) 28 | 29 | inline void InfiniteLoop() { 30 | DEBUG_LOG("HALTED\n"); 31 | while (1) { 32 | } 33 | } 34 | 35 | #define TFLITE_ABORT InfiniteLoop(); 36 | 37 | #else // TF_LITE_MCU_DEBUG_LOG 38 | 39 | #include 40 | #include 41 | #include 42 | 43 | #define DEBUG_LOG(x) \ 44 | do { \ 45 | fprintf(stderr, "%s", (x)); \ 46 | } while (0) 47 | 48 | #define TFLITE_ABORT abort() 49 | 50 | #endif // TF_LITE_MCU_DEBUG_LOG 51 | 52 | #ifdef NDEBUG 53 | #define TFLITE_ASSERT_FALSE (static_cast(0)) 54 | #else 55 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT 56 | #endif 57 | 58 | #define TF_LITE_FATAL(msg) \ 59 | do { \ 60 | DEBUG_LOG(msg); \ 61 | DEBUG_LOG("\nFATAL\n"); \ 62 | TFLITE_ABORT; \ 63 | } while (0) 64 | 65 | #define TF_LITE_ASSERT(x) \ 66 | do { \ 67 | if (!(x)) TF_LITE_FATAL(#x); \ 68 | } while (0) 69 | 70 | #define TF_LITE_ASSERT_EQ(x, y) \ 71 | do { \ 72 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ 73 | } while (0) 74 | 75 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 76 | -------------------------------------------------------------------------------- 
/lib/tfmicro/tensorflow/lite/kernels/padding.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_ 16 | #define TENSORFLOW_LITE_KERNELS_PADDING_H_ 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | 20 | namespace tflite { 21 | 22 | // TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover. 23 | inline int ComputePadding(int stride, int dilation_rate, int in_size, 24 | int filter_size, int out_size) { 25 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 26 | int padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2; 27 | return padding > 0 ? padding : 0; 28 | } 29 | 30 | // It's not guaranteed that padding is symmetric. It's important to keep 31 | // offset for algorithms need all paddings. 32 | inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, 33 | int filter_size, int out_size, 34 | int* offset) { 35 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 36 | int total_padding = 37 | ((out_size - 1) * stride + effective_filter_size - in_size); 38 | total_padding = total_padding > 0 ? 
total_padding : 0; 39 | *offset = total_padding % 2; 40 | return total_padding / 2; 41 | } 42 | 43 | // Matching GetWindowedOutputSize in TensorFlow. 44 | inline int ComputeOutSize(TfLitePadding padding, int image_size, 45 | int filter_size, int stride, int dilation_rate = 1) { 46 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 47 | switch (padding) { 48 | case kTfLitePaddingSame: 49 | return (image_size + stride - 1) / stride; 50 | case kTfLitePaddingValid: 51 | return (image_size + stride - effective_filter_size) / stride; 52 | default: 53 | return 0; 54 | } 55 | } 56 | 57 | inline TfLitePaddingValues ComputePaddingHeightWidth( 58 | int stride_height, int stride_width, int dilation_rate_height, 59 | int dilation_rate_width, int in_height, int in_width, int filter_height, 60 | int filter_width, TfLitePadding padding, int* out_height, int* out_width) { 61 | *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, 62 | dilation_rate_width); 63 | *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, 64 | dilation_rate_height); 65 | 66 | TfLitePaddingValues padding_values; 67 | int offset = 0; 68 | padding_values.height = 69 | ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, 70 | filter_height, *out_height, &offset); 71 | padding_values.height_offset = offset; 72 | padding_values.width = 73 | ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, 74 | filter_width, *out_width, &offset); 75 | padding_values.width_offset = offset; 76 | return padding_values; 77 | } 78 | } // namespace tflite 79 | 80 | #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ 81 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of the DebugLog() function that's required for a 17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is 18 | // the only function that's absolutely required to be available on a target 19 | // device, since it's used for communicating test results back to the host so 20 | // that we can verify the implementation is working correctly. 21 | // It's designed to be as easy as possible to supply an implementation though. 22 | // On platforms that have a POSIX stack or C library, it can be written as a 23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error 24 | // stream of the console, but if there's no OS or C library available, there's 25 | // almost always an equivalent way to write out a string to some serial 26 | // interface that can be used instead. For example on Arm M-series MCUs, calling 27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to 28 | // whatever debug serial connection is available. If you're running mbed, you 29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling 30 | // `pc.printf("%s", s)`. 31 | // To add an equivalent function for your own platform, create your own 32 | // implementation file, and place it in a subfolder with named after the OS 33 | // you're targeting. 
For example, see the Cortex M bare metal version in 34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on 35 | // tensorflow/lite/micro/mbed/debug_log.cc. 36 | 37 | #include "tensorflow/lite/micro/debug_log.h" 38 | 39 | #include 40 | 41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); } 42 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/lite/c/builtin_op_data.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | 25 | namespace tflite { 26 | namespace ops { 27 | namespace micro { 28 | 29 | // Returns the floating point value for a fused activation: 30 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 31 | switch (act) { 32 | case kTfLiteActNone: 33 | return a; 34 | case kTfLiteActRelu: 35 | return std::max(0.0f, a); 36 | case kTfLiteActRelu1: 37 | return std::max(-1.0f, std::min(a, 1.0f)); 38 | case kTfLiteActRelu6: 39 | return std::max(0.0f, std::min(a, 6.0f)); 40 | case kTfLiteActTanh: 41 | return std::tanh(a); 42 | case kTfLiteActSignBit: 43 | return std::signbit(a); 44 | case kTfLiteActSigmoid: 45 | return 1.0f / (1.0f + std::exp(-a)); 46 | } 47 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused 48 | // activation is added to the enum and not handled here). 
49 | } 50 | 51 | } // namespace micro 52 | } // namespace ops 53 | } // namespace tflite 54 | 55 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 56 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 
11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | namespace ops { 20 | namespace micro { 21 | 22 | class AllOpsResolver : public MicroMutableOpResolver { 23 | public: 24 | AllOpsResolver(); 25 | 26 | private: 27 | TF_LITE_REMOVE_VIRTUAL_DELETE 28 | }; 29 | 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | 34 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 35 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/ceil.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/ceil.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace ceil { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) { 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Ceil(GetTensorShape(input), GetTensorData(input), 50 | GetTensorShape(output), GetTensorData(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace ceil 55 | 56 | TfLiteRegistration* Register_CEIL() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, 59 | /*prepare=*/ceil::Prepare, 60 | /*invoke=*/ceil::Eval, 61 | /*profiling_string=*/nullptr, 62 | /*builtin_code=*/0, 63 | /*custom_name=*/nullptr, 64 | /*version=*/0}; 65 | return &r; 66 | } 67 | 68 | } // namespace micro 69 
| } // namespace ops 70 | } // namespace tflite 71 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace floor { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | reference_ops::Floor(GetTensorShape(input), GetTensorData(input), 35 | GetTensorShape(output), GetTensorData(output)); 36 | return kTfLiteOk; 37 | } 38 | } // namespace floor 39 | 40 | TfLiteRegistration* Register_FLOOR() { 41 | static TfLiteRegistration r = {/*init=*/nullptr, 42 | /*free=*/nullptr, 43 | 
/*prepare=*/nullptr, 44 | /*invoke=*/floor::Eval, 45 | /*profiling_string=*/nullptr, 46 | /*builtin_code=*/0, 47 | /*custom_name=*/nullptr, 48 | /*version=*/0}; 49 | return &r; 50 | } 51 | 52 | } // namespace micro 53 | } // namespace ops 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 16 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | 20 | namespace tflite { 21 | namespace ops { 22 | namespace micro { 23 | 24 | // Forward declaration of all micro op kernel registration methods. These 25 | // registrations are included with the standard `BuiltinOpResolver`. 26 | // 27 | // This header is particularly useful in cases where only a subset of ops are 28 | // needed. In such cases, the client can selectively add only the registrations 29 | // their model requires, using a custom `(Micro)MutableOpResolver`. Selective 30 | // registration in turn allows the linker to strip unused kernels. 
31 | 32 | TfLiteRegistration* Register_ABS(); 33 | TfLiteRegistration* Register_ADD(); 34 | TfLiteRegistration* Register_ARG_MAX(); 35 | TfLiteRegistration* Register_ARG_MIN(); 36 | TfLiteRegistration* Register_AVERAGE_POOL_2D(); 37 | TfLiteRegistration* Register_CEIL(); 38 | TfLiteRegistration* Register_CIRCULAR_BUFFER(); 39 | TfLiteRegistration* Register_CONV_2D(); 40 | TfLiteRegistration* Register_CONCATENATION(); 41 | TfLiteRegistration* Register_COS(); 42 | TfLiteRegistration* Register_DEPTHWISE_CONV_2D(); 43 | TfLiteRegistration* Register_DEQUANTIZE(); 44 | TfLiteRegistration* Register_EQUAL(); 45 | TfLiteRegistration* Register_FLOOR(); 46 | TfLiteRegistration* Register_FULLY_CONNECTED(); 47 | TfLiteRegistration* Register_GREATER(); 48 | TfLiteRegistration* Register_GREATER_EQUAL(); 49 | TfLiteRegistration* Register_LESS(); 50 | TfLiteRegistration* Register_LESS_EQUAL(); 51 | TfLiteRegistration* Register_LOG(); 52 | TfLiteRegistration* Register_LOGICAL_AND(); 53 | TfLiteRegistration* Register_LOGICAL_NOT(); 54 | TfLiteRegistration* Register_LOGICAL_OR(); 55 | TfLiteRegistration* Register_LOGISTIC(); 56 | TfLiteRegistration* Register_MAXIMUM(); 57 | TfLiteRegistration* Register_MAX_POOL_2D(); 58 | TfLiteRegistration* Register_MEAN(); 59 | TfLiteRegistration* Register_MINIMUM(); 60 | TfLiteRegistration* Register_MUL(); 61 | TfLiteRegistration* Register_NEG(); 62 | TfLiteRegistration* Register_NOT_EQUAL(); 63 | TfLiteRegistration* Register_PACK(); 64 | TfLiteRegistration* Register_PAD(); 65 | TfLiteRegistration* Register_PADV2(); 66 | TfLiteRegistration* Register_PRELU(); 67 | TfLiteRegistration* Register_QUANTIZE(); 68 | TfLiteRegistration* Register_RELU(); 69 | TfLiteRegistration* Register_RELU6(); 70 | TfLiteRegistration* Register_RESHAPE(); 71 | TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR(); 72 | TfLiteRegistration* Register_ROUND(); 73 | TfLiteRegistration* Register_RSQRT(); 74 | TfLiteRegistration* Register_SIN(); 75 | TfLiteRegistration* 
Register_SOFTMAX(); 76 | TfLiteRegistration* Register_SPLIT(); 77 | TfLiteRegistration* Register_SQRT(); 78 | TfLiteRegistration* Register_SQUARE(); 79 | TfLiteRegistration* Register_STRIDED_SLICE(); 80 | TfLiteRegistration* Register_SUB(); 81 | TfLiteRegistration* Register_SVDF(); 82 | TfLiteRegistration* Register_UNPACK(); 83 | TfLiteRegistration* Register_L2_NORMALIZATION(); 84 | TfLiteRegistration* Register_TANH(); 85 | 86 | } // namespace micro 87 | } // namespace ops 88 | } // namespace tflite 89 | 90 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 91 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 
20 | struct Greater { 21 | template 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/neg.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/neg.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace neg { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | switch (input->type) { 34 | // TODO(wangtz): handle for kTfLiteInt8 35 | case kTfLiteFloat32: 36 | reference_ops::Negate(GetTensorShape(input), GetTensorData(input), 37 | GetTensorShape(output), 38 | GetTensorData(output)); 39 | break; 40 | default: 41 | TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", 42 | TfLiteTypeGetName(input->type), input->type); 43 | return kTfLiteError; 44 | } 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace neg 49 | 50 | TfLiteRegistration* Register_NEG() { 51 | static TfLiteRegistration r = {/*init=*/nullptr, 52 | /*free=*/nullptr, 53 | /*prepare=*/nullptr, 54 | /*invoke=*/neg::Eval, 55 | /*profiling_string=*/nullptr, 56 | /*builtin_code=*/0, 57 | /*custom_name=*/nullptr, 58 | /*version=*/0}; 59 | return &r; 60 | } 61 | 62 | } // namespace micro 63 | } // namespace ops 64 | } // namespace tflite 65 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/kernels/round.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/round.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace round { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) { 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Round(GetTensorShape(input), GetTensorData(input), 50 | GetTensorShape(output), 
GetTensorData(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace round 55 | 56 | TfLiteRegistration* Register_ROUND() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, 59 | /*prepare=*/round::Prepare, 60 | /*invoke=*/round::Eval, 61 | /*profiling_string=*/nullptr, 62 | /*builtin_code=*/0, 63 | /*custom_name=*/nullptr, 64 | /*version=*/0}; 65 | return &r; 66 | } 67 | 68 | } // namespace micro 69 | } // namespace ops 70 | } // namespace tflite 71 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/memory_helpers.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_helpers.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 21 | 22 | namespace tflite { 23 | 24 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) { 25 | std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); 26 | uint8_t* aligned_result = reinterpret_cast( 27 | ((data_as_uintptr_t + (alignment - 1)) / alignment) * alignment); 28 | return aligned_result; 29 | } 30 | 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) { 32 | std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); 33 | uint8_t* aligned_result = 34 | reinterpret_cast((data_as_uintptr_t / alignment) * alignment); 35 | return aligned_result; 36 | } 37 | 38 | size_t AlignSizeUp(size_t size, size_t alignment) { 39 | size_t aligned_size = (((size + (alignment - 1)) / alignment) * alignment); 40 | return aligned_size; 41 | } 42 | 43 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 44 | ErrorReporter* reporter) { 45 | switch (type) { 46 | case kTfLiteFloat32: 47 | *size = sizeof(float); 48 | break; 49 | case kTfLiteInt16: 50 | *size = sizeof(int16_t); 51 | break; 52 | case kTfLiteInt32: 53 | *size = sizeof(int32_t); 54 | break; 55 | case kTfLiteUInt8: 56 | *size = sizeof(uint8_t); 57 | break; 58 | case kTfLiteInt8: 59 | *size = sizeof(int8_t); 60 | break; 61 | case kTfLiteInt64: 62 | *size = sizeof(int64_t); 63 | break; 64 | case kTfLiteBool: 65 | *size = sizeof(bool); 66 | break; 67 | case kTfLiteComplex64: 68 | *size = sizeof(float) * 2; 69 | break; 70 | default: 71 | reporter->Report("Type %s (%d) not is not supported", 72 | TfLiteTypeGetName(type), type); 73 | return kTfLiteError; 74 | } 75 | return kTfLiteOk; 76 | } 77 | 78 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 79 | size_t* bytes, size_t* type_size, 80 | ErrorReporter* error_reporter) { 81 | int 
element_count = 1; 82 | for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) { 83 | element_count *= flatbuffer_tensor.shape()->Get(n); 84 | } 85 | 86 | TfLiteType tf_lite_type; 87 | TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), 88 | &tf_lite_type, error_reporter)); 89 | TF_LITE_ENSURE_STATUS( 90 | TfLiteTypeSizeOf(tf_lite_type, type_size, error_reporter)); 91 | *bytes = element_count * (*type_size); 92 | return kTfLiteOk; 93 | } 94 | 95 | } // namespace tflite 96 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | // Returns the next pointer address aligned to the given alignment. 25 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 26 | 27 | // Returns the previous pointer address aligned to the given alignment. 
28 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 29 | 30 | // Returns an increased size that's a multiple of alignment. 31 | size_t AlignSizeUp(size_t size, size_t alignment); 32 | 33 | // Returns size in bytes for a given TfLiteType. 34 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 35 | ErrorReporter* reporter); 36 | 37 | // How many bytes are needed to hold a tensor's contents. 38 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 39 | size_t* bytes, size_t* type_size, 40 | ErrorReporter* error_reporter); 41 | 42 | } // namespace tflite 43 | 44 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 45 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", 29 | kMaxBufferCount); 30 | return kTfLiteError; 31 | } 32 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 33 | next_free_offset_ += size; 34 | ++current_buffer_count_; 35 | return kTfLiteOk; 36 | } 37 | 38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 39 | 40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 41 | 42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 45 | TF_LITE_REPORT_ERROR(error_reporter, 46 | "buffer index %d is outside range 0 to %d", 47 | buffer_index, current_buffer_count_); 48 | return kTfLiteError; 49 | } 50 | *offset = buffer_offsets_[buffer_index]; 51 | return kTfLiteOk; 52 | } 53 | 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 21 | 22 | namespace tflite { 23 | 24 | // The simplest possible memory planner that just lays out all buffers at 25 | // increasing offsets without trying to reuse memory. 26 | class LinearMemoryPlanner : public MemoryPlanner { 27 | public: 28 | LinearMemoryPlanner(); 29 | ~LinearMemoryPlanner() override; 30 | 31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 32 | int first_time_used, int last_time_used) override; 33 | 34 | size_t GetMaximumMemorySize() override; 35 | int GetBufferCount() override; 36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 37 | int buffer_index, int* offset) override; 38 | 39 | private: 40 | static constexpr int kMaxBufferCount = 1024; 41 | size_t buffer_offsets_[kMaxBufferCount]; 42 | int current_buffer_count_; 43 | size_t next_free_offset_; 44 | 45 | TF_LITE_REMOVE_VIRTUAL_DELETE 46 | }; 47 | 48 | } // namespace tflite 49 | 50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 51 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/memory_planner/memory_planner.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | 22 | namespace tflite { 23 | 24 | // Interface class for planning the layout of memory buffers during the 25 | // execution of a graph. 26 | // It's designed to be used by a client that iterates in any order through the 27 | // buffers it wants to lay out, and then calls the getter functions for 28 | // information about the calculated layout. 
For example: 29 | // 30 | // SomeMemoryPlanner planner; 31 | // planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 32 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 33 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 34 | // 35 | // int offset0; 36 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); 37 | // int offset1; 38 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); 39 | // int offset2; 40 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); 41 | // const int arena_size_needed = planner.GetMaximumMemorySize(); 42 | // 43 | // The goal is for applications to be able to experiment with different layout 44 | // strategies without changing their client code, by swapping out classes that 45 | // implement this interface.= 46 | class MemoryPlanner { 47 | public: 48 | MemoryPlanner() {} 49 | virtual ~MemoryPlanner() {} 50 | 51 | // Pass information about a buffer's size and lifetime to the layout 52 | // algorithm. The order this is called implicitly assigns an index to the 53 | // result, so the buffer information that's passed into the N-th call of 54 | // this method will be used as the buffer_index argument to 55 | // GetOffsetForBuffer(). 56 | virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, 57 | int size, int first_time_used, 58 | int last_time_used) = 0; 59 | 60 | // The largest contiguous block of memory that's needed to hold the layout. 61 | virtual size_t GetMaximumMemorySize() = 0; 62 | // How many buffers have been added to the planner. 63 | virtual int GetBufferCount() = 0; 64 | // Calculated layout offset for the N-th buffer added to the planner. 
65 | virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 66 | int buffer_index, int* offset) = 0; 67 | }; 68 | 69 | } // namespace tflite 70 | 71 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 72 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 19 | #include "tensorflow/lite/micro/micro_string.h" 20 | #endif 21 | 22 | namespace tflite { 23 | 24 | int MicroErrorReporter::Report(const char* format, va_list args) { 25 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 26 | // Only pulling in the implementation of this function for builds where we 27 | // expect to make use of it to be extra cautious about not increasing the code 28 | // size. 
29 | static constexpr int kMaxLogLen = 256; 30 | char log_buffer[kMaxLogLen]; 31 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args); 32 | DebugLog(log_buffer); 33 | DebugLog("\r\n"); 34 | #endif 35 | return 0; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/micro_error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include "tensorflow/lite/core/api/error_reporter.h" 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/debug_log.h" 21 | 22 | namespace tflite { 23 | 24 | class MicroErrorReporter : public ErrorReporter { 25 | public: 26 | ~MicroErrorReporter() override {} 27 | int Report(const char* format, va_list args) override; 28 | 29 | private: 30 | TF_LITE_REMOVE_VIRTUAL_DELETE 31 | }; 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 36 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/micro_optional_debug_tools.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Optional debugging functionality. For small sized binaries, these are not 16 | // needed. 
#ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
#define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_

// NOTE(review): the extracted source stripped the angle-bracketed include
// path; <stdarg.h> is required for va_list below.
#include <stdarg.h>

// Implements simple string formatting for numeric types. Returns the number of
// bytes written to output.
extern "C" {
// Functionally equivalent to vsnprintf, trimmed down for TFLite Micro.
// MicroSnprintf() is implemented using MicroVsnprintf().
int MicroVsnprintf(char* output, int len, const char* format, va_list args);
// Functionally equivalent to snprintf, trimmed down for TFLite Micro.
// For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string
// "int 10" in the buffer.
// Floating point values are logged in exponent notation (1.XXX*2^N).
int MicroSnprintf(char* output, int len, const char* format, ...);
}

#endif  // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
27 | 28 | #include "tensorflow/lite/micro/micro_time.h" 29 | 30 | namespace tflite { 31 | 32 | // Reference implementation of the ticks_per_second() function that's required 33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling. 34 | // This returns 0 by default because timing is an optional feature that builds 35 | // without errors on platforms that do not need it. 36 | int32_t ticks_per_second() { return 0; } 37 | 38 | // Reference implementation of the GetCurrentTimeTicks() function that's 39 | // required for a platform to support Tensorflow Lite for Microcontrollers 40 | // profiling. This returns 0 by default because timing is an optional feature 41 | // that builds without errors on platforms that do not need it. 42 | int32_t GetCurrentTimeTicks() { return 0; } 43 | 44 | } // namespace tflite 45 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/micro_time.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | // These functions should be implemented by each target platform, and provide an 23 | // accurate tick count along with how many ticks there are per second. 24 | int32_t ticks_per_second(); 25 | 26 | // Return time in ticks. The meaning of a tick varies per platform. 27 | int32_t GetCurrentTimeTicks(); 28 | 29 | } // namespace tflite 30 | 31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 32 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/simple_memory_allocator.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/simple_memory_allocator.h" 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/micro/memory_helpers.h" 22 | 23 | namespace tflite { 24 | 25 | SimpleMemoryAllocator* CreateInPlaceSimpleMemoryAllocator( 26 | ErrorReporter* error_reporter, uint8_t* buffer, size_t buffer_size) { 27 | SimpleMemoryAllocator tmp = 28 | SimpleMemoryAllocator(error_reporter, buffer, buffer_size); 29 | SimpleMemoryAllocator* in_place_allocator = 30 | reinterpret_cast(tmp.AllocateFromTail( 31 | sizeof(SimpleMemoryAllocator), alignof(SimpleMemoryAllocator))); 32 | *in_place_allocator = tmp; 33 | return in_place_allocator; 34 | } 35 | 36 | uint8_t* SimpleMemoryAllocator::AllocateFromHead(size_t size, 37 | size_t alignment) { 38 | uint8_t* const aligned_result = AlignPointerUp(head_, alignment); 39 | const size_t available_memory = tail_ - aligned_result; 40 | if (available_memory < size) { 41 | TF_LITE_REPORT_ERROR( 42 | error_reporter_, 43 | "Failed to allocate memory. Requested: %u, available %u, missing: %u", 44 | size, available_memory, size - available_memory); 45 | return nullptr; 46 | } 47 | head_ = aligned_result + size; 48 | return aligned_result; 49 | } 50 | 51 | uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, 52 | size_t alignment) { 53 | uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); 54 | if (aligned_result < head_) { 55 | const size_t missing_memory = head_ - aligned_result; 56 | TF_LITE_REPORT_ERROR( 57 | error_reporter_, 58 | "Failed to allocate memory. 
Requested: %u, available %u, missing: %u", 59 | size, size - missing_memory, missing_memory); 60 | return nullptr; 61 | } 62 | tail_ = aligned_result; 63 | return aligned_result; 64 | } 65 | 66 | } // namespace tflite 67 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/simple_memory_allocator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 17 | #define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/core/api/error_reporter.h" 23 | 24 | namespace tflite { 25 | 26 | // TODO(petewarden): This allocator never frees up or reuses any memory, even 27 | // though we have enough information about lifetimes of the tensors to do so. 28 | // This makes it pretty wasteful, so we should use a more intelligent method. 
29 | class SimpleMemoryAllocator { 30 | public: 31 | SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer_head, 32 | uint8_t* buffer_tail) 33 | : error_reporter_(error_reporter), 34 | buffer_head_(buffer_head), 35 | buffer_tail_(buffer_tail), 36 | head_(buffer_head), 37 | tail_(buffer_tail) {} 38 | SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer, 39 | size_t buffer_size) 40 | : SimpleMemoryAllocator(error_reporter, buffer, buffer + buffer_size) {} 41 | 42 | // Allocates memory starting at the head of the arena (lowest address and 43 | // moving upwards). 44 | uint8_t* AllocateFromHead(size_t size, size_t alignment); 45 | // Allocates memory starting at the tail of the arena (highest address and 46 | // moving downwards). 47 | uint8_t* AllocateFromTail(size_t size, size_t alignment); 48 | 49 | uint8_t* GetHead() const { return head_; } 50 | uint8_t* GetTail() const { return tail_; } 51 | size_t GetAvailableMemory() const { return tail_ - head_; } 52 | size_t GetUsedBytes() const { return GetBufferSize() - GetAvailableMemory(); } 53 | 54 | size_t GetHeadUsedBytes() const { return head_ - buffer_head_; } 55 | size_t GetTailUsedBytes() const { return buffer_tail_ - tail_; } 56 | 57 | private: 58 | size_t GetBufferSize() const { return buffer_tail_ - buffer_head_; } 59 | 60 | ErrorReporter* error_reporter_; 61 | uint8_t* buffer_head_; 62 | uint8_t* buffer_tail_; 63 | uint8_t* head_; 64 | uint8_t* tail_; 65 | }; 66 | 67 | // Allocate a SimpleMemoryAllocator from the buffer and then return the pointer 68 | // to this allocator. 
69 | SimpleMemoryAllocator* CreateInPlaceSimpleMemoryAllocator( 70 | ErrorReporter* error_reporter, uint8_t* buffer, size_t buffer_size); 71 | 72 | } // namespace tflite 73 | 74 | #endif // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ 75 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/testing/micro_benchmark.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_MICRO_BENCHMARK_H_ 17 | #define TENSORFLOW_LITE_MICRO_TESTING_MICRO_BENCHMARK_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/lite/micro/micro_error_reporter.h" 22 | #include "tensorflow/lite/micro/micro_time.h" 23 | 24 | namespace micro_benchmark { 25 | extern tflite::ErrorReporter* reporter; 26 | } // namespace micro_benchmark 27 | 28 | #define TF_LITE_MICRO_BENCHMARKS_BEGIN \ 29 | namespace micro_benchmark { \ 30 | tflite::ErrorReporter* reporter; \ 31 | } \ 32 | \ 33 | int main(int argc, char** argv) { \ 34 | tflite::MicroErrorReporter error_reporter; \ 35 | micro_benchmark::reporter = &error_reporter; \ 36 | int32_t start_ticks; \ 37 | int32_t duration_ticks; \ 38 | int32_t duration_ms; 39 | 40 | #define TF_LITE_MICRO_BENCHMARKS_END \ 41 | return 0; \ 42 | } 43 | 44 | #define TF_LITE_MICRO_BENCHMARK(func) \ 45 | if (tflite::ticks_per_second() == 0) { \ 46 | return 0; \ 47 | } \ 48 | start_ticks = tflite::GetCurrentTimeTicks(); \ 49 | func(); \ 50 | duration_ticks = tflite::GetCurrentTimeTicks() - start_ticks; \ 51 | if (duration_ticks > INT_MAX / 1000) { \ 52 | duration_ms = duration_ticks / (tflite::ticks_per_second() / 1000); \ 53 | } else { \ 54 | duration_ms = (duration_ticks * 1000) / tflite::ticks_per_second(); \ 55 | } \ 56 | TF_LITE_REPORT_ERROR(micro_benchmark::reporter, "%s took %d ticks (%d ms)", \ 57 | #func, duration_ticks, duration_ms); 58 | 59 | #endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_BENCHMARK_H_ 60 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2003-2010 Mark Borgerding 2 | 3 | All rights reserved. 
4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h: -------------------------------------------------------------------------------- 1 | #ifndef KISS_FTR_H 2 | #define KISS_FTR_H 3 | 4 | #include "kiss_fft.h" 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | 10 | /* 11 | 12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq. 
13 | 14 | 15 | 16 | */ 17 | 18 | typedef struct kiss_fftr_state *kiss_fftr_cfg; 19 | 20 | 21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); 22 | /* 23 | nfft must be even 24 | 25 | If you don't care to allocate space, use mem = lenmem = NULL 26 | */ 27 | 28 | 29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); 30 | /* 31 | input timedata has nfft scalar points 32 | output freqdata has nfft/2+1 complex points 33 | */ 34 | 35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); 36 | /* 37 | input freqdata has nfft/2+1 complex points 38 | output timedata has nfft scalar points 39 | */ 40 | 41 | #define kiss_fftr_free free 42 | 43 | #ifdef __cplusplus 44 | } 45 | #endif 46 | #endif 47 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/string_type.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Abstract string. We don't want even absl at this level. 
16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_ 17 | #define TENSORFLOW_LITE_STRING_TYPE_H_ 18 | 19 | #include 20 | 21 | namespace tflite { 22 | 23 | using std::string; 24 | 25 | } // namespace tflite 26 | 27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_ 28 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/type_to_tflitetype.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 16 | #define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 17 | 18 | // Arduino build defines abs as a macro here. That is invalid C++, and breaks 19 | // libc++'s header, undefine it. 20 | #ifdef abs 21 | #undef abs 22 | #endif 23 | 24 | #include 25 | #include 26 | 27 | #include "tensorflow/lite/c/common.h" 28 | 29 | namespace tflite { 30 | 31 | // Map statically from a c++ type to a TfLiteType. Used in interpreter for safe 32 | // casts. 
33 | template 34 | constexpr TfLiteType typeToTfLiteType() { 35 | return kTfLiteNoType; 36 | } 37 | template <> 38 | constexpr TfLiteType typeToTfLiteType() { 39 | return kTfLiteInt32; 40 | } 41 | template <> 42 | constexpr TfLiteType typeToTfLiteType() { 43 | return kTfLiteInt16; 44 | } 45 | template <> 46 | constexpr TfLiteType typeToTfLiteType() { 47 | return kTfLiteInt64; 48 | } 49 | template <> 50 | constexpr TfLiteType typeToTfLiteType() { 51 | return kTfLiteFloat32; 52 | } 53 | template <> 54 | constexpr TfLiteType typeToTfLiteType() { 55 | return kTfLiteUInt8; 56 | } 57 | template <> 58 | constexpr TfLiteType typeToTfLiteType() { 59 | return kTfLiteInt8; 60 | } 61 | template <> 62 | constexpr TfLiteType typeToTfLiteType() { 63 | return kTfLiteBool; 64 | } 65 | template <> 66 | constexpr TfLiteType typeToTfLiteType>() { 67 | return kTfLiteComplex64; 68 | } 69 | template <> 70 | constexpr TfLiteType typeToTfLiteType() { 71 | return kTfLiteString; 72 | } 73 | template <> 74 | constexpr TfLiteType typeToTfLiteType() { 75 | return kTfLiteFloat16; 76 | } 77 | template <> 78 | constexpr TfLiteType typeToTfLiteType() { 79 | return kTfLiteFloat64; 80 | } 81 | } // namespace tflite 82 | #endif // TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_ 83 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1. 23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /lib/tfmicro/tools/kiss_fftr.h: -------------------------------------------------------------------------------- 1 | #ifndef KISS_FTR_H 2 | #define KISS_FTR_H 3 | 4 | #include "kiss_fft.h" 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | 10 | /* 11 | 12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq. 
13 | 14 | 15 | 16 | */ 17 | 18 | typedef struct kiss_fftr_state *kiss_fftr_cfg; 19 | 20 | 21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); 22 | /* 23 | nfft must be even 24 | 25 | If you don't care to allocate space, use mem = lenmem = NULL 26 | */ 27 | 28 | 29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); 30 | /* 31 | input timedata has nfft scalar points 32 | output freqdata has nfft/2+1 complex points 33 | */ 34 | 35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); 36 | /* 37 | input freqdata has nfft/2+1 complex points 38 | output timedata has nfft scalar points 39 | */ 40 | 41 | #define kiss_fftr_free free 42 | 43 | #ifdef __cplusplus 44 | } 45 | #endif 46 | #endif 47 | -------------------------------------------------------------------------------- /platformio.ini: -------------------------------------------------------------------------------- 1 | ; PlatformIO Project Configuration File 2 | ; 3 | ; Build options: build flags, source filter 4 | ; Upload options: custom upload port, speed and extra flags 5 | ; Library options: dependencies, extra library storages 6 | ; Advanced options: extra scripting 7 | ; 8 | ; Please visit documentation for the other options and examples 9 | ; https://docs.platformio.org/page/projectconf.html 10 | 11 | [env:esp-wrover-kit] 12 | platform = espressif32 13 | board = esp-wrover-kit 14 | framework = arduino 15 | board_build.partitions = custom.csv 16 | upload_port = /dev/cu.SLAB_USBtoUART 17 | monitor_port = /dev/cu.SLAB_USBtoUART 18 | monitor_speed = 115200 19 | lib_deps=tfmicro,ESP8266_SSD1306@4.1.0 20 | build_flags = -DCORE_DEBUG_LEVEL=5 -DBOARD_HAS_PSRAM -mfix-esp32-psram-cache-issue 21 | -------------------------------------------------------------------------------- /src/audio_provider.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 
The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/micro/micro_error_reporter.h" 21 | 22 | // This is an abstraction around an audio source like a microphone, and is 23 | // expected to return 16-bit PCM sample data for a given point in time. The 24 | // sample data itself should be used as quickly as possible by the caller, since 25 | // to allow memory optimizations there are no guarantees that the samples won't 26 | // be overwritten by new data in the future. In practice, implementations should 27 | // ensure that there's a reasonable time allowed for clients to access the data 28 | // before any reuse. 29 | // The reference implementation can have no platform-specific dependencies, so 30 | // it just returns an array filled with zeros. For real applications, you should 31 | // ensure there's a specialized implementation that accesses hardware APIs. 32 | TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter, 33 | int start_ms, int duration_ms, 34 | int* audio_samples_size, int16_t** audio_samples); 35 | 36 | // Returns the time that audio data was last captured in milliseconds. 
There's 37 | // no contract about what time zero represents, the accuracy, or the granularity 38 | // of the result. Subsequent calls will generally not return a lower value, but 39 | // even that's not guaranteed if there's an overflow wraparound. 40 | // The reference implementation of this function just returns a constantly 41 | // incrementing value for each call, since it would need a non-portable platform 42 | // call to access time information. For real applications, you'll need to write 43 | // your own platform-specific implementation. 44 | int32_t LatestAudioTimestamp(); 45 | 46 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ 47 | -------------------------------------------------------------------------------- /src/command_responder.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "command_responder.h" 17 | 18 | 19 | 20 | 21 | 22 | 23 | // The default implementation writes out the name of the recognized command 24 | // to the error console. Real applications will want to take some custom 25 | // action instead, and should implement their own versions of this function. 
26 | void RespondToCommand(tflite::ErrorReporter* error_reporter, OLEDDisplay *display, 27 | int32_t current_time, const char* found_command, 28 | uint8_t score, bool is_new_command) { 29 | 30 | static uint16_t lastInfo = 0; 31 | static String command = ""; 32 | if (is_new_command) { 33 | TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command, 34 | score, current_time); 35 | command = String(found_command); 36 | lastInfo = millis(); 37 | } 38 | display->setFont(ArialMT_Plain_24); 39 | display->drawString(0, 26, command); 40 | if (millis() - lastInfo > 1000) { 41 | command = ""; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/command_responder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Provides an interface to take an action based on an audio command. 
17 | 18 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ 19 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/micro/micro_error_reporter.h" 23 | #include // Only needed for Arduino 1.6.5 and earlier 24 | #include "SSD1306Wire.h" // legacy include: `#include "SSD1306.h"` 25 | 26 | // Called every time the results of an audio recognition run are available. The 27 | // human-readable name of any recognized command is in the `found_command` 28 | // argument, `score` has the numerical confidence, and `is_new_command` is set 29 | // if the previous command was different to this one. 30 | void RespondToCommand(tflite::ErrorReporter* error_reporter, OLEDDisplay *display, 31 | int32_t current_time, const char* found_command, 32 | uint8_t score, bool is_new_command); 33 | 34 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ 35 | -------------------------------------------------------------------------------- /src/esp/ringbuf.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_ESP_RINGBUF_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_ESP_RINGBUF_H_ 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #ifdef __cplusplus 24 | extern "C" { 25 | #endif 26 | 27 | #define RB_FAIL ESP_FAIL 28 | #define RB_ABORT -1 29 | #define RB_WRITER_FINISHED -2 30 | #define RB_READER_UNBLOCK -3 31 | 32 | typedef struct ringbuf { 33 | char *name; 34 | uint8_t *base; /**< Original pointer */ 35 | /* XXX: these need to be volatile? */ 36 | uint8_t *volatile readptr; /**< Read pointer */ 37 | uint8_t *volatile writeptr; /**< Write pointer */ 38 | volatile ssize_t fill_cnt; /**< Number of filled slots */ 39 | ssize_t size; /**< Buffer size */ 40 | xSemaphoreHandle can_read; 41 | xSemaphoreHandle can_write; 42 | xSemaphoreHandle lock; 43 | int abort_read; 44 | int abort_write; 45 | int writer_finished; // to prevent infinite blocking for buffer read 46 | int reader_unblock; 47 | } ringbuf_t; 48 | 49 | ringbuf_t *rb_init(const char *rb_name, uint32_t size); 50 | void rb_abort_read(ringbuf_t *rb); 51 | void rb_abort_write(ringbuf_t *rb); 52 | void rb_abort(ringbuf_t *rb); 53 | void rb_reset(ringbuf_t *rb); 54 | /** 55 | * @brief Special function to reset the buffer while keeping rb_write aborted. 56 | * This rb needs to be reset again before being useful. 
57 | */ 58 | void rb_reset_and_abort_write(ringbuf_t *rb); 59 | void rb_stat(ringbuf_t *rb); 60 | ssize_t rb_filled(ringbuf_t *rb); 61 | ssize_t rb_available(ringbuf_t *rb); 62 | int rb_read(ringbuf_t *rb, uint8_t *buf, int len, uint32_t ticks_to_wait); 63 | int rb_write(ringbuf_t *rb, const uint8_t *buf, int len, 64 | uint32_t ticks_to_wait); 65 | void rb_cleanup(ringbuf_t *rb); 66 | void rb_signal_writer_finished(ringbuf_t *rb); 67 | void rb_wakeup_reader(ringbuf_t *rb); 68 | int rb_is_writer_finished(ringbuf_t *rb); 69 | 70 | #ifdef __cplusplus 71 | } 72 | #endif 73 | 74 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_ESP_RINGBUF_H_ 75 | -------------------------------------------------------------------------------- /src/feature_provider.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/micro/micro_error_reporter.h" 21 | 22 | // Binds itself to an area of memory intended to hold the input features for an 23 | // audio-recognition neural network model, and fills that data area with the 24 | // features representing the current audio input, for example from a microphone. 25 | // The audio features themselves are a two-dimensional array, made up of 26 | // horizontal slices representing the frequencies at one point in time, stacked 27 | // on top of each other to form a spectrogram showing how those frequencies 28 | // changed over time. 29 | class FeatureProvider { 30 | public: 31 | // Create the provider, and bind it to an area of memory. This memory should 32 | // remain accessible for the lifetime of the provider object, since subsequent 33 | // calls will fill it with feature data. The provider does no memory 34 | // management of this data. 35 | FeatureProvider(int feature_size, uint8_t* feature_data); 36 | ~FeatureProvider(); 37 | 38 | // Fills the feature data with information from audio inputs, and returns how 39 | // many feature slices were updated. 40 | TfLiteStatus PopulateFeatureData(tflite::ErrorReporter* error_reporter, 41 | int32_t last_time_in_ms, int32_t time_in_ms, 42 | int* how_many_new_slices); 43 | 44 | private: 45 | int feature_size_; 46 | uint8_t* feature_data_; 47 | // Make sure we don't try to use cached information if this is the first call 48 | // into the provider. 
49 | bool is_first_run_; 50 | }; 51 | 52 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ 53 | -------------------------------------------------------------------------------- /src/micro_features/micro_features_generator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/micro/micro_error_reporter.h" 21 | 22 | // Sets up any resources needed for the feature generation pipeline. 23 | TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter); 24 | 25 | // Converts audio sample data into a more compact form that's appropriate for 26 | // feeding into a neural network. 
27 | TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter, 28 | const int16_t* input, int input_size, 29 | int output_size, uint8_t* output, 30 | size_t* num_samples_read); 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ 33 | -------------------------------------------------------------------------------- /src/micro_features/micro_model_settings.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "micro_model_settings.h" 17 | 18 | const char* kCategoryLabels[kCategoryCount] = { 19 | "silence", 20 | "unknown", 21 | "yes", 22 | "no", 23 | }; 24 | -------------------------------------------------------------------------------- /src/micro_features/micro_model_settings.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ 18 | 19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays 20 | // on the stack for our working memory. 21 | 22 | // The size of the input time series data we pass to the FFT to produce the 23 | // frequency information. This has to be a power of two, and since we're dealing 24 | // with 30ms of 16KHz inputs, which means 480 samples, this is the next power of two above that. 25 | constexpr int kMaxAudioSampleSize = 512; 26 | constexpr int kAudioSampleFrequency = 16000; 27 | 28 | // The following values are derived from values used during model training. 29 | // If you change the way you preprocess the input, update all these constants. 30 | constexpr int kFeatureSliceSize = 40; 31 | constexpr int kFeatureSliceCount = 49; 32 | constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount); 33 | constexpr int kFeatureSliceStrideMs = 20; 34 | constexpr int kFeatureSliceDurationMs = 30; 35 | 36 | // Variables for the model's output categories. 37 | constexpr int kSilenceIndex = 0; 38 | constexpr int kUnknownIndex = 1; 39 | // If you modify the output categories, you need to update the following values.
40 | constexpr int kCategoryCount = 4; 41 | extern const char* kCategoryLabels[kCategoryCount]; 42 | 43 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ 44 | -------------------------------------------------------------------------------- /src/micro_features/model.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // This is a standard TensorFlow Lite FlatBuffer model file that has been 17 | // converted into a C data array, so it can be easily compiled into a binary 18 | // for devices that don't have a file system. 
It was created using the command: 19 | // xxd -i model.tflite > model.cc 20 | 21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ 22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ 23 | 24 | extern const unsigned char g_model[]; 25 | extern const int g_model_len; 26 | 27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ 28 | -------------------------------------------------------------------------------- /src/micro_features/no_micro_features_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ 18 | 19 | extern const int g_no_micro_f9643d42_nohash_4_width; 20 | extern const int g_no_micro_f9643d42_nohash_4_height; 21 | extern const unsigned char g_no_micro_f9643d42_nohash_4_data[]; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ 24 | -------------------------------------------------------------------------------- /src/micro_features/yes_micro_features_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ 18 | 19 | extern const int g_yes_micro_f2e59fea_nohash_1_width; 20 | extern const int g_yes_micro_f2e59fea_nohash_1_height; 21 | extern const unsigned char g_yes_micro_f2e59fea_nohash_1_data[]; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ 24 | -------------------------------------------------------------------------------- /test/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for PIO Unit Testing and project tests. 3 | 4 | Unit Testing is a software testing method by which individual units of 5 | source code, sets of one or more MCU program modules together with associated 6 | control data, usage procedures, and operating procedures, are tested to 7 | determine whether they are fit for use. Unit testing finds problems early 8 | in the development cycle. 9 | 10 | More information about PIO Unit Testing: 11 | - https://docs.platformio.org/page/plus/unit-testing.html 12 | --------------------------------------------------------------------------------