├── README.md ├── custom.csv ├── include └── sine_model_data.h ├── lib └── tfmicro │ ├── fixedpoint │ ├── LICENSE │ ├── fixedpoint.h │ └── fixedpoint_sse.h │ ├── flatbuffers │ ├── LICENSE.txt │ ├── base.h │ ├── flatbuffers.h │ └── stl_emulation.h │ ├── internal │ ├── LICENSE │ └── detect_platform.h │ ├── kissfft │ ├── COPYING │ ├── _kiss_fft_guts.h │ ├── kiss_fft.h │ └── tools │ │ └── kiss_fftr.h │ └── tensorflow │ ├── core │ └── public │ │ └── version.h │ └── lite │ ├── c │ ├── builtin_op_data.h │ ├── c_api_internal.c │ └── c_api_internal.h │ ├── core │ └── api │ │ ├── error_reporter.cc │ │ ├── error_reporter.h │ │ ├── flatbuffer_conversions.cc │ │ ├── flatbuffer_conversions.h │ │ ├── op_resolver.cc │ │ ├── op_resolver.h │ │ ├── tensor_utils.cc │ │ └── tensor_utils.h │ ├── experimental │ └── micro │ │ ├── compatibility.h │ │ ├── debug_log.cc │ │ ├── debug_log.h │ │ ├── debug_log_numbers.cc │ │ ├── debug_log_numbers.h │ │ ├── kernels │ │ ├── activation_utils.h │ │ ├── activations.cc │ │ ├── add.cc │ │ ├── all_ops_resolver.cc │ │ ├── all_ops_resolver.h │ │ ├── arg_min_max.cc │ │ ├── ceil.cc │ │ ├── comparisons.cc │ │ ├── conv.cc │ │ ├── depthwise_conv.cc │ │ ├── dequantize.cc │ │ ├── elementwise.cc │ │ ├── floor.cc │ │ ├── fully_connected.cc │ │ ├── logical.cc │ │ ├── logistic.cc │ │ ├── maximum_minimum.cc │ │ ├── micro_ops.h │ │ ├── micro_utils.h │ │ ├── mul.cc │ │ ├── neg.cc │ │ ├── pack.cc │ │ ├── pooling.cc │ │ ├── prelu.cc │ │ ├── quantize.cc │ │ ├── reshape.cc │ │ ├── round.cc │ │ ├── softmax.cc │ │ ├── split.cc │ │ ├── strided_slice.cc │ │ ├── svdf.cc │ │ └── unpack.cc │ │ ├── memory_helpers.cc │ │ ├── memory_helpers.h │ │ ├── memory_planner │ │ ├── greedy_memory_planner.cc │ │ ├── greedy_memory_planner.h │ │ ├── linear_memory_planner.cc │ │ ├── linear_memory_planner.h │ │ └── memory_planner.h │ │ ├── micro_allocator.cc │ │ ├── micro_allocator.h │ │ ├── micro_error_reporter.cc │ │ ├── micro_error_reporter.h │ │ ├── micro_interpreter.cc │ │ ├── micro_interpreter.h │ │ ├── micro_mutable_op_resolver.cc │ │ ├── micro_mutable_op_resolver.h │ │ ├── micro_optional_debug_tools.cc │ │ ├── micro_optional_debug_tools.h │ │ ├── micro_utils.cc │ │ ├── micro_utils.h │ │ ├── simple_memory_allocator.cc │ │ ├── simple_memory_allocator.h │ │ ├── test_helpers.cc │ │ ├── test_helpers.h │ │ └── testing │ │ ├── micro_test.h │ │ └── test_utils.h │ ├── kernels │ ├── internal │ │ ├── common.h │ │ ├── compatibility.h │ │ ├── optimized │ │ │ └── neon_check.h │ │ ├── quantization_util.cc │ │ ├── quantization_util.h │ │ ├── reference │ │ │ ├── add.h │ │ │ ├── arg_min_max.h │ │ │ ├── binary_function.h │ │ │ ├── ceil.h │ │ │ ├── comparisons.h │ │ │ ├── conv.h │ │ │ ├── depthwiseconv_float.h │ │ │ ├── depthwiseconv_uint8.h │ │ │ ├── dequantize.h │ │ │ ├── floor.h │ │ │ ├── fully_connected.h │ │ │ ├── integer_ops │ │ │ │ ├── add.h │ │ │ │ ├── conv.h │ │ │ │ ├── depthwise_conv.h │ │ │ │ ├── fully_connected.h │ │ │ │ ├── mul.h │ │ │ │ ├── pooling.h │ │ │ │ └── softmax.h │ │ │ ├── logistic.h │ │ │ ├── maximum_minimum.h │ │ │ ├── mul.h │ │ │ ├── neg.h │ │ │ ├── pooling.h │ │ │ ├── prelu.h │ │ │ ├── process_broadcast_shapes.h │ │ │ ├── quantize.h │ │ │ ├── round.h │ │ │ ├── softmax.h │ │ │ └── strided_slice.h │ │ ├── round.h │ │ ├── strided_slice_logic.h │ │ ├── tensor.h │ │ ├── tensor_ctypes.h │ │ └── types.h │ ├── kernel_util.cc │ ├── kernel_util.h │ ├── op_macros.h │ └── padding.h │ ├── schema │ └── schema_generated.h │ ├── string_type.h │ ├── string_util.h │ ├── type_to_tflitetype.h │ └── version.h ├── platformio.ini 
└── src ├── main.cpp └── sine_model_data.cc /README.md: -------------------------------------------------------------------------------- 1 | # ESP32-TensorFlow-Lite-Sample 2 | Sample project for deploying TensorFlow Lite models on the ESP32 using PlatformIO 3 | 4 | Deploy to the ESP32 using: 5 | ```platformio run -t upload --upload-port /dev/ttyUSB0``` 6 | 7 | Access the serial output using: 8 | ```screen /dev/ttyUSB0 115200``` 9 | --------------------------------------------------------------------------------
/custom.csv: -------------------------------------------------------------------------------- 1 | # Name, Type, SubType, Offset, Size, Flags 2 | nvs, data, nvs, 0x9000, 20K, 3 | otadata, data, ota, 0xe000, 8K, 4 | firm, app, ota_0, , 3400K, 5 | eeprom, data, 0x99, , 4K, 6 | spiffs, data, spiffs, , 444K, 7 | --------------------------------------------------------------------------------
/include/sine_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // This is a standard TensorFlow Lite model file that has been converted into a 17 | // C data array, so it can be easily compiled into a binary for devices that 18 | // don't have a file system. It was created using the command: 19 | // xxd -i sine_model.tflite > sine_model_data.cc 20 | 21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_HELLO_WORLD_SINE_MODEL_DATA_H_ 22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_HELLO_WORLD_SINE_MODEL_DATA_H_ 23 | 24 | extern const unsigned char g_sine_model_data[]; 25 | extern const int g_sine_model_data_len; 26 | 27 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_HELLO_WORLD_SINE_MODEL_DATA_H_ 28 | --------------------------------------------------------------------------------
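The matching `src/sine_model_data.cc` supplies the two symbols declared above. As a shape-only sketch (every byte and the length below are placeholders, not the real model; `xxd` emits names derived from the input filename, which are then renamed to match these externs):

```c
/* Illustrative xxd -i output shape -- placeholder bytes only. */
const unsigned char g_sine_model_data[] = {
    0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, /* "TFL3" identifier */
    /* ... remaining FlatBuffer bytes ... */
};
const int g_sine_model_data_len = 2640; /* placeholder length */
```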
/lib/tfmicro/kissfft/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2003-2010 Mark Borgerding 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | --------------------------------------------------------------------------------
/lib/tfmicro/kissfft/kiss_fft.h: -------------------------------------------------------------------------------- 1 | #ifndef KISS_FFT_H 2 | #define KISS_FFT_H 3 | 4 | #include <stdlib.h> 5 | #include <stdio.h> 6 | #include <math.h> 7 | #include <string.h> 8 | 9 | #ifdef __cplusplus 10 | extern "C" { 11 | #endif 12 | 13 | /* 14 | ATTENTION! 15 | If you would like a : 16 | -- a utility that will handle the caching of fft objects 17 | -- real-only (no imaginary time component ) FFT 18 | -- a multi-dimensional FFT 19 | -- a command-line utility to perform ffts 20 | -- a command-line utility to perform fast-convolution filtering 21 | 22 | Then see kfc.h kiss_fftr.h kiss_fftnd.h fftutil.c kiss_fastfir.c 23 | in the tools/ directory. 24 | */ 25 | 26 | #ifdef USE_SIMD 27 | # include <xmmintrin.h> 28 | # define kiss_fft_scalar __m128 29 | #define KISS_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16) 30 | #define KISS_FFT_FREE _mm_free 31 | #else 32 | #define KISS_FFT_MALLOC(X) (void*)(0) /* Patched. */ 33 | #define KISS_FFT_FREE(X) /* Patched. */ 34 | #endif 35 | 36 | 37 | // Patched automatically by download_dependencies.sh so default is 16 bit. 38 | #ifndef FIXED_POINT 39 | #define FIXED_POINT (16) 40 | #endif 41 | // End patch. 42 | 43 | #ifdef FIXED_POINT 44 | #include <stdint.h> 45 | # if (FIXED_POINT == 32) 46 | # define kiss_fft_scalar int32_t 47 | # else 48 | # define kiss_fft_scalar int16_t 49 | # endif 50 | #else 51 | # ifndef kiss_fft_scalar 52 | /* default is float */ 53 | # define kiss_fft_scalar float 54 | # endif 55 | #endif 56 | 57 | typedef struct { 58 | kiss_fft_scalar r; 59 | kiss_fft_scalar i; 60 | }kiss_fft_cpx; 61 | 62 | typedef struct kiss_fft_state* kiss_fft_cfg; 63 | 64 | /* 65 | * kiss_fft_alloc 66 | * 67 | * Initialize a FFT (or IFFT) algorithm's cfg/state buffer. 68 | * 69 | * typical usage: kiss_fft_cfg mycfg=kiss_fft_alloc(1024,0,NULL,NULL); 70 | * 71 | * The return value from fft_alloc is a cfg buffer used internally 72 | * by the fft routine or NULL. 73 | * 74 | * If lenmem is NULL, then kiss_fft_alloc will allocate a cfg buffer using malloc. 75 | * The returned value should be free()d when done to avoid memory leaks. 76 | * 77 | * The state can be placed in a user supplied buffer 'mem': 78 | * If lenmem is not NULL and mem is not NULL and *lenmem is large enough, 79 | * then the function places the cfg in mem and the size used in *lenmem 80 | * and returns mem. 81 | * 82 | * If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough), 83 | * then the function returns NULL and places the minimum cfg 84 | * buffer size in *lenmem.
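 *
 * An illustrative two-call sequence for a caller-supplied buffer (a sketch
 * added for this dump, not part of the original header):
 *
 *   size_t lenmem = 0;
 *   kiss_fft_alloc(1024, 0, NULL, &lenmem);   // query: returns NULL, sets lenmem
 *   void* mem = malloc(lenmem);
 *   kiss_fft_cfg cfg = kiss_fft_alloc(1024, 0, mem, &lenmem);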
85 | * */ 86 | 87 | kiss_fft_cfg kiss_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem); 88 | 89 | /* 90 | * kiss_fft(cfg,in_out_buf) 91 | * 92 | * Perform an FFT on a complex input buffer. 93 | * for a forward FFT, 94 | * fin should be f[0] , f[1] , ... ,f[nfft-1] 95 | * fout will be F[0] , F[1] , ... ,F[nfft-1] 96 | * Note that each element is complex and can be accessed like 97 | f[k].r and f[k].i 98 | * */ 99 | void kiss_fft(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout); 100 | 101 | /* 102 | A more generic version of the above function. It reads its input from every Nth sample. 103 | * */ 104 | void kiss_fft_stride(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout,int fin_stride); 105 | 106 | /* If kiss_fft_alloc allocated a buffer, it is one contiguous 107 | buffer and can be simply free()d when no longer needed*/ 108 | #define kiss_fft_free free 109 | 110 | /* 111 | Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up 112 | your compiler output to call this before you exit. 113 | */ 114 | void kiss_fft_cleanup(void); 115 | 116 | 117 | /* 118 | * Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5) 119 | */ 120 | int kiss_fft_next_fast_size(int n); 121 | 122 | /* for real ffts, we need an even size */ 123 | #define kiss_fftr_next_fast_size_real(n) \ 124 | (kiss_fft_next_fast_size( ((n)+1)>>1)<<1) 125 | 126 | #ifdef __cplusplus 127 | } 128 | #endif 129 | 130 | #endif 131 | --------------------------------------------------------------------------------
/lib/tfmicro/kissfft/tools/kiss_fftr.h: -------------------------------------------------------------------------------- 1 | #ifndef KISS_FTR_H 2 | #define KISS_FTR_H 3 | 4 | #include "kiss_fft.h" 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | 10 | /* 11 | 12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq. 13 | 14 | 15 | 16 | */ 17 | 18 | typedef struct kiss_fftr_state *kiss_fftr_cfg; 19 | 20 | 21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); 22 | /* 23 | nfft must be even 24 | 25 | If you don't care to allocate space, use mem = lenmem = NULL 26 | */ 27 | 28 | 29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); 30 | /* 31 | input timedata has nfft scalar points 32 | output freqdata has nfft/2+1 complex points 33 | */ 34 | 35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); 36 | /* 37 | input freqdata has nfft/2+1 complex points 38 | output timedata has nfft scalar points 39 | */ 40 | 41 | #define kiss_fftr_free free 42 | 43 | #ifdef __cplusplus 44 | } 45 | #endif 46 | #endif 47 | --------------------------------------------------------------------------------
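A minimal usage sketch of the real-input path described above (not part of the library; `NFFT` and the cfg buffer size are assumptions). Because this tfmicro copy patches `KISS_FFT_MALLOC` out, the cfg is placed in a caller-supplied buffer rather than heap-allocated:

```c
#include "tools/kiss_fftr.h"

#define NFFT 512                 /* assumed; must be even */
static char cfg_mem[4096];       /* assumed big enough; query the size first in real code */

void spectrum_example(const kiss_fft_scalar timedata[NFFT]) {
  kiss_fft_cpx freqdata[NFFT / 2 + 1];  /* nfft/2+1 complex points */
  size_t lenmem = sizeof(cfg_mem);
  kiss_fftr_cfg cfg = kiss_fftr_alloc(NFFT, 0, cfg_mem, &lenmem);
  if (cfg != NULL) {
    kiss_fftr(cfg, timedata, freqdata);
  }
}
```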
/lib/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include <cstdarg> 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/core/api/error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 17 | 18 | #include <cstdarg> 19 | 20 | namespace tflite { 21 | 22 | /// A functor that reports errors to a supporting system. Invoked similar to 23 | /// printf. 24 | /// 25 | /// Usage: 26 | /// ErrorReporter foo; 27 | /// foo.Report("test %d", 5); 28 | /// or 29 | /// va_list args; 30 | /// foo.Report("test %d", args); // where args is va_list 31 | /// 32 | /// Subclass ErrorReporter to provide another reporting destination. 33 | /// For example, if you have a GUI program, you might redirect to a buffer 34 | /// that drives a GUI error log box. 35 | class ErrorReporter { 36 | public: 37 | virtual ~ErrorReporter() {} 38 | virtual int Report(const char* format, va_list args) = 0; 39 | int Report(const char* format, ...); 40 | int ReportError(void*, const char* format, ...); 41 | }; 42 | 43 | } // namespace tflite 44 | 45 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 46 | --------------------------------------------------------------------------------
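A sketch of the subclassing suggested above (the class name and buffer size are invented for illustration; `vsnprintf` needs `<cstdio>`):

```cpp
#include <cstdio>

#include "tensorflow/lite/core/api/error_reporter.h"

// Captures the formatted message in a buffer instead of printing it.
class BufferErrorReporter : public tflite::ErrorReporter {
 public:
  int Report(const char* format, va_list args) override {
    return vsnprintf(buffer_, sizeof(buffer_), format, args);
  }
  const char* message() const { return buffer_; }

 private:
  char buffer_[128] = {};
};
```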
/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 16 | #define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 17 | 18 | // These functions transform codes and data structures that are defined in the 19 | // flatbuffer serialization format into in-memory values that are used by the 20 | // runtime API and interpreter. 21 | 22 | #include "tensorflow/lite/c/c_api_internal.h" 23 | #include "tensorflow/lite/core/api/error_reporter.h" 24 | #include "tensorflow/lite/core/api/op_resolver.h" 25 | #include "tensorflow/lite/schema/schema_generated.h" 26 | 27 | namespace tflite { 28 | 29 | // Interface class for builtin data allocations. 30 | class BuiltinDataAllocator { 31 | public: 32 | virtual void* Allocate(size_t size) = 0; 33 | virtual void Deallocate(void* data) = 0; 34 | 35 | // Allocate a structure, but make sure it is a POD structure that doesn't 36 | // require constructors to run. The reason we do this, is that Interpreter's C 37 | // extension part will take ownership so destructors will not be run during 38 | // deallocation. 39 | template <typename T> 40 | T* AllocatePOD() { 41 | static_assert(std::is_pod<T>::value, "Builtin data structure must be POD."); 42 | return static_cast<T*>(this->Allocate(sizeof(T))); 43 | } 44 | 45 | virtual ~BuiltinDataAllocator() {} 46 | }; 47 | 48 | // Parse the appropriate data out of the op. 49 | // 50 | // This handles builtin data explicitly as there are flatbuffer schemas. 51 | // If it returns kTfLiteOk, it passes the data out with `builtin_data`. The 52 | // calling function has to pass in an allocator object, and this allocator 53 | // will be called to reserve space for the output data. If the calling 54 | // function's allocator reserves memory on the heap, then it's the calling 55 | // function's responsibility to free it. 56 | // If it returns kTfLiteError, `builtin_data` will be `nullptr`. 57 | TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, 58 | ErrorReporter* error_reporter, 59 | BuiltinDataAllocator* allocator, void** builtin_data); 60 | 61 | // Converts the tensor data type used in the flat buffer to the representation 62 | // used by the runtime. 63 | TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, 64 | ErrorReporter* error_reporter); 65 | 66 | } // namespace tflite 67 | 68 | #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ 69 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | TfLiteStatus GetRegistrationFromOpCode( 21 | const OperatorCode* opcode, const OpResolver& op_resolver, 22 | ErrorReporter* error_reporter, const TfLiteRegistration** registration) { 23 | TfLiteStatus status = kTfLiteOk; 24 | *registration = nullptr; 25 | auto builtin_code = opcode->builtin_code(); 26 | int version = opcode->version(); 27 | 28 | if (builtin_code > BuiltinOperator_MAX || 29 | builtin_code < BuiltinOperator_MIN) { 30 | error_reporter->Report( 31 | "Op builtin_code out of range: %d. Are you using old TFLite binary " 32 | "with newer model?", 33 | builtin_code); 34 | status = kTfLiteError; 35 | } else if (builtin_code != BuiltinOperator_CUSTOM) { 36 | *registration = op_resolver.FindOp(builtin_code, version); 37 | if (*registration == nullptr) { 38 | error_reporter->Report( 39 | "Didn't find op for builtin opcode '%s' version '%d'\n", 40 | EnumNameBuiltinOperator(builtin_code), version); 41 | status = kTfLiteError; 42 | } 43 | } else if (!opcode->custom_code()) { 44 | error_reporter->Report( 45 | "Operator with CUSTOM builtin_code has no custom_code.\n"); 46 | status = kTfLiteError; 47 | } else { 48 | const char* name = opcode->custom_code()->c_str(); 49 | *registration = op_resolver.FindOp(name, version); 50 | if (*registration == nullptr) { 51 | // Do not report error for unresolved custom op, we do the final check 52 | // while preparing ops. 53 | status = kTfLiteError; 54 | } 55 | } 56 | return status; 57 | } 58 | 59 | } // namespace tflite 60 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. 
This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 23 | if (!tensor->is_variable) { 24 | return kTfLiteOk; 25 | } 26 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 27 | // to the value of the buffer. 28 | int value = 0; 29 | if (tensor->type == kTfLiteInt8) { 30 | value = tensor->params.zero_point; 31 | } 32 | // TODO(b/139446230): Provide a platform header to better handle these 33 | // specific scenarios. 34 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 35 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 36 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 37 | memset(tensor->data.raw, value, tensor->bytes); 38 | #else 39 | char* raw_ptr = tensor->data.raw; 40 | for (int i = 0; i < tensor->bytes; ++i) { 41 | *raw_ptr = value; 42 | raw_ptr++; 43 | } 44 | #endif 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace tflite 49 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_ 33 | --------------------------------------------------------------------------------
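A sketch of the documented usage, with a made-up interface (the macro sits in the private section of the subclass):

```cpp
#include "tensorflow/lite/experimental/micro/compatibility.h"

class Worker {  // hypothetical virtual base
 public:
  virtual ~Worker() {}
  virtual void Run() = 0;
};

class NoOpWorker : public Worker {
 public:
  void Run() override {}

 private:
  // Prevents ::operator delete from being linked in for this subclass when
  // TF_LITE_STATIC_MEMORY is defined.
  TF_LITE_REMOVE_VIRTUAL_DELETE
};
```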
/lib/tfmicro/tensorflow/lite/experimental/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of the DebugLog() function that's required for a 17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is 18 | // the only function that's absolutely required to be available on a target 19 | // device, since it's used for communicating test results back to the host so 20 | // that we can verify the implementation is working correctly. 21 | // It's designed to be as easy as possible to supply an implementation though. 22 | // On platforms that have a POSIX stack or C library, it can be written as a 23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error 24 | // stream of the console, but if there's no OS or C library available, there's 25 | // almost always an equivalent way to write out a string to some serial 26 | // interface that can be used instead. For example on Arm M-series MCUs, calling 27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to 28 | // whatever debug serial connection is available. If you're running mbed, you 29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling 30 | // `pc.printf("%s", s)`. 31 | // To add an equivalent function for your own platform, create your own 32 | // implementation file, and place it in a subfolder named after the OS 33 | // you're targeting. For example, see the Cortex M bare metal version in 34 | // tensorflow/lite/experimental/micro/bluepill/debug_log.cc or the mbed one in 35 | // tensorflow/lite/experimental/micro/mbed/debug_log.cc. 36 | 37 | #include "tensorflow/lite/experimental/micro/debug_log.h" 38 | 39 | #include <cstdio> 40 | 41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); } 42 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/experimental/micro/debug_log.cc.
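// As an illustrative sketch only (not part of this header): on an ESP32
// running the Arduino core, a serial-backed implementation can be as small as
//
//   extern "C" void DebugLog(const char* s) { Serial.print(s); }
//
// with Serial.begin() called once at startup.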
21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/debug_log_numbers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_ 17 | 18 | #include 19 | 20 | // Output numbers to the debug logging stream. 21 | extern "C" { 22 | void DebugLogInt32(int32_t i); 23 | void DebugLogUInt32(uint32_t i); 24 | void DebugLogHex(uint32_t i); 25 | void DebugLogFloat(float i); 26 | } 27 | 28 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_ 29 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include "tensorflow/lite/c/builtin_op_data.h" 24 | 25 | namespace tflite { 26 | namespace ops { 27 | namespace micro { 28 | 29 | // Returns the floating point value for a fused activation: 30 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 31 | switch (act) { 32 | case kTfLiteActNone: 33 | return a; 34 | case kTfLiteActRelu: 35 | return a < 0.f ? 0.f : a; 36 | case kTfLiteActRelu1: 37 | return a < 0.f ? 0.f : ((a > 1.f) ? 1.f : a); 38 | case kTfLiteActRelu6: 39 | return a < 0.f ? 0.f : ((a > 6.f) ? 
6.f : a); 40 | case kTfLiteActTanh: 41 | return (expf(a) - expf(-a)) / (expf(a) + expf(-a)); 42 | case kTfLiteActSignBit: 43 | return std::signbit(a); 44 | case kTfLiteActSigmoid: 45 | return 1.f / (1.f + expf(-a)); 46 | default: 47 | return a; 48 | } 49 | } 50 | 51 | } // namespace micro 52 | } // namespace ops 53 | } // namespace tflite 54 | 55 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ACTIVATION_UTILS_H_ 56 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/all_ops_resolver.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | #include "tensorflow/lite/experimental/micro/kernels/all_ops_resolver.h" 14 | 15 | #include "tensorflow/lite/experimental/micro/kernels/micro_ops.h" 16 | 17 | namespace tflite { 18 | namespace ops { 19 | namespace micro { 20 | 21 | // Register each supported op with: 22 | // AddBuiltin(<operator ID>, <registration>, [min version], [max version]) 23 | AllOpsResolver::AllOpsResolver() { 24 | AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(), 1, 25 | 3); 26 | AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(), 1, 4); 27 | AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D()); 28 | AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX()); 29 | AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC()); 30 | AddBuiltin(BuiltinOperator_SVDF, Register_SVDF()); 31 | AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), 1, 3); 32 | AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D()); 33 | AddBuiltin(BuiltinOperator_ABS, Register_ABS()); 34 | AddBuiltin(BuiltinOperator_SIN, Register_SIN()); 35 | AddBuiltin(BuiltinOperator_COS, Register_COS()); 36 | AddBuiltin(BuiltinOperator_LOG, Register_LOG()); 37 | AddBuiltin(BuiltinOperator_SQRT, Register_SQRT()); 38 | AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT()); 39 | AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE()); 40 | AddBuiltin(BuiltinOperator_PRELU, Register_PRELU()); 41 | AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR()); 42 | AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM()); 43 | AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM()); 44 | AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX()); 45 | AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN()); 46 | AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR()); 47 | AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND()); 48 | AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT()); 49 | AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE()); 50 | AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL()); 51 | AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL()); 52 | AddBuiltin(BuiltinOperator_GREATER, Register_GREATER()); 53 |
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL()); 54 | AddBuiltin(BuiltinOperator_LESS, Register_LESS()); 55 | AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL()); 56 | AddBuiltin(BuiltinOperator_CEIL, Register_CEIL()); 57 | AddBuiltin(BuiltinOperator_ROUND, Register_ROUND()); 58 | AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE()); 59 | AddBuiltin(BuiltinOperator_PACK, Register_PACK()); 60 | AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(), 1, 3); 61 | AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK()); 62 | AddBuiltin(BuiltinOperator_NEG, Register_NEG()); 63 | AddBuiltin(BuiltinOperator_ADD, Register_ADD()); 64 | AddBuiltin(BuiltinOperator_MUL, Register_MUL()); 65 | AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE()); 66 | AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(), 1, 2); 67 | AddBuiltin(BuiltinOperator_RELU, Register_RELU()); 68 | AddBuiltin(BuiltinOperator_RELU6, Register_RELU6()); 69 | } 70 | 71 | } // namespace micro 72 | } // namespace ops 73 | } // namespace tflite 74 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/experimental/micro/compatibility.h" 16 | #include "tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | namespace ops { 20 | namespace micro { 21 | 22 | class AllOpsResolver : public MicroMutableOpResolver { 23 | public: 24 | AllOpsResolver(); 25 | 26 | private: 27 | TF_LITE_REMOVE_VIRTUAL_DELETE 28 | }; 29 | 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | 34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_ 35 | --------------------------------------------------------------------------------
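Pulling in every kernel costs flash; a leaner resolver in the same style registers only what a given model needs (a sketch: the class name is invented, and the registration call mirrors the ones in all_ops_resolver.cc):

```cpp
#include "tensorflow/lite/experimental/micro/compatibility.h"
#include "tensorflow/lite/experimental/micro/kernels/micro_ops.h"
#include "tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h"

namespace tflite {
namespace ops {
namespace micro {

class SineOpsResolver : public MicroMutableOpResolver {
 public:
  SineOpsResolver() {
    // The hello-world sine model is a small stack of fully connected layers.
    AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(), 1,
               4);
  }

 private:
  TF_LITE_REMOVE_VIRTUAL_DELETE
};

}  // namespace micro
}  // namespace ops
}  // namespace tflite
```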
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/arg_min_max.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | #include "tensorflow/lite/experimental/micro/kernels/micro_utils.h" 21 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 22 | #include "tensorflow/lite/kernels/kernel_util.h" 23 | 24 | namespace tflite { 25 | namespace ops { 26 | namespace micro { 27 | namespace arg_min_max { 28 | 29 | constexpr int kInputTensor = 0; 30 | constexpr int kAxis = 1; 31 | constexpr int kOutputTensor = 0; 32 | 33 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 34 | return kTfLiteOk; 35 | } 36 | 37 | template <typename T1, typename T2, typename T3> 38 | inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, 39 | const T1* input1_data, const T3* input2_data, 40 | const RuntimeShape& output_shape, T2* output_data, 41 | bool is_arg_max) { 42 | if (is_arg_max) { 43 | reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, 44 | output_shape, output_data, micro::Greater()); 45 | } else { 46 | reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, 47 | output_shape, output_data, micro::Less()); 48 | } 49 | } 50 | 51 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { 52 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 53 | const TfLiteTensor* axis = GetInput(context, node, kAxis); 54 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 55 | 56 | #define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ 57 | ArgMinMaxHelper(GetTensorShape(input), GetTensorData<data_type>(input), \ 58 | GetTensorData<axis_type>(axis), GetTensorShape(output), \ 59 | GetTensorData<output_type>(output), is_arg_max) 60 | if (axis->type == kTfLiteInt32) { 61 | if (output->type == kTfLiteInt32) { 62 | switch (input->type) { 63 | case kTfLiteFloat32: 64 | TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); 65 | break; 66 | case kTfLiteUInt8: 67 | TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); 68 | break; 69 | case kTfLiteInt8: 70 | TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); 71 | break; 72 | default: 73 | context->ReportError(context, 74 | "Only float32, uint8 and int8 are " 75 | "supported currently, got %s.", 76 | TfLiteTypeGetName(input->type)); 77 | return kTfLiteError; 78 | } 79 | } else { 80 | context->ReportError(context, 81 | "Only int32 are supported currently, got %s.", 82 | TfLiteTypeGetName(output->type)); 83 | return kTfLiteError; 84 | } 85 | } else { 86 | context->ReportError(context, "Only int32 are supported currently, got %s.", 87 | TfLiteTypeGetName(axis->type)); 88 | return kTfLiteError; 89 | } 90 | 91 | #undef TF_LITE_ARG_MIN_MAX 92 | 93 | return kTfLiteOk; 94 | } 95 | 96 | TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { 97 | return Eval(context, node, false); 98 | } 99 | 100 | TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { 101 | return Eval(context, node, true); 102 | } 103 | 104 | } // namespace arg_min_max 105 | 106 | TfLiteRegistration* Register_ARG_MAX() { 107 | static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, 108 | arg_min_max::ArgMaxEval}; 109 | return &r; 110 | } 111 | 112 | TfLiteRegistration* Register_ARG_MIN() { 113 | static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, 114 | arg_min_max::ArgMinEval}; 115 |
return &r; 116 | } 117 | 118 | } // namespace micro 119 | } // namespace ops 120 | } // namespace tflite 121 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/ceil.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/ceil.h" 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace ceil { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) { 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Ceil(GetTensorShape(input), GetTensorData<float>(input), 50 | GetTensorShape(output), GetTensorData<float>(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace ceil 55 | 56 | TfLiteRegistration* Register_CEIL() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, ceil::Prepare, ceil::Eval}; 59 | return &r; 60 | } 61 | 62 | } // namespace micro 63 | } // namespace ops 64 | } // namespace tflite 65 | --------------------------------------------------------------------------------
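The ceil kernel above shows the standard micro-kernel shape: a `TfLiteRegistration` whose four slots are init, free, prepare, and eval. A skeleton for a hypothetical pass-through op, in the same style (illustrative only; `IDENTITY` is not a registered builtin here):

```cpp
#include <string.h>

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace micro {
namespace identity {

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  // Shape/type checks run once, before any inference.
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes);
  return kTfLiteOk;
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // Per-inference work: copy input bytes straight to the output.
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  memcpy(output->data.raw, input->data.raw, input->bytes);
  return kTfLiteOk;
}

}  // namespace identity

TfLiteRegistration* Register_IDENTITY() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 identity::Prepare, identity::Eval};
  return &r;
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite
```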
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/dequantize.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/dequantize.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 21 | #include "tensorflow/lite/kernels/kernel_util.h" 22 | 23 | namespace tflite { 24 | namespace ops { 25 | namespace micro { 26 | namespace dequantize { 27 | 28 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 29 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 30 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 31 | 32 | // TODO(b/140515557): Add cached dequant to improve hybrid model performance. 33 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 34 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 35 | 36 | TF_LITE_ENSURE(context, 37 | input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); 38 | TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); 39 | 40 | return kTfLiteOk; 41 | } 42 | 43 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 44 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 45 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 46 | 47 | tflite::DequantizationParams op_params; 48 | op_params.zero_point = input->params.zero_point; 49 | op_params.scale = input->params.scale; 50 | switch (input->type) { 51 | case kTfLiteUInt8: 52 | reference_ops::Dequantize( 53 | op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), 54 | GetTensorShape(output), GetTensorData<float>(output)); 55 | break; 56 | case kTfLiteInt8: 57 | reference_ops::Dequantize( 58 | op_params, GetTensorShape(input), GetTensorData<int8_t>(input), 59 | GetTensorShape(output), GetTensorData<float>(output)); 60 | break; 61 | default: 62 | context->ReportError(context, "Type %s (%d) not supported.", 63 | TfLiteTypeGetName(input->type), input->type); 64 | return kTfLiteError; 65 | } 66 | 67 | return kTfLiteOk; 68 | } 69 | 70 | } // namespace dequantize 71 | 72 | TfLiteRegistration* Register_DEQUANTIZE() { 73 | static TfLiteRegistration r = {nullptr, nullptr, dequantize::Prepare, 74 | dequantize::Eval}; 75 | return &r; 76 | } 77 | 78 | } // namespace micro 79 | } // namespace ops 80 | } // namespace tflite 81 | --------------------------------------------------------------------------------
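The reference op applies plain affine dequantization elementwise. A one-element sketch of the math (the helper is hypothetical; it mirrors the params set in Eval above):

```cpp
#include <cstdint>

#include "tensorflow/lite/kernels/internal/types.h"

// result = scale * (quantized_value - zero_point)
inline float DequantizeOne(uint8_t q, const tflite::DequantizationParams& p) {
  return static_cast<float>(p.scale * (q - p.zero_point));
}
// e.g. scale = 0.5, zero_point = 128: q = 130 -> 0.5 * (130 - 128) = 1.0f
```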
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/c_api_internal.h" 17 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace floor { 25 | 26 | constexpr int kInputTensor = 0; 27 | constexpr int kOutputTensor = 0; 28 | 29 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 30 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 31 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input), 34 | GetTensorShape(output), GetTensorData<float>(output)); 35 | return kTfLiteOk; 36 | } 37 | } // namespace floor 38 | 39 | TfLiteRegistration* Register_FLOOR() { 40 | static TfLiteRegistration r = {/*init=*/nullptr, 41 | /*free=*/nullptr, /*prepare=*/nullptr, 42 | floor::Eval}; 43 | return &r; 44 | } 45 | 46 | } // namespace micro 47 | } // namespace ops 48 | } // namespace tflite 49 | --------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/logical.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/c/c_api_internal.h" 16 | #include "tensorflow/lite/kernels/internal/reference/binary_function.h" 17 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 18 | #include "tensorflow/lite/kernels/kernel_util.h" 19 | #include "tensorflow/lite/kernels/op_macros.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace logical { 25 | namespace { 26 | 27 | // Input/output tensor index.
28 | constexpr int kInputTensor1 = 0; 29 | constexpr int kInputTensor2 = 1; 30 | constexpr int kOutputTensor = 0; 31 | 32 | TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, 33 | bool (*func)(bool, bool)) { 34 | const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); 35 | const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); 36 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 37 | 38 | if (HaveSameShapes(input1, input2)) { 39 | reference_ops::BinaryFunction( 40 | GetTensorShape(input1), GetTensorData(input1), 41 | GetTensorShape(input2), GetTensorData(input2), 42 | GetTensorShape(output), GetTensorData(output), func); 43 | } else { 44 | reference_ops::BroadcastBinaryFunction4DSlow( 45 | GetTensorShape(input1), GetTensorData(input1), 46 | GetTensorShape(input2), GetTensorData(input2), 47 | GetTensorShape(output), GetTensorData(output), func); 48 | } 49 | 50 | return kTfLiteOk; 51 | } 52 | 53 | bool LogicalOr(bool x, bool y) { return x || y; } 54 | 55 | TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { 56 | return LogicalImpl(context, node, LogicalOr); 57 | } 58 | 59 | bool LogicalAnd(bool x, bool y) { return x && y; } 60 | 61 | TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { 62 | return LogicalImpl(context, node, LogicalAnd); 63 | } 64 | 65 | } // namespace 66 | } // namespace logical 67 | 68 | TfLiteRegistration* Register_LOGICAL_OR() { 69 | // Init, Free, Prepare, Eval are satisfying the Interface required by 70 | // TfLiteRegistration. 71 | static TfLiteRegistration r = {/* init */ nullptr, /* free */ nullptr, 72 | /* prepare */ nullptr, logical::LogicalOrEval}; 73 | return &r; 74 | } 75 | 76 | TfLiteRegistration* Register_LOGICAL_AND() { 77 | // Init, Free, Prepare, Eval are satisfying the Interface required by 78 | // TfLiteRegistration. 79 | static TfLiteRegistration r = {/* init */ nullptr, /* free */ nullptr, 80 | /* prepare */ nullptr, 81 | logical::LogicalAndEval}; 82 | return &r; 83 | } 84 | 85 | } // namespace micro 86 | } // namespace ops 87 | } // namespace tflite 88 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/logistic.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/logistic.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | #include "tensorflow/lite/kernels/internal/common.h" 21 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 22 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 23 | #include "tensorflow/lite/kernels/kernel_util.h" 24 | #include "tensorflow/lite/kernels/op_macros.h" 25 | 26 | namespace tflite { 27 | namespace ops { 28 | namespace micro { 29 | namespace activations { 30 | 31 | constexpr int kInputTensor = 0; 32 | constexpr int kOutputTensor = 0; 33 | 34 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 35 | return kTfLiteOk; 36 | } 37 | 38 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 39 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 40 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 41 | 42 | switch (input->type) { 43 | case kTfLiteFloat32: { 44 | reference_ops::Logistic( 45 | GetTensorShape(input), GetTensorData(input), 46 | GetTensorShape(output), GetTensorData(output)); 47 | return kTfLiteOk; 48 | } 49 | default: { 50 | // TODO(b/141211002): Also support other data types once we have supported 51 | // temporary tensors in TFLM. 52 | context->ReportError(context, 53 | "Only float32 is supported currently, got %s", 54 | TfLiteTypeGetName(input->type)); 55 | return kTfLiteError; 56 | } 57 | } 58 | } 59 | 60 | } // namespace activations 61 | 62 | TfLiteRegistration* Register_LOGISTIC() { 63 | static TfLiteRegistration r = {/*init=*/nullptr, 64 | /*free=*/nullptr, activations::Prepare, 65 | activations::Eval}; 66 | return &r; 67 | } 68 | } // namespace micro 69 | } // namespace ops 70 | } // namespace tflite 71 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/maximum_minimum.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/maximum_minimum.h" 17 | 18 | #include "tensorflow/lite/c/builtin_op_data.h" 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | #include "tensorflow/lite/kernels/internal/common.h" 21 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 22 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 23 | #include "tensorflow/lite/kernels/kernel_util.h" 24 | #include "tensorflow/lite/kernels/op_macros.h" 25 | 26 | namespace tflite { 27 | namespace ops { 28 | namespace micro { 29 | namespace maximum_minimum { 30 | namespace { 31 | 32 | // This file has a reference implementation of TFMaximum/TFMinimum. 33 | enum KernelType { 34 | kReference, 35 | }; 36 | 37 | constexpr int kInputTensor1 = 0; 38 | constexpr int kInputTensor2 = 1; 39 | constexpr int kOutputTensor = 0; 40 | 41 | struct OpContext { 42 | OpContext(TfLiteContext* context, TfLiteNode* node) { 43 | input1 = GetInput(context, node, kInputTensor1); 44 | input2 = GetInput(context, node, kInputTensor2); 45 | output = GetOutput(context, node, kOutputTensor); 46 | } 47 | const TfLiteTensor* input1; 48 | const TfLiteTensor* input2; 49 | TfLiteTensor* output; 50 | }; 51 | 52 | struct MaximumOp { 53 | template 54 | static data_type op(data_type el1, data_type el2) { 55 | return el1 > el2 ? el1 : el2; 56 | } 57 | }; 58 | 59 | struct MinimumOp { 60 | template 61 | static data_type op(data_type el1, data_type el2) { 62 | return el1 < el2 ? el1 : el2; 63 | } 64 | }; 65 | 66 | } // namespace 67 | 68 | template 69 | void TFLiteOperation(TfLiteContext* context, TfLiteNode* node, 70 | const OpContext& op_context) { 71 | reference_ops::MaximumMinimumBroadcast4DSlow( 72 | GetTensorShape(op_context.input1), 73 | GetTensorData(op_context.input1), 74 | GetTensorShape(op_context.input2), 75 | GetTensorData(op_context.input2), 76 | GetTensorShape(op_context.output), 77 | GetTensorData(op_context.output), 78 | op_type::template op); 79 | } 80 | 81 | template 82 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 83 | OpContext op_context(context, node); 84 | 85 | if (kernel_type == kReference) { 86 | switch (op_context.output->type) { 87 | case kTfLiteFloat32: 88 | TFLiteOperation(context, node, op_context); 89 | break; 90 | case kTfLiteUInt8: 91 | TFLiteOperation(context, node, op_context); 92 | break; 93 | case kTfLiteInt8: 94 | TFLiteOperation(context, node, op_context); 95 | break; 96 | case kTfLiteInt32: 97 | TFLiteOperation(context, node, op_context); 98 | break; 99 | case kTfLiteInt64: 100 | TFLiteOperation(context, node, op_context); 101 | break; 102 | default: 103 | context->ReportError( 104 | context, "Type %s (%d) is not supported by Maximum/Minimum.", 105 | TfLiteTypeGetName(op_context.output->type), 106 | op_context.output->type); 107 | return kTfLiteError; 108 | } 109 | } else { 110 | context->ReportError(context, 111 | "Kernel type not supported by Maximum/Minimum."); 112 | return kTfLiteError; 113 | } 114 | return kTfLiteOk; 115 | } 116 | 117 | } // namespace maximum_minimum 118 | 119 | TfLiteRegistration* Register_MAXIMUM() { 120 | static TfLiteRegistration r = { 121 | /* init */ nullptr, 122 | /* free */ nullptr, 123 | /* prepare */ nullptr, 124 | maximum_minimum::Eval}; 126 | return &r; 127 | } 128 | 129 | TfLiteRegistration* Register_MINIMUM() { 130 | static TfLiteRegistration r = { 131 | /* init */ nullptr, 132 | /* free */ nullptr, 133 | /* prepare */ nullptr, 
134 | maximum_minimum::Eval}; 136 | return &r; 137 | } 138 | 139 | } // namespace micro 140 | } // namespace ops 141 | } // namespace tflite 142 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/micro_ops.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_OPS_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_OPS_H_ 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | 20 | namespace tflite { 21 | namespace ops { 22 | namespace micro { 23 | 24 | // Forward declaration of all micro op kernel registration methods. These 25 | // registrations are included with the standard `BuiltinOpResolver`. 26 | // 27 | // This header is particularly useful in cases where only a subset of ops are 28 | // needed. In such cases, the client can selectively add only the registrations 29 | // their model requires, using a custom `(Micro)MutableOpResolver`. Selective 30 | // registration in turn allows the linker to strip unused kernels. 
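//
// For illustration, a sketch of selective registration using the
// MicroMutableOpResolver declared in micro_mutable_op_resolver.h; the three
// ops chosen here are only an example, substitute whatever ops the model
// actually uses:
//
//   static tflite::MicroMutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED,
//                       tflite::ops::micro::Register_FULLY_CONNECTED());
//   resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
//                       tflite::ops::micro::Register_SOFTMAX());
//   resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE,
//                       tflite::ops::micro::Register_RESHAPE());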
31 | 32 | TfLiteRegistration* Register_ABS(); 33 | TfLiteRegistration* Register_ADD(); 34 | TfLiteRegistration* Register_ARG_MAX(); 35 | TfLiteRegistration* Register_ARG_MIN(); 36 | TfLiteRegistration* Register_AVERAGE_POOL_2D(); 37 | TfLiteRegistration* Register_CEIL(); 38 | TfLiteRegistration* Register_CONV_2D(); 39 | TfLiteRegistration* Register_COS(); 40 | TfLiteRegistration* Register_DEPTHWISE_CONV_2D(); 41 | TfLiteRegistration* Register_DEQUANTIZE(); 42 | TfLiteRegistration* Register_EQUAL(); 43 | TfLiteRegistration* Register_FLOOR(); 44 | TfLiteRegistration* Register_FULLY_CONNECTED(); 45 | TfLiteRegistration* Register_GREATER(); 46 | TfLiteRegistration* Register_GREATER_EQUAL(); 47 | TfLiteRegistration* Register_LESS(); 48 | TfLiteRegistration* Register_LESS_EQUAL(); 49 | TfLiteRegistration* Register_LOG(); 50 | TfLiteRegistration* Register_LOGICAL_AND(); 51 | TfLiteRegistration* Register_LOGICAL_NOT(); 52 | TfLiteRegistration* Register_LOGICAL_OR(); 53 | TfLiteRegistration* Register_LOGISTIC(); 54 | TfLiteRegistration* Register_MAXIMUM(); 55 | TfLiteRegistration* Register_MAX_POOL_2D(); 56 | TfLiteRegistration* Register_MINIMUM(); 57 | TfLiteRegistration* Register_MUL(); 58 | TfLiteRegistration* Register_NEG(); 59 | TfLiteRegistration* Register_NOT_EQUAL(); 60 | TfLiteRegistration* Register_PACK(); 61 | TfLiteRegistration* Register_PRELU(); 62 | TfLiteRegistration* Register_QUANTIZE(); 63 | TfLiteRegistration* Register_RELU(); 64 | TfLiteRegistration* Register_RELU6(); 65 | TfLiteRegistration* Register_RESHAPE(); 66 | TfLiteRegistration* Register_ROUND(); 67 | TfLiteRegistration* Register_RSQRT(); 68 | TfLiteRegistration* Register_SIN(); 69 | TfLiteRegistration* Register_SOFTMAX(); 70 | TfLiteRegistration* Register_SPLIT(); 71 | TfLiteRegistration* Register_SQRT(); 72 | TfLiteRegistration* Register_SQUARE(); 73 | TfLiteRegistration* Register_STRIDED_SLICE(); 74 | TfLiteRegistration* Register_SVDF(); 75 | TfLiteRegistration* Register_UNPACK(); 76 | 77 | } // namespace micro 78 | } // namespace ops 79 | } // namespace tflite 80 | 81 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_OPS_H_ 82 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 
20 | struct Greater { 21 | template <typename T> 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template <typename T> 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/neg.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/neg.h" 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace neg { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | switch (input->type) { 34 | // TODO(wangtz): handle for kTfLiteInt8 35 | case kTfLiteFloat32: 36 | reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input), 37 | GetTensorShape(output), 38 | GetTensorData<float>(output)); 39 | break; 40 | default: 41 | context->ReportError( 42 | context, "Neg only currently supports float32, got %d.", input->type); 43 | return kTfLiteError; 44 | } 45 | return kTfLiteOk; 46 | } 47 | 48 | } // namespace neg 49 | 50 | TfLiteRegistration* Register_NEG() { 51 | static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr, 52 | /*prepare=*/nullptr, neg::Eval}; 53 | return &r; 54 | } 55 | 56 | } // namespace micro 57 | } // namespace ops 58 | } // namespace tflite 59 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/pack.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/c_api_internal.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace pack { 25 | namespace { 26 | 27 | constexpr int kOutputTensor = 0; 28 | 29 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 30 | return kTfLiteOk; 31 | } 32 | 33 | template <typename T> 34 | TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node, 35 | TfLiteTensor* output, int values_count, int axis) { 36 | const int dimensions = output->dims->size; 37 | const TfLiteTensor* input0 = &context->tensors[node->inputs->data[0]]; 38 | const TfLiteIntArray* input_dims = input0->dims; 39 | const TfLiteIntArray* output_dims = output->dims; 40 | 41 | if (axis < 0) { 42 | axis += dimensions; 43 | } 44 | 45 | int outer_size = 1; 46 | for (int i = 0; i < axis; ++i) { 47 | outer_size *= output_dims->data[i]; 48 | } 49 | int copy_size = 1; 50 | for (int i = axis + 1; i < dimensions; ++i) { 51 | copy_size *= output_dims->data[i]; 52 | } 53 | int input_size = 1; 54 | for (int i = 0; i < input_dims->size; ++i) { 55 | input_size *= input_dims->data[i]; 56 | } 57 | TFLITE_DCHECK_EQ(input_size, copy_size * outer_size); 58 | 59 | T* output_data = GetTensorData<T>(output); 60 | 61 | for (int i = 0; i < values_count; ++i) { 62 | TfLiteTensor* t = &context->tensors[node->inputs->data[i]]; 63 | const T* input_data = GetTensorData<T>(t); 64 | for (int k = 0; k < outer_size; ++k) { 65 | const T* input_ptr = input_data + copy_size * k; 66 | int loc = k * values_count * copy_size + i * copy_size; 67 | T* output_ptr = output_data + loc; 68 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 69 | } 70 | } 71 | 72 | return kTfLiteOk; 73 | } 74 | 75 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 76 | const TfLitePackParams* data = 77 | reinterpret_cast<TfLitePackParams*>(node->builtin_data); 78 | 79 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 80 | 81 | switch (output->type) { 82 | case kTfLiteFloat32: { 83 | return PackImpl<float>(context, node, output, data->values_count, 84 | data->axis); 85 | } 86 | case kTfLiteUInt8: { 87 | return PackImpl<uint8_t>(context, node, output, data->values_count, 88 | data->axis); 89 | } 90 | case kTfLiteInt8: { 91 | return PackImpl<int8_t>(context, node, output, data->values_count, 92 | data->axis); 93 | } 94 | case kTfLiteInt32: { 95 | return PackImpl<int32_t>(context, node, output, data->values_count, 96 | data->axis); 97 | } 98 | case kTfLiteInt64: { 99 | return PackImpl<int64_t>(context, node, output, data->values_count, 100 | data->axis); 101 | } 102 | default: { 103 | context->ReportError(context, "Type '%s' is not supported by pack.", 104 | TfLiteTypeGetName(output->type)); 105 | return kTfLiteError; 106 | } 107 | } 108 | 109 | return kTfLiteOk; 110 | } 111 | 112 | } // namespace 113 | } // namespace pack 114 | 115 | TfLiteRegistration* Register_PACK() { 116 | static TfLiteRegistration r = {nullptr, nullptr, pack::Prepare, pack::Eval}; 117 | return &r; 118 | } 119 | 120 | } // namespace micro 121 | } // namespace ops 122 | } // namespace tflite 123 | --------------------------------------------------------------------------------
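To make PackImpl's index arithmetic concrete, here is a minimal standalone sketch (illustrative only; plain C++ with made-up values and no TfLite types) that packs two length-3 inputs along axis 0 using the same loop structure as the kernel:

#include <cstdio>

int main() {
  // Two inputs of shape [3], packed along axis 0 into an output of shape
  // [2, 3]. For axis 0: outer_size == 1 and copy_size == 3, so input i is
  // copied to offset i * copy_size.
  const float input0[3] = {1, 2, 3};
  const float input1[3] = {4, 5, 6};
  const float* inputs[2] = {input0, input1};
  float output[6];
  const int values_count = 2, outer_size = 1, copy_size = 3;
  for (int i = 0; i < values_count; ++i) {
    for (int k = 0; k < outer_size; ++k) {
      // Same offset formula as the kernel's inner loop.
      const int loc = k * values_count * copy_size + i * copy_size;
      for (int j = 0; j < copy_size; ++j) {
        output[loc + j] = inputs[i][k * copy_size + j];
      }
    }
  }
  for (int n = 0; n < 6; ++n) printf("%g ", output[n]);  // 1 2 3 4 5 6
  printf("\n");
  return 0;
}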
/lib/tfmicro/tensorflow/lite/experimental/micro/kernels/prelu.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/prelu.h" 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 20 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 21 | #include "tensorflow/lite/kernels/kernel_util.h" 22 | 23 | namespace tflite { 24 | namespace ops { 25 | namespace micro { 26 | namespace activations { 27 | 28 | TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { 29 | return kTfLiteOk; 30 | } 31 | 32 | inline void BroadcastPrelu4DSlowFloat( 33 | const RuntimeShape& unextended_input1_shape, const float* input1_data, 34 | const RuntimeShape& unextended_input2_shape, const float* input2_data, 35 | const RuntimeShape& unextended_output_shape, float* output_data) { 36 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); 37 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); 38 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); 39 | const RuntimeShape output_shape = 40 | RuntimeShape::ExtendedShape(4, unextended_output_shape); 41 | 42 | NdArrayDesc<4> desc1; 43 | NdArrayDesc<4> desc2; 44 | NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, 45 | unextended_input2_shape, &desc1, &desc2); 46 | 47 | for (int b = 0; b < output_shape.Dims(0); ++b) { 48 | for (int y = 0; y < output_shape.Dims(1); ++y) { 49 | for (int x = 0; x < output_shape.Dims(2); ++x) { 50 | for (int c = 0; c < output_shape.Dims(3); ++c) { 51 | auto out_idx = Offset(output_shape, b, y, x, c); 52 | auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); 53 | auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); 54 | auto in1_val = input1_data[in1_idx]; 55 | auto in2_val = input2_data[in2_idx]; 56 | output_data[out_idx] = in1_val >= 0.0 ? 
in1_val : in1_val * in2_val; 57 | } 58 | } 59 | } 60 | } 61 | } 62 | 63 | TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { 64 | const TfLiteTensor* input = GetInput(context, node, 0); 65 | const TfLiteTensor* alpha = GetInput(context, node, 1); 66 | TfLiteTensor* output = GetOutput(context, node, 0); 67 | int32_t output_multiplier = 0; 68 | int output_shift = 0; 69 | if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt16) { 70 | double real_multiplier = 71 | input->params.scale * alpha->params.scale / output->params.scale; 72 | QuantizeMultiplierSmallerThanOneExp(real_multiplier, &output_multiplier, 73 | &output_shift); 74 | } 75 | switch (input->type) { 76 | case kTfLiteFloat32: { 77 | BroadcastPrelu4DSlowFloat( 78 | GetTensorShape(input), GetTensorData<float>(input), 79 | GetTensorShape(alpha), GetTensorData<float>(alpha), 80 | GetTensorShape(output), GetTensorData<float>(output)); 81 | return kTfLiteOk; 82 | } break; 83 | case kTfLiteUInt8: { 84 | PreluParams op_params; 85 | op_params.input_offset = -input->params.zero_point; 86 | op_params.alpha_offset = -alpha->params.zero_point; 87 | op_params.output_offset = output->params.zero_point; 88 | op_params.output_multiplier = output_multiplier; 89 | op_params.output_shift = output_shift; 90 | reference_ops::BroadcastPrelu4DSlow( 91 | op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), 92 | GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), 93 | GetTensorShape(output), GetTensorData<uint8_t>(output)); 94 | return kTfLiteOk; 95 | } break; 96 | default: 97 | context->ReportError( 98 | context, "Only float32 and uint8 are supported currently, got %s.", 99 | TfLiteTypeGetName(input->type)); 100 | return kTfLiteError; 101 | } 102 | } 103 | 104 | } // namespace activations 105 | 106 | TfLiteRegistration* Register_PRELU() { 107 | static TfLiteRegistration r = {nullptr, nullptr, activations::PreluPrepare, 108 | activations::PreluEval}; 109 | return &r; 110 | } 111 | 112 | } // namespace micro 113 | } // namespace ops 114 | } // namespace tflite 115 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/quantize.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | #include "tensorflow/lite/kernels/internal/reference/quantize.h" 16 | 17 | #include "tensorflow/lite/c/c_api_internal.h" 18 | #include "tensorflow/lite/kernels/internal/quantization_util.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace quantize { 26 | 27 | void* Init(TfLiteContext* context, const char* buffer, size_t length) { 28 | return nullptr; 29 | } 30 | 31 | void Free(TfLiteContext* context, void* buffer) {} 32 | 33 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 34 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 36 | 37 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 38 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 39 | 40 | // TODO(b/128934713): Add support for fixed-point per-channel quantization. 41 | // Currently this only supports affine per-layer quantization. 42 | TF_LITE_ENSURE_EQ(context, output->quantization.type, 43 | kTfLiteAffineQuantization); 44 | const auto* affine_quantization = 45 | reinterpret_cast<TfLiteAffineQuantization*>(output->quantization.params); 46 | TF_LITE_ENSURE(context, affine_quantization); 47 | TF_LITE_ENSURE(context, affine_quantization->scale); 48 | TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); 49 | 50 | TF_LITE_ENSURE(context, input->type == kTfLiteFloat32); 51 | TF_LITE_ENSURE(context, 52 | output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); 53 | 54 | return kTfLiteOk; 55 | } 56 | 57 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 58 | TfLiteTensor* input = &context->tensors[node->inputs->data[0]]; 59 | TfLiteTensor* output = &context->tensors[node->outputs->data[0]]; 60 | 61 | tflite::QuantizationParams op_params; 62 | op_params.zero_point = output->params.zero_point; 63 | op_params.scale = output->params.scale; 64 | switch (output->type) { 65 | case kTfLiteInt8: 66 | reference_ops::AffineQuantize( 67 | op_params, GetTensorShape(input), GetTensorData<float>(input), 68 | GetTensorShape(output), GetTensorData<int8_t>(output)); 69 | break; 70 | case kTfLiteUInt8: 71 | reference_ops::AffineQuantize( 72 | op_params, GetTensorShape(input), GetTensorData<float>(input), 73 | GetTensorShape(output), GetTensorData<uint8_t>(output)); 74 | break; 75 | default: 76 | context->ReportError(context, "Output type %s (%d) not supported", 77 | TfLiteTypeGetName(output->type), output->type); 78 | return kTfLiteError; 79 | } 80 | 81 | return kTfLiteOk; 82 | } 83 | 84 | } // namespace quantize 85 | 86 | // This Op (QUANTIZE) quantizes the input and produces quantized output. 87 | // AffineQuantize takes scale and zero point and quantizes the float value to 88 | // quantized output, in int8 or uint8 format. 89 | TfLiteRegistration* Register_QUANTIZE() { 90 | static TfLiteRegistration r = {quantize::Init, quantize::Free, 91 | quantize::Prepare, quantize::Eval}; 92 | return &r; 93 | } 94 | 95 | } // namespace micro 96 | } // namespace ops 97 | } // namespace tflite 98 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/reshape.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/c_api_internal.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | #include "tensorflow/lite/kernels/op_macros.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace reshape { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kShapeTensor = 1; 29 | constexpr int kOutputTensor = 0; 30 | 31 | TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { 32 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | // Tensorflow's Reshape allows one of the shape components to have the 35 | // special -1 value, meaning it will be calculated automatically based on the 36 | // input. Here we calculate what that dimension should be so that the number 37 | // of output elements is the same as the number of input elements. 38 | int num_input_elements = NumElements(input); 39 | TfLiteIntArray* output_shape = output->dims; 40 | 41 | if (NumInputs(node) == 1 && // Legacy scalar supported with params. 42 | output_shape->size == 1 && output_shape->data[0] == 0) { 43 | // Legacy tflite models use a shape parameter of [0] to indicate scalars, 44 | // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during 45 | // toco conversion.
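// For example (illustrative values): a 6-element input with a requested
// shape of [2, -1] records stretch_dim == 1 in the loop below, accumulates
// num_output_elements == 2 from the known dimensions, and then fills in
// 6 / 2 == 3, so the output shape becomes [2, 3].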
46 | output_shape->size = 0; 47 | } 48 | 49 | int num_output_elements = 1; 50 | int stretch_dim = -1; 51 | for (int i = 0; i < output_shape->size; ++i) { 52 | int value = output_shape->data[i]; 53 | if (value == -1) { 54 | TF_LITE_ENSURE_EQ(context, stretch_dim, -1); 55 | stretch_dim = i; 56 | } else { 57 | num_output_elements *= value; 58 | } 59 | } 60 | if (stretch_dim != -1) { 61 | output_shape->data[stretch_dim] = num_input_elements / num_output_elements; 62 | num_output_elements *= output_shape->data[stretch_dim]; 63 | } 64 | 65 | TF_LITE_ENSURE_EQ(context, input->type, output->type); 66 | TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); 67 | return kTfLiteOk; 68 | } 69 | 70 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 71 | TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); 72 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 73 | return kTfLiteOk; 74 | } 75 | 76 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 77 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 78 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 79 | if (ReshapeOutput(context, node) != kTfLiteOk) { 80 | return kTfLiteError; 81 | } 82 | 83 | for (int i = 0; i < input->bytes; ++i) { 84 | output->data.raw[i] = input->data.raw[i]; 85 | } 86 | return kTfLiteOk; 87 | } 88 | 89 | } // namespace reshape 90 | 91 | TfLiteRegistration* Register_RESHAPE() { 92 | static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, 93 | reshape::Eval}; 94 | return &r; 95 | } 96 | 97 | } // namespace micro 98 | } // namespace ops 99 | } // namespace tflite 100 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/round.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/round.h" 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace round { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 33 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 34 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 35 | TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32); 36 | TF_LITE_ENSURE_EQ(context, output->type, input->type); 37 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 38 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 39 | for (int i = 0; i < output->dims->size; ++i) { 40 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 41 | } 42 | return kTfLiteOk; 43 | } 44 | 45 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 46 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 47 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 48 | 49 | reference_ops::Round(GetTensorShape(input), GetTensorData<float>(input), 50 | GetTensorShape(output), GetTensorData<float>(output)); 51 | 52 | return kTfLiteOk; 53 | } 54 | } // namespace round 55 | 56 | TfLiteRegistration* Register_ROUND() { 57 | static TfLiteRegistration r = {/*init=*/nullptr, 58 | /*free=*/nullptr, round::Prepare, round::Eval}; 59 | return &r; 60 | } 61 | 62 | } // namespace micro 63 | } // namespace ops 64 | } // namespace tflite 65 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/split.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/c_api_internal.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace split { 25 | 26 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 27 | return kTfLiteOk; 28 | } 29 | 30 | template <typename T> 31 | TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, 32 | const TfLiteTensor* input, int axis_value) { 33 | const int output_count = NumOutputs(node); 34 | const TfLiteIntArray* input_dims = input->dims; 35 | const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]]; 36 | const TfLiteIntArray* output_dims = output0->dims; 37 | 38 | const int split_dimensions = input_dims->size; 39 | int axis = axis_value < 0 ? axis_value + split_dimensions : axis_value; 40 | 41 | TFLITE_DCHECK_LT(axis, split_dimensions); 42 | TFLITE_DCHECK_EQ(output_dims->size, split_dimensions); 43 | 44 | int64_t split_size = output_dims->data[axis] * output_count; 45 | 46 | TFLITE_DCHECK_EQ(split_size, input_dims->data[axis]); 47 | int64_t outer_size = 1; 48 | for (int i = 0; i < axis; ++i) { 49 | outer_size *= input_dims->data[i]; 50 | } 51 | 52 | int64_t base_inner_size = 1; 53 | for (int i = axis + 1; i < split_dimensions; ++i) { 54 | base_inner_size *= input_dims->data[i]; 55 | } 56 | 57 | const T* input_ptr = GetTensorData<T>(input); 58 | for (int k = 0; k < outer_size; ++k) { 59 | for (int i = 0; i < output_count; ++i) { 60 | TfLiteTensor* t = &context->tensors[node->outputs->data[i]]; 61 | T* output_data = GetTensorData<T>(t); 62 | const int copy_size = output_dims->data[axis] * base_inner_size; 63 | T* output_ptr = output_data + k * copy_size; 64 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 65 | input_ptr += copy_size; 66 | } 67 | } 68 | 69 | return kTfLiteOk; 70 | } 71 | 72 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 73 | const TfLiteTensor* axis = GetInput(context, node, 0); 74 | const TfLiteTensor* input = GetInput(context, node, 1); 75 | 76 | // Dynamic output tensors are needed if axis tensor is not constant. 77 | // But Micro doesn't support dynamic memory allocation, so we only support 78 | // constant axis tensor for now.
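// (Illustrative note: a negative axis counts back from the last dimension,
// so for a rank-4 input an axis tensor holding -1 is normalized to 3 below
// before the range checks run.)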
79 | TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), 80 | "Non constant axis tensor not supported"); 81 | 82 | int axis_value = GetTensorData<int32_t>(axis)[0]; 83 | if (axis_value < 0) { 84 | axis_value += NumDimensions(input); 85 | } 86 | 87 | TF_LITE_ENSURE(context, axis_value >= 0); 88 | TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); 89 | 90 | switch (input->type) { 91 | case kTfLiteFloat32: { 92 | return SplitImpl<float>(context, node, input, axis_value); 93 | } 94 | case kTfLiteUInt8: { 95 | return SplitImpl<uint8_t>(context, node, input, axis_value); 96 | } 97 | case kTfLiteInt8: { 98 | return SplitImpl<int8_t>(context, node, input, axis_value); 99 | } 100 | case kTfLiteInt16: { 101 | return SplitImpl<int16_t>(context, node, input, axis_value); 102 | } 103 | case kTfLiteInt32: { 104 | return SplitImpl<int32_t>(context, node, input, axis_value); 105 | } 106 | default: 107 | context->ReportError(context, "Type %s currently not supported.", 108 | TfLiteTypeGetName(input->type)); 109 | return kTfLiteError; 110 | } 111 | 112 | return kTfLiteOk; 113 | } 114 | 115 | } // namespace split 116 | 117 | TfLiteRegistration* Register_SPLIT() { 118 | static TfLiteRegistration r = {nullptr, nullptr, split::Prepare, split::Eval}; 119 | return &r; 120 | } 121 | 122 | } // namespace micro 123 | } // namespace ops 124 | } // namespace tflite 125 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/kernels/unpack.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/c/builtin_op_data.h" 17 | #include "tensorflow/lite/c/c_api_internal.h" 18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 19 | #include "tensorflow/lite/kernels/kernel_util.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace unpack { 25 | namespace { 26 | 27 | constexpr int kInputTensor = 0; 28 | 29 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 30 | return kTfLiteOk; 31 | } 32 | 33 | template <typename T> 34 | TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node, 35 | const TfLiteTensor* input, int output_count, int axis) { 36 | const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]]; 37 | const TfLiteIntArray* input_dims = input->dims; 38 | const TfLiteIntArray* output_dims = output0->dims; 39 | const int dimensions = input_dims->size; 40 | 41 | if (axis < 0) { 42 | axis += NumDimensions(input); 43 | } 44 | 45 | TFLITE_DCHECK_LT(axis, dimensions); 46 | 47 | int outer_size = 1; 48 | for (int i = 0; i < axis; ++i) { 49 | outer_size *= input_dims->data[i]; 50 | } 51 | int copy_size = 1; 52 | for (int i = axis + 1; i < dimensions; ++i) { 53 | copy_size *= input_dims->data[i]; 54 | } 55 | int output_size = 1; 56 | for (int i = 0; i < output_dims->size; ++i) { 57 | output_size *= output_dims->data[i]; 58 | } 59 | TFLITE_DCHECK_EQ(output_size, copy_size * outer_size); 60 | 61 | const T* input_data = GetTensorData<T>(input); 62 | 63 | for (int i = 0; i < output_count; ++i) { 64 | TfLiteTensor* t = &context->tensors[node->outputs->data[i]]; 65 | T* output_data = GetTensorData<T>(t); 66 | for (int k = 0; k < outer_size; ++k) { 67 | T* output_ptr = output_data + copy_size * k; 68 | int loc = k * output_count * copy_size + i * copy_size; 69 | const T* input_ptr = input_data + loc; 70 | for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; 71 | } 72 | } 73 | 74 | return kTfLiteOk; 75 | } 76 | 77 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 78 | TfLiteUnpackParams* data = 79 | reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data); 80 | 81 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 82 | 83 | switch (input->type) { 84 | case kTfLiteFloat32: { 85 | return UnpackImpl<float>(context, node, input, data->num, data->axis); 86 | } 87 | case kTfLiteInt32: { 88 | return UnpackImpl<int32_t>(context, node, input, data->num, data->axis); 89 | } 90 | case kTfLiteUInt8: { 91 | return UnpackImpl<uint8_t>(context, node, input, data->num, data->axis); 92 | } 93 | case kTfLiteInt8: { 94 | return UnpackImpl<int8_t>(context, node, input, data->num, data->axis); 95 | } 96 | default: { 97 | context->ReportError(context, "Type '%s' is not supported by unpack.", 98 | TfLiteTypeGetName(input->type)); 99 | return kTfLiteError; 100 | } 101 | } 102 | 103 | return kTfLiteOk; 104 | } 105 | } // namespace 106 | } // namespace unpack 107 | 108 | TfLiteRegistration* Register_UNPACK() { 109 | static TfLiteRegistration r = {nullptr, nullptr, unpack::Prepare, 110 | unpack::Eval}; 111 | return &r; 112 | } 113 | 114 | } // namespace micro 115 | } // namespace ops 116 | } // namespace tflite 117 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/memory_helpers.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/experimental/micro/memory_helpers.h" 17 | 18 | #include <cstdint> 19 | 20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 21 | 22 | namespace tflite { 23 | 24 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) { 25 | std::uintptr_t data_as_uintptr_t = reinterpret_cast<std::uintptr_t>(data); 26 | uint8_t* aligned_result = reinterpret_cast<uint8_t*>( 27 | ((data_as_uintptr_t + (alignment - 1)) / alignment) * alignment); 28 | return aligned_result; 29 | } 30 | 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) { 32 | std::uintptr_t data_as_uintptr_t = reinterpret_cast<std::uintptr_t>(data); 33 | uint8_t* aligned_result = 34 | reinterpret_cast<uint8_t*>((data_as_uintptr_t / alignment) * alignment); 35 | return aligned_result; 36 | } 37 | 38 | size_t AlignSizeUp(size_t size, size_t alignment) { 39 | size_t aligned_size = (((size + (alignment - 1)) / alignment) * alignment); 40 | return aligned_size; 41 | } 42 | 43 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 44 | ErrorReporter* reporter) { 45 | switch (type) { 46 | case kTfLiteFloat32: 47 | *size = sizeof(float); 48 | break; 49 | case kTfLiteInt16: 50 | *size = sizeof(int16_t); 51 | break; 52 | case kTfLiteInt32: 53 | *size = sizeof(int32_t); 54 | break; 55 | case kTfLiteUInt8: 56 | *size = sizeof(uint8_t); 57 | break; 58 | case kTfLiteInt8: 59 | *size = sizeof(int8_t); 60 | break; 61 | case kTfLiteInt64: 62 | *size = sizeof(int64_t); 63 | break; 64 | case kTfLiteBool: 65 | *size = sizeof(bool); 66 | break; 67 | case kTfLiteComplex64: 68 | *size = sizeof(float) * 2; 69 | break; 70 | default: 71 | reporter->Report("Type %s (%d) is not supported", 72 | TfLiteTypeGetName(type), type); 73 | return kTfLiteError; 74 | } 75 | return kTfLiteOk; 76 | } 77 | 78 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 79 | size_t* bytes, size_t* type_size, 80 | ErrorReporter* error_reporter) { 81 | int element_count = 1; 82 | for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) { 83 | element_count *= flatbuffer_tensor.shape()->Get(n); 84 | } 85 | 86 | TfLiteType tf_lite_type; 87 | TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), 88 | &tf_lite_type, error_reporter)); 89 | TF_LITE_ENSURE_STATUS( 90 | TfLiteTypeSizeOf(tf_lite_type, type_size, error_reporter)); 91 | *bytes = element_count * (*type_size); 92 | return kTfLiteOk; 93 | } 94 | 95 | } // namespace tflite 96 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include "tensorflow/lite/c/c_api_internal.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | // Returns the next pointer address aligned to the given alignment. 25 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 26 | 27 | // Returns the previous pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 29 | 30 | // Returns an increased size that's a multiple of alignment. 31 | size_t AlignSizeUp(size_t size, size_t alignment); 32 | 33 | // Returns size in bytes for a given TfLiteType. 34 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 35 | ErrorReporter* reporter); 36 | 37 | // How many bytes are needed to hold a tensor's contents. 38 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 39 | size_t* bytes, size_t* type_size, 40 | ErrorReporter* error_reporter); 41 | 42 | } // namespace tflite 43 | 44 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_HELPERS_H_ 45 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/experimental/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | error_reporter->Report("Too many buffers (max is %d)", kMaxBufferCount); 29 | return kTfLiteError; 30 | } 31 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 32 | next_free_offset_ += size; 33 | ++current_buffer_count_; 34 | return kTfLiteOk; 35 | } 36 | 37 | int LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 38 | 39 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 40 | 41 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 42 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 43 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 44 | error_reporter->Report("buffer index %d is outside range 0 to %d", 45 | buffer_index, current_buffer_count_); 46 | return kTfLiteError; 47 | } 48 | *offset = buffer_offsets_[buffer_index]; 49 | return kTfLiteOk; 50 | } 51 | 52 | } // namespace tflite 53 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/experimental/micro/memory_planner/memory_planner.h" 20 | 21 | namespace tflite { 22 | 23 | // The simplest possible memory planner that just lays out all buffers at 24 | // increasing offsets without trying to reuse memory. 
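//
// Illustrative usage sketch (it mirrors the worked example given in
// memory_planner.h below):
//
//   LinearMemoryPlanner planner;
//   planner.AddBuffer(error_reporter, 100, 0, 1);  // placed at offset 0
//   planner.AddBuffer(error_reporter, 50, 2, 3);   // placed at offset 100
//   planner.AddBuffer(error_reporter, 50, 2, 3);   // placed at offset 150
//
// GetMaximumMemorySize() then returns 200: the lifetime arguments are
// accepted but ignored, so no memory is ever reused.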
25 | class LinearMemoryPlanner : public MemoryPlanner { 26 | public: 27 | LinearMemoryPlanner(); 28 | ~LinearMemoryPlanner() override; 29 | 30 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 31 | int first_time_used, int last_time_used) override; 32 | 33 | int GetMaximumMemorySize() override; 34 | int GetBufferCount() override; 35 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 36 | int buffer_index, int* offset) override; 37 | 38 | private: 39 | static constexpr int kMaxBufferCount = 1024; 40 | int buffer_offsets_[kMaxBufferCount]; 41 | int current_buffer_count_; 42 | int next_free_offset_; 43 | }; 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 48 | -------------------------------------------------------------------------------- /lib/tfmicro/tensorflow/lite/experimental/micro/memory_planner/memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/c/c_api_internal.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | 22 | namespace tflite { 23 | 24 | // Interface class for planning the layout of memory buffers during the 25 | // execution of a graph. 26 | // It's designed to be used by a client that iterates in any order through the 27 | // buffers it wants to lay out, and then calls the getter functions for 28 | // information about the calculated layout. For example: 29 | // 30 | // SomeMemoryPlanner planner; 31 | // planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 32 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 33 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 34 | // 35 | // int offset0; 36 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); 37 | // int offset1; 38 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); 39 | // int offset2; 40 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); 41 | // const int arena_size_needed = planner.GetMaximumMemorySize(); 42 | // 43 | // The goal is for applications to be able to experiment with different layout 44 | // strategies without changing their client code, by swapping out classes that 45 | // implement this interface.= 46 | class MemoryPlanner { 47 | public: 48 | MemoryPlanner() {} 49 | virtual ~MemoryPlanner() {} 50 | 51 | // Pass information about a buffer's size and lifetime to the layout 52 | // algorithm. 
53 |   // result, so the buffer information that's passed into the N-th call of
54 |   // this method will be used as the buffer_index argument to
55 |   // GetOffsetForBuffer().
56 |   virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter,
57 |                                  int size, int first_time_used,
58 |                                  int last_time_used) = 0;
59 | 
60 |   // The largest contiguous block of memory that's needed to hold the layout.
61 |   virtual int GetMaximumMemorySize() = 0;
62 |   // How many buffers have been added to the planner.
63 |   virtual int GetBufferCount() = 0;
64 |   // Calculated layout offset for the N-th buffer added to the planner.
65 |   virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter,
66 |                                           int buffer_index, int* offset) = 0;
67 | };
68 | 
69 | }  // namespace tflite
70 | 
71 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_
72 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_allocator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ALLOCATOR_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ALLOCATOR_H_
17 | 
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
21 | #include "tensorflow/lite/experimental/micro/simple_memory_allocator.h"
22 | #include "tensorflow/lite/schema/schema_generated.h"
23 | 
24 | namespace tflite {
25 | 
26 | typedef struct {
27 |   TfLiteNode node;
28 |   const TfLiteRegistration* registration;
29 | } NodeAndRegistration;
30 | 
31 | // Allocator responsible for allocating memory for all intermediate tensors
32 | // necessary to invoke a model.
33 | class MicroAllocator {
34 |  public:
35 |   // The lifetime of the model, tensor allocator and error reporter must be at
36 |   // least as long as that of the allocator object, since the allocator needs
37 |   // them to be accessible during its entire lifetime.
38 |   MicroAllocator(TfLiteContext* context, const Model* model,
39 |                  uint8_t* tensor_arena, size_t arena_size,
40 |                  ErrorReporter* error_reporter);
41 | 
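  // A construction sketch, assuming the surrounding application owns the
  // context, model and arena (all names below are illustrative):
  //
  //   static uint8_t tensor_arena[16 * 1024];
  //   MicroAllocator allocator(&context, model, tensor_arena,
  //                            sizeof(tensor_arena), &error_reporter);
  //   allocator.FinishTensorAllocation();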
42 |   // Specify a particular tensor as pre-allocated. This means that this tensor
43 |   // will internally point to the supplied buffer, and no new memory will be
44 |   // provided. The buffer must live at least as long as the allocator, since
45 |   // the buffer will be used every time an op is invoked which uses the
46 |   // specified tensor. Most commonly this is useful when a platform-provided
47 |   // DMA buffer is used as an input, and it is desirable to avoid unnecessarily
48 |   // allocating a new buffer and copying from the DMA buffer. The user must
49 |   // ensure the buffer is valid throughout each interpreter run, and is not
50 |   // prematurely overwritten.
51 |   TfLiteStatus RegisterPreallocatedInput(uint8_t* buffer, size_t input_index);
52 | 
53 |   // Sets up all of the data structure members for a runtime tensor based on
54 |   // the contents of a serialized tensor. This method doesn't allocate any
55 |   // memory; all allocations happen subsequently in FinishTensorAllocation().
56 |   TfLiteStatus InitializeRuntimeTensor(
57 |       const tflite::Tensor& flatbuffer_tensor,
58 |       const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
59 |       ErrorReporter* error_reporter, TfLiteTensor* result,
60 |       uint8_t* preallocated_buffer = nullptr);
61 | 
62 |   // Run through the model and allocate all necessary input, output and
63 |   // intermediate tensors except for those already provided via calls to
64 |   // RegisterPreallocatedInput.
65 |   // WARNING: doing any allocation after calling this method has the risk of
66 |   // corrupting tensor data, so this must be the last method called on this
67 |   // class.
68 |   TfLiteStatus FinishTensorAllocation();
69 | 
70 |   // Run through the model to allocate nodes and registrations. We need to keep
71 |   // them for the entire lifetime of the model to allow persistent tensors.
72 |   // This method needs to be called before FinishTensorAllocation().
73 |   TfLiteStatus AllocateNodeAndRegistrations(
74 |       const OpResolver& op_resolver,
75 |       NodeAndRegistration** node_and_registrations);
76 | 
77 |  private:
78 |   const Model* model_;
79 |   SimpleMemoryAllocator memory_allocator_;
80 |   ErrorReporter* error_reporter_;
81 |   TfLiteContext* context_;
82 |   uint8_t* arena_;
83 |   size_t arena_size_;
84 |   // Indicates whether the allocator is ready for allocation.
85 |   bool active_ = false;
86 | 
87 |   const SubGraph* subgraph_;
88 |   const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;
89 |   const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
90 | };
91 | 
92 | }  // namespace tflite
93 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ALLOCATOR_H_
94 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_error_reporter.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
17 | 
18 | namespace tflite {
19 | namespace {
20 | void DebugLogPrintf(const char* format, va_list args) {
21 |   const int output_cache_size = 64;
22 |   char output_cache[output_cache_size + 1];
23 |   int output_cache_index = 0;
24 |   const char* current = format;
25 |   while (*current != 0) {
26 |     if (*current == '%') {
27 |       const char next = *(current + 1);
28 |       if ((next == 'd') || (next == 's') || (next == 'f')) {
29 |         current += 1;
30 |         if (output_cache_index > 0) {
31 |           output_cache[output_cache_index] = 0;
32 |           DebugLog(output_cache);
33 |           output_cache_index = 0;
34 |         }
35 |         if (next == 'd') {
36 |           DebugLogInt32(va_arg(args, int));
37 |         } else if (next == 's') {
38 |           DebugLog(va_arg(args, char*));
39 |         } else if (next == 'f') {
40 |           DebugLogFloat(va_arg(args, double));
41 |         }
42 |       }
43 |     } else {
44 |       output_cache[output_cache_index] = *current;
45 |       output_cache_index += 1;
46 |     }
47 |     if (output_cache_index >= output_cache_size) {
48 |       output_cache[output_cache_index] = 0;
49 |       DebugLog(output_cache);
50 |       output_cache_index = 0;
51 |     }
52 |     current += 1;
53 |   }
54 |   if (output_cache_index > 0) {
55 |     output_cache[output_cache_index] = 0;
56 |     DebugLog(output_cache);
57 |     output_cache_index = 0;
58 |   }
59 |   DebugLog("\r\n");
60 | }
61 | }  // namespace
62 | 
63 | int MicroErrorReporter::Report(const char* format, va_list args) {
64 |   DebugLogPrintf(format, args);
65 |   return 0;
66 | }
67 | 
68 | }  // namespace tflite
69 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
17 | 
18 | #include "tensorflow/lite/core/api/error_reporter.h"
19 | #include "tensorflow/lite/experimental/micro/compatibility.h"
20 | #include "tensorflow/lite/experimental/micro/debug_log.h"
21 | #include "tensorflow/lite/experimental/micro/debug_log_numbers.h"
22 | 
23 | namespace tflite {
24 | 
25 | class MicroErrorReporter : public ErrorReporter {
26 |  public:
27 |   ~MicroErrorReporter() {}
28 |   int Report(const char* format, va_list args) override;
29 | 
30 |  private:
31 |   TF_LITE_REMOVE_VIRTUAL_DELETE
32 | };
33 | 
34 | }  // namespace tflite
35 | 
36 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
37 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_mutable_op_resolver.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #include "tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h"
17 | 
18 | namespace tflite {
19 | 
20 | const TfLiteRegistration* MicroMutableOpResolver::FindOp(
21 |     tflite::BuiltinOperator op, int version) const {
22 |   for (int i = 0; i < registrations_len_; ++i) {
23 |     const TfLiteRegistration& registration = registrations_[i];
24 |     if ((registration.builtin_code == op) &&
25 |         (registration.version == version)) {
26 |       return &registration;
27 |     }
28 |   }
29 |   return nullptr;
30 | }
31 | 
32 | const TfLiteRegistration* MicroMutableOpResolver::FindOp(const char* op,
33 |                                                          int version) const {
34 |   for (int i = 0; i < registrations_len_; ++i) {
35 |     const TfLiteRegistration& registration = registrations_[i];
36 |     if ((registration.builtin_code == BuiltinOperator_CUSTOM) &&
37 |         (strcmp(registration.custom_name, op) == 0) &&
38 |         (registration.version == version)) {
39 |       return &registration;
40 |     }
41 |   }
42 |   return nullptr;
43 | }
44 | 
45 | void MicroMutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
46 |                                         TfLiteRegistration* registration,
47 |                                         int min_version, int max_version) {
48 |   for (int version = min_version; version <= max_version; ++version) {
49 |     if (registrations_len_ >= TFLITE_REGISTRATIONS_MAX) {
50 |       // TODO(petewarden) - Add error reporting hooks so we can report this!
51 |       return;
52 |     }
53 |     TfLiteRegistration* new_registration = &registrations_[registrations_len_];
54 |     registrations_len_ += 1;
55 | 
56 |     *new_registration = *registration;
57 |     new_registration->builtin_code = op;
58 |     new_registration->version = version;
59 |   }
60 | }
61 | 
62 | void MicroMutableOpResolver::AddCustom(const char* name,
63 |                                        TfLiteRegistration* registration,
64 |                                        int min_version, int max_version) {
65 |   for (int version = min_version; version <= max_version; ++version) {
66 |     if (registrations_len_ >= TFLITE_REGISTRATIONS_MAX) {
67 |       // TODO(petewarden) - Add error reporting hooks so we can report this!
68 |       return;
69 |     }
70 |     TfLiteRegistration* new_registration = &registrations_[registrations_len_];
71 |     registrations_len_ += 1;
72 | 
73 |     *new_registration = *registration;
74 |     new_registration->builtin_code = BuiltinOperator_CUSTOM;
75 |     new_registration->custom_name = name;
76 |     new_registration->version = version;
77 |   }
78 | }
79 | 
80 | }  // namespace tflite
81 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
17 | 
18 | #include "tensorflow/lite/core/api/op_resolver.h"
19 | #include "tensorflow/lite/experimental/micro/compatibility.h"
20 | 
21 | #ifndef TFLITE_REGISTRATIONS_MAX
22 | #define TFLITE_REGISTRATIONS_MAX (128)
23 | #endif
24 | 
25 | namespace tflite {
26 | 
27 | class MicroMutableOpResolver : public OpResolver {
28 |  public:
29 |   const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
30 |                                    int version) const override;
31 |   const TfLiteRegistration* FindOp(const char* op, int version) const override;
32 |   void AddBuiltin(tflite::BuiltinOperator op, TfLiteRegistration* registration,
33 |                   int min_version = 1, int max_version = 1);
34 |   void AddCustom(const char* name, TfLiteRegistration* registration,
35 |                  int min_version = 1, int max_version = 1);
36 | 
37 |  private:
38 |   TfLiteRegistration registrations_[TFLITE_REGISTRATIONS_MAX];
39 |   int registrations_len_ = 0;
40 | 
41 |   TF_LITE_REMOVE_VIRTUAL_DELETE
42 | };
43 | 
44 | }  // namespace tflite
45 | 
46 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
47 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_optional_debug_tools.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/micro/micro_optional_debug_tools.h"
16 | 
17 | #include "tensorflow/lite/schema/schema_generated.h"
18 | namespace tflite {
19 | namespace {
20 | 
21 | std::vector<int> flatbuffersVector2StdVector(
22 |     const flatbuffers::Vector<int32_t>& fVector) {
23 |   std::vector<int> stdVector;
24 |   stdVector.reserve(fVector.size());
25 |   for (size_t i = 0; i < fVector.size(); i++) {
26 |     stdVector.push_back(fVector.Get(i));
27 |   }
28 |   return stdVector;
29 | }
30 | 
31 | void PrintIntVector(const std::vector<int>& v) {
32 |   for (const auto& it : v) {
33 |     printf(" %d", it);
34 |   }
35 |   printf("\n");
36 | }
37 | 
38 | void PrintTfLiteIntVector(const TfLiteIntArray* v) {
39 |   if (!v) {
40 |     printf(" (null)\n");
41 |     return;
42 |   }
43 |   for (int k = 0; k < v->size; k++) {
44 |     printf(" %d", v->data[k]);
45 |   }
46 |   printf("\n");
47 | }
48 | 
49 | const char* TensorTypeName(TfLiteType type) {
50 |   switch (type) {
51 |     case kTfLiteNoType:
52 |       return "kTfLiteNoType";
53 |     case kTfLiteFloat32:
54 |       return "kTfLiteFloat32";
55 |     case kTfLiteInt32:
56 |       return "kTfLiteInt32";
57 |     case kTfLiteUInt8:
58 |       return "kTfLiteUInt8";
59 |     case kTfLiteInt8:
60 |       return "kTfLiteInt8";
61 |     case kTfLiteInt64:
62 |       return "kTfLiteInt64";
63 |     case kTfLiteString:
64 |       return "kTfLiteString";
65 |     case kTfLiteBool:
66 |       return "kTfLiteBool";
67 |     case kTfLiteInt16:
68 |       return "kTfLiteInt16";
69 |     case kTfLiteComplex64:
70 |       return "kTfLiteComplex64";
71 |     case kTfLiteFloat16:
72 |       return "kTfLiteFloat16";
73 |   }
74 |   return "(invalid)";
75 | }
76 | 
77 | const char* AllocTypeName(TfLiteAllocationType type) {
78 |   switch (type) {
79 |     case kTfLiteMemNone:
80 |       return "kTfLiteMemNone";
81 |     case kTfLiteMmapRo:
82 |       return "kTfLiteMmapRo";
83 |     case kTfLiteDynamic:
84 |       return "kTfLiteDynamic";
85 |     case kTfLiteArenaRw:
86 |       return "kTfLiteArenaRw";
87 |     case kTfLiteArenaRwPersistent:
88 |       return "kTfLiteArenaRwPersistent";
89 |   }
90 |   return "(invalid)";
91 | }
92 | }  // namespace
93 | 
94 | // Prints a dump of what tensors and what nodes are in the interpreter.
95 | void PrintInterpreterState(MicroInterpreter* interpreter) {
96 |   printf("Interpreter has %zu tensors and %zu nodes\n",
97 |          interpreter->tensors_size(), interpreter->operators_size());
98 |   printf("Inputs:");
99 |   PrintIntVector(flatbuffersVector2StdVector(interpreter->inputs()));
100 |   printf("Outputs:");
101 |   PrintIntVector(flatbuffersVector2StdVector(interpreter->outputs()));
102 |   printf("\n");
103 | 
104 |   for (size_t tensor_index = 0; tensor_index < interpreter->tensors_size();
105 |        tensor_index++) {
106 |     TfLiteTensor* tensor = interpreter->tensor(static_cast<int>(tensor_index));
107 |     printf("Tensor %3zu %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
108 |            tensor->name, TensorTypeName(tensor->type),
109 |            AllocTypeName(tensor->allocation_type), tensor->bytes,
110 |            static_cast<float>(tensor->bytes) / (1 << 20));
111 |     PrintTfLiteIntVector(tensor->dims);
112 |   }
113 |   printf("\n");
114 | 
115 |   for (size_t node_index = 0; node_index < interpreter->operators_size();
116 |        node_index++) {
117 |     struct pairTfLiteNodeAndRegistration node_and_reg =
118 |         interpreter->node_and_registration(static_cast<int>(node_index));
119 |     const TfLiteNode& node = node_and_reg.node;
120 |     const TfLiteRegistration* reg = node_and_reg.registration;
121 |     if (reg->custom_name != nullptr) {
122 |       printf("Node %3zu Operator Custom Name %s\n", node_index,
123 |              reg->custom_name);
124 |     } else {
125 |       printf("Node %3zu Operator Builtin Code %3d %s\n", node_index,
126 |              reg->builtin_code, EnumNamesBuiltinOperator()[reg->builtin_code]);
127 |     }
128 |     printf("  Inputs:");
129 |     PrintTfLiteIntVector(node.inputs);
130 |     printf("  Outputs:");
131 |     PrintTfLiteIntVector(node.outputs);
132 |   }
133 | }
134 | 
135 | }  // namespace tflite
136 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_optional_debug_tools.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Optional debugging functionality. For small sized binaries, these are not
16 | // needed.
17 | #ifndef TENSORFLOW_LITE_MICRO_OPTIONAL_DEBUG_TOOLS_H_
18 | #define TENSORFLOW_LITE_MICRO_OPTIONAL_DEBUG_TOOLS_H_
19 | 
20 | #include "tensorflow/lite/experimental/micro/micro_interpreter.h"
21 | 
22 | namespace tflite {
23 | // Prints a dump of what tensors and what nodes are in the interpreter.
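// A typical call site, assuming an interpreter has already been built from a
// model, op resolver, arena and error reporter (a sketch, not a complete
// program; all names below are illustrative):
//
//   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
//                                        kTensorArenaSize, &error_reporter);
//   interpreter.AllocateTensors();
//   tflite::PrintInterpreterState(&interpreter);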
24 | class MicroInterpreter;
25 | void PrintInterpreterState(MicroInterpreter* interpreter);
26 | 
27 | #ifdef __cplusplus
28 | extern "C" {
29 | #endif  // __cplusplus
30 | struct pairTfLiteNodeAndRegistration {
31 |   TfLiteNode node;
32 |   const TfLiteRegistration* registration;
33 | };
34 | #ifdef __cplusplus
35 | }  // extern "C"
36 | #endif  // __cplusplus
37 | 
38 | }  // namespace tflite
39 | 
40 | #endif  // TENSORFLOW_LITE_MICRO_OPTIONAL_DEBUG_TOOLS_H_
41 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/micro_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_UTILS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_UTILS_H_
18 | 
19 | #include <stdint.h>
20 | 
21 | #include "tensorflow/lite/c/c_api_internal.h"
22 | 
23 | namespace tflite {
24 | 
25 | // Returns number of elements in the shape array.
26 | 
27 | int ElementCount(const TfLiteIntArray& dims);
28 | 
29 | uint8_t FloatToAsymmetricQuantizedUInt8(const float value, const float scale,
30 |                                         const int zero_point);
31 | 
32 | uint8_t FloatToSymmetricQuantizedUInt8(const float value, const float scale);
33 | 
34 | int8_t FloatToAsymmetricQuantizedInt8(const float value, const float scale,
35 |                                       const int zero_point);
36 | 
37 | int8_t FloatToSymmetricQuantizedInt8(const float value, const float scale);
38 | 
39 | // Converts a float value into a signed thirty-two-bit quantized value. Note
40 | // that values close to max int and min int may see significant error due to
41 | // a lack of floating point granularity for large values.
42 | int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale);
43 | 
44 | // Helper methods to quantize arrays of floats to the desired format.
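// As a rough worked example (values illustrative): with scale 0.5f and the
// default uint8 zero_point of 128, AsymmetricQuantize maps the float 1.0f to
// round(1.0f / 0.5f) + 128 = 130, and floats outside [-64.0f, 63.5f]
// saturate to the [0, 255] output range.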
45 | //
46 | // There are several key flavors of quantization in TfLite:
47 | //         asymmetric   symmetric   per channel
48 | //  int8  |     X      |     X     |      X      |
49 | //  uint8 |     X      |     X     |             |
50 | //  int32 |            |     X     |      X      |
51 | //
52 | // The per-op quantization spec can be found here:
53 | // https://www.tensorflow.org/lite/performance/quantization_spec
54 | 
55 | void AsymmetricQuantize(const float* input, int8_t* output, int num_elements,
56 |                         float scale, int zero_point = 0);
57 | 
58 | void AsymmetricQuantize(const float* input, uint8_t* output, int num_elements,
59 |                         float scale, int zero_point = 128);
60 | 
61 | void SymmetricQuantize(const float* input, int32_t* output, int num_elements,
62 |                        float scale);
63 | 
64 | void SymmetricPerChannelQuantize(const float* input, int32_t* output,
65 |                                  int num_elements, int num_channels,
66 |                                  float* scales);
67 | 
68 | void SignedSymmetricPerChannelQuantize(const float* values,
69 |                                        TfLiteIntArray* dims,
70 |                                        int quantized_dimension,
71 |                                        int8_t* quantized_values,
72 |                                        float* scaling_factor);
73 | 
74 | void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
75 |                              int8_t* quantized_values, float* scaling_factor);
76 | 
77 | void SymmetricQuantize(const float* values, TfLiteIntArray* dims,
78 |                        uint8_t* quantized_values, float* scaling_factor);
79 | 
80 | void SymmetricDequantize(const int8_t* values, const int size,
81 |                          const float dequantization_scale,
82 |                          float* dequantized_values);
83 | 
84 | }  // namespace tflite
85 | 
86 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_UTILS_H_
87 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/simple_memory_allocator.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #include "tensorflow/lite/experimental/micro/simple_memory_allocator.h"
17 | 
18 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
19 | #include "tensorflow/lite/experimental/micro/memory_helpers.h"
20 | 
21 | namespace tflite {
22 | 
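// Worked example of the tail-allocation arithmetic below (values illustrative,
// and assuming the arena itself is 8-byte aligned): with a 1024-byte arena and
// data_size_ == 0, a request of size 10 with alignment 8 computes
// previous_free = data_ + 1024 and current_data = data_ + 1014, aligns down to
// data_ + 1008, and so consumes 16 bytes from the tail of the arena.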
23 | uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size,
24 |                                                  size_t alignment) {
25 |   if (has_child_allocator_) {
26 |     // TODO(wangtz): Add error reporting when the parent allocator is locked!
27 |     return nullptr;
28 |   }
29 |   uint8_t* previous_free = (data_ + data_size_max_) - data_size_;
30 |   uint8_t* current_data = previous_free - size;
31 |   uint8_t* aligned_result = AlignPointerDown(current_data, alignment);
32 |   size_t aligned_size = (previous_free - aligned_result);
33 |   if ((data_size_ + aligned_size) > data_size_max_) {
34 |     // TODO(petewarden): Add error reporting beyond returning null!
35 |     return nullptr;
36 |   }
37 |   data_size_ += aligned_size;
38 |   return aligned_result;
39 | }
40 | 
41 | SimpleMemoryAllocator SimpleMemoryAllocator::CreateChildAllocator() {
42 |   // Note that the parameterized constructor initializes data_size_ to 0,
43 |   // which is not what we want here, so the child is copy-constructed instead.
44 |   SimpleMemoryAllocator child = *this;
45 |   child.parent_allocator_ = this;
46 |   has_child_allocator_ = true;
47 |   return child;
48 | }
49 | 
50 | SimpleMemoryAllocator::~SimpleMemoryAllocator() {
51 |   // Root allocator doesn't have a parent.
52 |   if (nullptr != parent_allocator_) {
53 |     parent_allocator_->has_child_allocator_ = false;
54 |   }
55 | }
56 | 
57 | }  // namespace tflite
58 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/simple_memory_allocator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
18 | 
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/schema/schema_generated.h"
22 | 
23 | namespace tflite {
24 | 
25 | // TODO(petewarden): This allocator never frees up or reuses any memory, even
26 | // though we have enough information about lifetimes of the tensors to do so.
27 | // This makes it pretty wasteful, so we should use a more intelligent method.
28 | class SimpleMemoryAllocator {
29 |  public:
30 |   SimpleMemoryAllocator(uint8_t* buffer, size_t buffer_size)
31 |       : data_size_max_(buffer_size), data_(buffer) {}
32 | 
33 |   // Allocates memory starting at the end of the arena (highest address,
34 |   // moving downwards), so that tensor buffers can be allocated from the start
35 |   // in ascending order.
36 |   uint8_t* AllocateFromTail(size_t size, size_t alignment);
37 | 
38 |   int GetDataSize() const { return data_size_; }
39 | 
40 |   // A child allocator acts as a temporary allocator: memory allocated by
41 |   // the child is freed once the child allocator is destroyed. Child
42 |   // allocators can be cascaded, for example to create a grandchild
43 |   // allocator, but at any given time only the most recent child can be
44 |   // used. All of its ancestors are locked to avoid memory corruption;
45 |   // locked means that the allocator can't allocate memory.
46 |   // WARNING: The parent allocator needs to live longer than the child.
47 |   SimpleMemoryAllocator CreateChildAllocator();
48 | 
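  // A minimal sketch of the intended nesting (all names illustrative):
  //
  //   SimpleMemoryAllocator root(arena, arena_size);
  //   {
  //     SimpleMemoryAllocator child = root.CreateChildAllocator();
  //     uint8_t* scratch = child.AllocateFromTail(128, 4);
  //     // While the child exists, root.AllocateFromTail() returns nullptr.
  //   }  // Child destroyed here; root is unlocked again.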
49 |   // Unlocks the parent allocator when the child allocator is destructed.
50 |   ~SimpleMemoryAllocator();
51 | 
52 |  private:
53 |   int data_size_ = 0;
54 |   size_t data_size_max_;
55 |   uint8_t* data_;
56 |   SimpleMemoryAllocator* parent_allocator_ = nullptr;
57 |   // The allocator is locked if it has a child.
58 |   bool has_child_allocator_ = false;
59 | };
60 | 
61 | }  // namespace tflite
62 | 
63 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
64 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/experimental/micro/test_helpers.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_
18 | 
19 | // Useful functions for writing tests.
20 | 
21 | #include "tensorflow/lite/c/c_api_internal.h"
22 | #include "tensorflow/lite/core/api/error_reporter.h"
23 | #include "tensorflow/lite/schema/schema_generated.h"
24 | 
25 | namespace tflite {
26 | namespace testing {
27 | 
28 | // Returns an example flatbuffer TensorFlow Lite model.
29 | const Model* GetMockModel();
30 | 
31 | // Builds a one-dimensional flatbuffer tensor of the given size.
32 | const Tensor* Create1dFlatbufferTensor(int size);
33 | 
34 | // Creates a one-dimensional tensor with no quantization metadata.
35 | const Tensor* CreateMissingQuantizationFlatbufferTensor(int size);
36 | 
37 | // Creates a vector of flatbuffer buffers.
38 | const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
39 | CreateFlatbufferBuffers();
40 | 
41 | // Performs a simple string comparison without requiring standard C library.
42 | int TestStrcmp(const char* a, const char* b);
43 | 
44 | // Wrapper to forward kernel errors to the interpreter's error reporter.
45 | void ReportOpError(struct TfLiteContext* context, const char* format, ...);
46 | 
47 | void PopulateContext(TfLiteTensor* tensors, int tensors_size,
48 |                      TfLiteContext* context);
49 | 
50 | // Create a TfLiteIntArray from an array of ints. The first element in the
51 | // supplied array must be the size of the array expressed as an int.
52 | TfLiteIntArray* IntArrayFromInts(const int* int_array);
53 | 
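// For instance (illustrative): the backing array for a 2-D shape {2, 3} is
// int dims_data[] = {2, 2, 3}; IntArrayFromInts(dims_data) then yields a
// TfLiteIntArray with size 2 and data {2, 3}.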
54 | // Create a TfLiteFloatArray from an array of floats. The first element in the
55 | // supplied array must be the size of the array expressed as a float.
56 | TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
57 | 
58 | TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
59 |                                const char* name, bool is_variable = false);
60 | 
61 | void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);
62 | 
63 | TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
64 |                               const char* name, bool is_variable = false);
65 | 
66 | TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
67 |                                const char* name, bool is_variable = false);
68 | 
69 | TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
70 |                                    float scale, int zero_point,
71 |                                    const char* name, bool is_variable = false);
72 | 
73 | TfLiteTensor CreateQuantizedTensor(const float* input, uint8_t* quantized,
74 |                                    TfLiteIntArray* dims, float scale,
75 |                                    int zero_point, const char* name,
76 |                                    bool is_variable = false);
77 | 
78 | TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
79 |                                    float scale, int zero_point,
80 |                                    const char* name, bool is_variable = false);
81 | 
82 | TfLiteTensor CreateQuantizedTensor(const float* input, int8_t* quantized,
83 |                                    TfLiteIntArray* dims, float scale,
84 |                                    int zero_point, const char* name,
85 |                                    bool is_variable = false);
86 | 
87 | TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
88 |                                        TfLiteIntArray* dims, float input_scale,
89 |                                        float weights_scale, const char* name,
90 |                                        bool is_variable = false);
91 | 
92 | // Quantizes int32 bias tensor with per-channel weights determined by input
93 | // scale multiplied by weight scale for each channel.
94 | TfLiteTensor CreatePerChannelQuantizedBiasTensor(
95 |     const float* input, int32_t* quantized, TfLiteIntArray* dims,
96 |     float input_scale, float* weight_scales, float* scales, int* zero_points,
97 |     TfLiteAffineQuantization* affine_quant, int quantized_dimension,
98 |     const char* name, bool is_variable = false);
99 | 
100 | TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
101 |     const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
102 |     int* zero_points, TfLiteAffineQuantization* affine_quant,
103 |     int quantized_dimension, const char* name, bool is_variable = false);
104 | 
105 | }  // namespace testing
106 | }  // namespace tflite
107 | 
108 | #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_
109 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/compatibility.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
17 | 
18 | #include <cstdint>
19 | 
20 | #include "tensorflow/lite/kernels/op_macros.h"
21 | 
22 | #ifndef TFLITE_DCHECK
23 | #define TFLITE_DCHECK(condition) (condition) ? (void)0 : TFLITE_ASSERT_FALSE
24 | #endif
25 | 
26 | #ifndef TFLITE_DCHECK_EQ
27 | #define TFLITE_DCHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ASSERT_FALSE
28 | #endif
29 | 
30 | #ifndef TFLITE_DCHECK_NE
31 | #define TFLITE_DCHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ASSERT_FALSE
32 | #endif
33 | 
34 | #ifndef TFLITE_DCHECK_GE
35 | #define TFLITE_DCHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ASSERT_FALSE
36 | #endif
37 | 
38 | #ifndef TFLITE_DCHECK_GT
39 | #define TFLITE_DCHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ASSERT_FALSE
40 | #endif
41 | 
42 | #ifndef TFLITE_DCHECK_LE
43 | #define TFLITE_DCHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ASSERT_FALSE
44 | #endif
45 | 
46 | #ifndef TFLITE_DCHECK_LT
47 | #define TFLITE_DCHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ASSERT_FALSE
48 | #endif
49 | 
50 | // TODO(ahentz): Clean up: We should stick to the DCHECK versions.
51 | #ifndef TFLITE_CHECK
52 | #define TFLITE_CHECK(condition) (condition) ? (void)0 : TFLITE_ABORT
53 | #endif
54 | 
55 | #ifndef TFLITE_CHECK_EQ
56 | #define TFLITE_CHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ABORT
57 | #endif
58 | 
59 | #ifndef TFLITE_CHECK_NE
60 | #define TFLITE_CHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ABORT
61 | #endif
62 | 
63 | #ifndef TFLITE_CHECK_GE
64 | #define TFLITE_CHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ABORT
65 | #endif
66 | 
67 | #ifndef TFLITE_CHECK_GT
68 | #define TFLITE_CHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ABORT
69 | #endif
70 | 
71 | #ifndef TFLITE_CHECK_LE
72 | #define TFLITE_CHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ABORT
73 | #endif
74 | 
75 | #ifndef TFLITE_CHECK_LT
76 | #define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT
77 | #endif
78 | 
79 | // TODO(ahentz): Clean up.
80 | using int8 = std::int8_t;
81 | using uint8 = std::uint8_t;
82 | using int16 = std::int16_t;
83 | using uint16 = std::uint16_t;
84 | using int32 = std::int32_t;
85 | using uint32 = std::uint32_t;
86 | 
87 | // TFLITE_DEPRECATED()
88 | //
89 | // Duplicated from absl/base/macros.h to avoid pulling in that library.
90 | // Marks deprecated class, struct, enum, function, method and variable
91 | // declarations. The macro argument is used as a custom diagnostic message (e.g.
92 | // suggestion of a better alternative).
93 | //
94 | // Example:
95 | //
96 | //   class TFLITE_DEPRECATED("Use Bar instead") Foo {...};
97 | //   TFLITE_DEPRECATED("Use Baz instead") void Bar() {...}
98 | //
99 | // Every usage of a deprecated entity will trigger a warning when compiled with
100 | // clang's `-Wdeprecated-declarations` option. This option is turned off by
101 | // default, but the warnings will be reported by clang-tidy.
102 | #if defined(__clang__) && __cplusplus >= 201103L
103 | #define TFLITE_DEPRECATED(message) __attribute__((deprecated(message)))
104 | #endif
105 | 
106 | #ifndef TFLITE_DEPRECATED
107 | #define TFLITE_DEPRECATED(message)
108 | #endif
109 | 
110 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
111 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
17 | 
18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON)
19 | #define USE_NEON
20 | #include <arm_neon.h>
21 | #endif
22 | 
23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
24 | #define USE_NEON
25 | #pragma GCC diagnostic push
26 | #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
27 | #pragma GCC diagnostic ignored "-Wattributes"
28 | #pragma GCC diagnostic ignored "-Wnarrowing"
29 | #pragma GCC diagnostic ignored "-Wsequence-point"
30 | #include "NEON_2_SSE.h"
31 | #pragma GCC diagnostic pop
32 | #endif
33 | 
34 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
35 | // defined, PortableSomeFunc(args) otherwise.
36 | #ifdef USE_NEON
37 | // Always use Neon code
38 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
39 | 
40 | #else
41 | // No NEON available: Use Portable code
42 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
43 | 
44 | #endif  // defined(USE_NEON)
45 | 
46 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
47 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
17 | 
18 | #include "tensorflow/lite/kernels/internal/types.h"
19 | 
20 | namespace tflite {
21 | 
22 | namespace reference_ops {
23 | 
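// As a small worked example (values illustrative): for input1_shape {2, 3},
// input1_data {1, 9, 4, 7, 2, 8}, axis input2_data[0] = 1 and a greater-than
// cmp, this computes an arg-max over the last dimension and writes the
// indices {1, 2} (the positions of 9 and 8) to output_data.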
24 | template <typename T1, typename T2, typename T3, typename Cmp>
25 | void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
26 |                const T3* input2_data, const RuntimeShape& output_shape,
27 |                T2* output_data, const Cmp& cmp) {
28 |   TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0);
29 |   TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1,
30 |                    output_shape.DimensionsCount());
31 |   int axis = input2_data[0];
32 |   if (axis < 0) {
33 |     axis += input1_shape.DimensionsCount();
34 |   }
35 |   const int axis_size = input1_shape.Dims(axis);
36 | 
37 |   int outer_size = 1;
38 |   for (int i = 0; i < axis; ++i) {
39 |     TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i));
40 |     outer_size *= input1_shape.Dims(i);
41 |   }
42 | 
43 |   int inner_size = 1;
44 |   const int dims_count = input1_shape.DimensionsCount();
45 |   for (int i = axis + 1; i < dims_count; ++i) {
46 |     TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1));
47 |     inner_size *= input1_shape.Dims(i);
48 |   }
49 |   for (int outer = 0; outer < outer_size; ++outer) {
50 |     for (int inner = 0; inner < inner_size; ++inner) {
51 |       auto min_max_value = input1_data[outer * axis_size * inner_size + inner];
52 |       T2 min_max_index = 0;
53 |       for (int i = 1; i < axis_size; ++i) {
54 |         const auto& curr_value =
55 |             input1_data[(outer * axis_size + i) * inner_size + inner];
56 |         if (cmp(curr_value, min_max_value)) {
57 |           min_max_value = curr_value;
58 |           min_max_index = static_cast<T2>(i);
59 |         }
60 |       }
61 |       output_data[outer * inner_size + inner] = min_max_index;
62 |     }
63 |   }
64 | }
65 | }  // namespace reference_ops
66 | }  // namespace tflite
67 | 
68 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
69 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
17 | 
18 | #include "tensorflow/lite/kernels/internal/common.h"
19 | #include "tensorflow/lite/kernels/internal/compatibility.h"
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 | 
22 | namespace tflite {
23 | 
24 | namespace reference_ops {
25 | 
26 | // TODO(ycling): Refactoring. Remove BroadcastLogical and use the more
27 | // generalized and efficient BroadcastBinaryFunction.
28 | //
29 | // Also appears to duplicate MinimumMaximum.
30 | //
31 | // R: Result type. T1: Input 1 type. T2: Input 2 type.
32 | template <typename R, typename T1, typename T2>
33 | inline void BroadcastBinaryFunction4DSlow(
34 |     const RuntimeShape& unextended_input1_shape, const T1* input1_data,
35 |     const RuntimeShape& unextended_input2_shape, const T2* input2_data,
36 |     const RuntimeShape& unextended_output_shape, R* output_data,
37 |     R (*func)(T1, T2)) {
38 |   TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
39 |   TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
40 |   TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
41 |   const RuntimeShape output_shape =
42 |       RuntimeShape::ExtendedShape(4, unextended_output_shape);
43 | 
44 |   NdArrayDesc<4> desc1;
45 |   NdArrayDesc<4> desc2;
46 |   NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
47 |                                       unextended_input2_shape, &desc1, &desc2);
48 | 
49 |   for (int b = 0; b < output_shape.Dims(0); ++b) {
50 |     for (int y = 0; y < output_shape.Dims(1); ++y) {
51 |       for (int x = 0; x < output_shape.Dims(2); ++x) {
52 |         for (int c = 0; c < output_shape.Dims(3); ++c) {
53 |           auto out_idx = Offset(output_shape, b, y, x, c);
54 |           auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
55 |           auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
56 |           auto in1_val = input1_data[in1_idx];
57 |           auto in2_val = input2_data[in2_idx];
58 |           output_data[out_idx] = func(in1_val, in2_val);
59 |         }
60 |       }
61 |     }
62 |   }
63 | }
64 | 
65 | // R: Result type. T1: Input 1 type. T2: Input 2 type.
66 | // TODO(renjieliu): Refactor other binary functions to use this one.
67 | template <typename R, typename T1, typename T2>
68 | inline void BinaryFunction(const RuntimeShape& input1_shape,
69 |                            const T1* input1_data,
70 |                            const RuntimeShape& input2_shape,
71 |                            const T2* input2_data,
72 |                            const RuntimeShape& output_shape, R* output_data,
73 |                            R (*func)(T1, T2)) {
74 |   const int flat_size =
75 |       MatchingFlatSize(input1_shape, input2_shape, output_shape);
76 |   for (int i = 0; i < flat_size; ++i) {
77 |     output_data[i] = func(input1_data[i], input2_data[i]);
78 |   }
79 | }
80 | 
81 | }  // namespace reference_ops
82 | }  // namespace tflite
83 | 
84 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
85 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
17 | 
18 | #include <cmath>
19 | 
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 | 
22 | namespace tflite {
23 | 
24 | namespace reference_ops {
25 | 
26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
27 |                  const RuntimeShape& output_shape, float* output_data) {
28 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 | 
30 |   for (int i = 0; i < flat_size; ++i) {
31 |     output_data[i] = std::ceil(input_data[i]);
32 |   }
33 | }
34 | 
35 | }  // namespace reference_ops
36 | }  // namespace tflite
37 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
38 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
17 | 
18 | #include "tensorflow/lite/kernels/internal/common.h"
19 | #include "tensorflow/lite/kernels/internal/compatibility.h"
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 | 
22 | namespace tflite {
23 | namespace reference_ops {
24 | 
25 | inline void DepthwiseConv(
26 |     const DepthwiseParams& params, const RuntimeShape& input_shape,
27 |     const float* input_data, const RuntimeShape& filter_shape,
28 |     const float* filter_data, const RuntimeShape& bias_shape,
29 |     const float* bias_data, const RuntimeShape& output_shape,
30 |     float* output_data) {
31 |   const int stride_width = params.stride_width;
32 |   const int stride_height = params.stride_height;
33 |   const int dilation_width_factor = params.dilation_width_factor;
34 |   const int dilation_height_factor = params.dilation_height_factor;
35 |   const int pad_width = params.padding_values.width;
36 |   const int pad_height = params.padding_values.height;
37 |   const int depth_multiplier = params.depth_multiplier;
38 |   const float output_activation_min = params.float_activation_min;
39 |   const float output_activation_max = params.float_activation_max;
40 |   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
41 |   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
42 |   TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
43 | 
44 |   const int batches = MatchingDim(input_shape, 0, output_shape, 0);
45 |   const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
46 |   const int input_height = input_shape.Dims(1);
47 |   const int input_width = input_shape.Dims(2);
48 |   const int input_depth = input_shape.Dims(3);
49 |   const int filter_height = filter_shape.Dims(1);
50 |   const int filter_width = filter_shape.Dims(2);
51 |   const int output_height = output_shape.Dims(1);
52 |   const int output_width = output_shape.Dims(2);
53 |   TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
54 |   TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
55 | 
56 |   for (int b = 0; b < batches; ++b) {
57 |     for (int out_y = 0; out_y < output_height; ++out_y) {
58 |       for (int out_x = 0; out_x < output_width; ++out_x) {
59 |         for (int ic = 0; ic < input_depth; ++ic) {
60 |           for (int m = 0; m < depth_multiplier; m++) {
61 |             const int oc = m + ic * depth_multiplier;
62 |             const int in_x_origin = (out_x * stride_width) - pad_width;
63 |             const int in_y_origin = (out_y * stride_height) - pad_height;
64 |             float total = 0.f;
65 |             for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
66 |               for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
67 |                 const int in_x = in_x_origin + dilation_width_factor * filter_x;
68 |                 const int in_y =
69 |                     in_y_origin + dilation_height_factor * filter_y;
70 |                 // If the location is outside the bounds of the input image,
71 |                 // use zero as a default value.
72 |                 if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
73 |                     (in_y < input_height)) {
74 |                   float input_value =
75 |                       input_data[Offset(input_shape, b, in_y, in_x, ic)];
76 |                   float filter_value = filter_data[Offset(
77 |                       filter_shape, 0, filter_y, filter_x, oc)];
78 |                   total += (input_value * filter_value);
79 |                 }
80 |               }
81 |             }
82 |             float bias_value = 0.0f;
83 |             if (bias_data) {
84 |               bias_value = bias_data[oc];
85 |             }
86 |             output_data[Offset(output_shape, b, out_y, out_x, oc)] =
87 |                 ActivationFunctionWithMinMax(total + bias_value,
88 |                                              output_activation_min,
89 |                                              output_activation_max);
90 |           }
91 |         }
92 |       }
93 |     }
94 |   }
95 | }
96 | 
97 | }  // end namespace reference_ops
98 | }  // end namespace tflite
99 | 
100 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
101 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/dequantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
17 | 
18 | #include "tensorflow/lite/kernels/internal/common.h"
19 | #include "tensorflow/lite/kernels/internal/types.h"
20 | 
21 | namespace tflite {
22 | 
23 | namespace reference_ops {
24 | 
25 | template <typename T>
26 | inline void Dequantize(const tflite::DequantizationParams& op_params,
27 |                        const RuntimeShape& input_shape, const T* input_data,
28 |                        const RuntimeShape& output_shape, float* output_data) {
29 |   int32 zero_point = op_params.zero_point;
30 |   const double scale = op_params.scale;
31 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
32 | 
33 |   for (int i = 0; i < flat_size; i++) {
34 |     const int32 val = input_data[i];
35 |     const float result = static_cast<float>(scale * (val - zero_point));
36 |     output_data[i] = result;
37 |   }
38 | }
39 | 
40 | }  // namespace reference_ops
41 | 
42 | }  // namespace tflite
43 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
44 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
17 | 
18 | #include <cmath>
19 | 
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 | 
22 | namespace tflite {
23 | 
24 | namespace reference_ops {
25 | 
26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data,
27 |                   const RuntimeShape& output_shape, float* output_data) {
28 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 | 
30 |   for (int i = 0; i < flat_size; i++) {
31 |     int offset = i;
32 |     output_data[offset] = std::floor(input_data[offset]);
33 |   }
34 | }
35 | 
36 | }  // namespace reference_ops
37 | }  // namespace tflite
38 | 
39 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
40 | 
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_integer_ops {

inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data) {
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);

  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = output_shape.Dims(0);
  const int output_depth = output_shape.Dims(1);
  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int b = 0; b < batches; ++b) {
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      int32 acc = 0;
      for (int d = 0; d < accum_depth; ++d) {
        int32 input_val = input_data[b * accum_depth + d];
        int32 filter_val = filter_data[out_c * accum_depth + d];
        acc += (filter_val + filter_offset) * (input_val + input_offset);
      }
      if (bias_data) {
        acc += bias_data[out_c];
      }
      acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
      acc += output_offset;
      acc = std::max(acc, output_activation_min);
      acc = std::min(acc, output_activation_max);
      output_data[out_c + output_depth * b] = static_cast<int8_t>(acc);
    }
  }
}

}  // namespace reference_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/softmax.h:
--------------------------------------------------------------------------------
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_integer_ops {

// Quantized softmax with int8 input and output.
inline void Softmax(const SoftmaxParams& params,
                    const RuntimeShape& input_shape, const int8* input_data,
                    const RuntimeShape& output_shape, int8* output_data) {
  const int32 input_beta_multiplier = params.input_multiplier;
  const int32 input_beta_left_shift = params.input_left_shift;
  const int diff_min = params.diff_min;
  // The representation chosen for the input to the exp() function is Q5.26.
  // We need to leave extra space since values that we skip might be as large as
  // -32 before multiplying by input_beta_multiplier, and therefore as large as
  // -16 afterwards. Note that exp(-8) is definitely not insignificant to
  // accumulation, but exp(-16) definitely is.
  static const int kScaledDiffIntegerBits = 5;
  static const int kAccumulationIntegerBits = 12;
  using FixedPointScaledDiff =
      gemmlowp::FixedPoint<int32, kScaledDiffIntegerBits>;
  using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
  using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;

  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);

  for (int i = 0; i < outer_size; ++i) {
    int8 max_in_row = -128;
    for (int c = 0; c < depth; ++c) {
      max_in_row = std::max(max_in_row, input_data[i * depth + c]);
    }

    FixedPointAccum sum_of_exps = FixedPointAccum::Zero();
    for (int c = 0; c < depth; ++c) {
      int32 input_diff =
          static_cast<int32>(input_data[i * depth + c]) - max_in_row;
      if (input_diff >= diff_min) {
        const int32 input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_beta_multiplier, input_beta_left_shift);
        const FixedPointScaledDiff scaled_diff_f8 =
            FixedPointScaledDiff::FromRaw(input_diff_rescaled);
        sum_of_exps = sum_of_exps + gemmlowp::Rescale<kAccumulationIntegerBits>(
                                        exp_on_negative_values(scaled_diff_f8));
      }
    }

    int num_bits_over_unit;
    FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal(
        sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit));

    for (int c = 0; c < depth; ++c) {
      int32 input_diff =
          static_cast<int32>(input_data[i * depth + c]) - max_in_row;
      if (input_diff >= diff_min) {
        const int32 input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_beta_multiplier, input_beta_left_shift);
        const FixedPointScaledDiff scaled_diff_f8 =
            FixedPointScaledDiff::FromRaw(input_diff_rescaled);

        FixedPoint0 exp_in_0 =
            exp_on_negative_values(scaled_diff_f8);
        const int32 unsat_output = gemmlowp::RoundingDivideByPOT(
            (shifted_scale * exp_in_0).raw(), num_bits_over_unit + 31 - 8);
        const int32 shifted_output = unsat_output - 128;

        output_data[i * depth + c] = static_cast<int8>(
            std::max(std::min(shifted_output, static_cast<int32>(127)),
                     static_cast<int32>(-128)));

      } else {
        output_data[i * depth + c] = -128;
      }
    }
  }
}

}  // namespace reference_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_SOFTMAX_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/logistic.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_

#include <cmath>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/round.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/op_macros.h"

namespace tflite {
namespace reference_ops {

inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
                     const RuntimeShape& output_shape, float* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; i++) {
    float val = input_data[i];
    float result = 1.f / (1.f + std::exp(-val));
    output_data[i] = result;
  }
}

// Convenience version that allows, for example, generated-code calls to be
// uniform between data types.
inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape,
                     const float* input_data, const RuntimeShape& output_shape,
                     float* output_data) {
  // Drop params: not needed.
  Logistic(input_shape, input_data, output_shape, output_data);
}

inline void Logistic(const LogisticParams& params,
                     const RuntimeShape& input_shape, const int16* input_data,
                     const RuntimeShape& output_shape, int16* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<int16, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
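    // Illustration (added comment, not part of the upstream header): with
    // this Q3.12 input convention, a raw int16 value of 4096 represents 1.0,
    // and the Q0.15 result for logistic(1.0) ~= 0.731 comes back as roughly
    // 0.731 * 32768 ~= 23956. A minimal sketch of driving this overload
    // directly (names local to the sketch):
    //
    //   int16 in[1] = {4096};  // 1.0 in Q3.12
    //   int16 out[1];
    //   tflite::LogisticParams params;  // unused by this int16 path
    //   tflite::reference_ops::Logistic(params, RuntimeShape({1}), in,
    //                                   RuntimeShape({1}), out);
    //   // out[0] now holds logistic(1.0) in Q0.15.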
    using F3 = gemmlowp::FixedPoint<int16, 3>;

    const F3 input = F3::FromRaw(input_data[i]);
    F0 output = gemmlowp::logistic(input);
    output_data[i] = output.raw();
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

template <typename T, typename Op>
void MaximumMinimumBroadcast4DSlow(const RuntimeShape& unextended_input1_shape,
                                   const T* input1_data,
                                   const RuntimeShape& unextended_input2_shape,
                                   const T* input2_data,
                                   const RuntimeShape& unextended_output_shape,
                                   T* output_data, Op op) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);

  for (int b = 0; b < output_shape.Dims(0); ++b) {
    for (int y = 0; y < output_shape.Dims(1); ++y) {
      for (int x = 0; x < output_shape.Dims(2); ++x) {
        for (int c = 0; c < output_shape.Dims(3); ++c) {
          auto out_idx = Offset(output_shape, b, y, x, c);
          auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
          auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
          auto in1_val = input1_data[in1_idx];
          auto in2_val = input2_data[in2_idx];
          output_data[out_idx] = op(in1_val, in2_val);
        }
      }
    }
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

template <typename T>
inline void Negate(const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& output_shape, T* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = -input_data[i];
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/prelu.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

// Broadcast prelu to output_shape for quantized uint8 data.
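// Illustration (added comment, not upstream): in the quantized scheme below,
// real_value = scale * (quantized_value - zero_point), so the sign test
// `input_value >= 0` is done on the zero-point-adjusted value, not the raw
// byte. For the negative branch the real-valued result is input * alpha,
// computed from the adjusted input and alpha values and rescaled into the
// output's quantized domain via the fixed-point multiplier/shift pair in
// `params`. E.g. with params.input_offset == -128 and a raw input byte of
// 100, the adjusted value is -28 (negative), so the alpha path is taken.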
inline void BroadcastPrelu4DSlow(const PreluParams& params,
                                 const RuntimeShape& input_shape,
                                 const uint8* input_data,
                                 const RuntimeShape& alpha_shape,
                                 const uint8* alpha_data,
                                 const RuntimeShape& output_shape,
                                 uint8* output_data) {
  TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 4);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input_shape, alpha_shape, &desc1,
                                      &desc2);

  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          int output_index = Offset(extended_output_shape, b, y, x, c);
          int input_index = SubscriptToIndex(desc1, b, y, x, c);
          const int32 input_value =
              params.input_offset + input_data[input_index];
          if (input_value >= 0) {
            output_data[output_index] = input_data[input_index];
          } else {
            auto alpha_index = SubscriptToIndex(desc2, b, y, x, c);
            const int32 alpha_value =
                params.alpha_offset + alpha_data[alpha_index];
            const int32 unclamped_output =
                params.output_offset +
                MultiplyByQuantizedMultiplierSmallerThanOneExp(
                    input_value * alpha_value, params.output_multiplier,
                    params.output_shift);
            const int32 quantized_min = std::numeric_limits<uint8>::min();
            const int32 quantized_max = std::numeric_limits<uint8>::max();
            const int32 clamped_output = std::min(
                quantized_max, std::max(quantized_min, unclamped_output));
            output_data[output_index] = static_cast<uint8>(clamped_output);
          }
        }
      }
    }
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/round.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

template <typename T>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape, T* output_data) {
  const int32 zero_point = op_params.zero_point;
  const double scale = static_cast<double>(op_params.scale);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static constexpr int32 min_val = std::numeric_limits<T>::min();
  static constexpr int32 max_val = std::numeric_limits<T>::max();

  for (int i = 0; i < flat_size; i++) {
    const float val = input_data[i];
    int32 unclamped =
        static_cast<int32>(TfLiteRound(val / scale)) + zero_point;
    int32 clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[i] = clamped;
  }
}

}  // namespace reference_ops

}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/round.h:
--------------------------------------------------------------------------------
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_

#include <cmath>

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

inline float RoundToNearest(float value) {
  auto floor_val = std::floor(value);
  auto diff = value - floor_val;
  if ((diff < 0.5f) ||
      ((diff == 0.5f) && (static_cast<int>(floor_val) % 2 == 0))) {
    return floor_val;
  } else {
    return floor_val = floor_val + 1.0f;
  }
}

inline void Round(const RuntimeShape& input_shape, const float* input_data,
                  const RuntimeShape& output_shape, float* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    // Note that this implementation matches that of TensorFlow's tf.round
    // and corresponds to the banker's rounding method.
    // cfenv (for fesetround) is not yet supported universally on Android, so
    // using a workaround.
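    // Worked examples of the round-half-to-even behavior above (added
    // comment, not upstream): RoundToNearest(2.5f) == 2.0f and
    // RoundToNearest(3.5f) == 4.0f, since ties go to the even neighbor,
    // while RoundToNearest(2.4f) == 2.0f and RoundToNearest(2.6f) == 3.0f
    // simply round to the nearest integer.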
    output_data[i] = RoundToNearest(input_data[i]);
  }
}

}  // namespace reference_ops
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {
template <typename T>
inline void StridedSlice(const tflite::StridedSliceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  // Note that the output_shape is not used herein.
  tflite::StridedSliceParams params_copy = op_params;

  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  // Reverse and pad to 4 dimensions because that is what the runtime code
  // requires (ie. all shapes must be 4D and are given backwards).
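  // Example (added comment, not upstream): slicing a length-4 vector with
  // begin = 1, end = 3, stride = 1 copies elements 1 and 2; the padding step
  // below lifts those indices into the equivalent 4D form so the four nested
  // loops further down can handle every rank uniformly.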
  strided_slice::StridedSlicePadIndices(&params_copy, 4);

  const int start_b = strided_slice::StartForAxis(params_copy, input_shape, 0);
  const int stop_b =
      strided_slice::StopForAxis(params_copy, input_shape, 0, start_b);
  const int start_h = strided_slice::StartForAxis(params_copy, input_shape, 1);
  const int stop_h =
      strided_slice::StopForAxis(params_copy, input_shape, 1, start_h);
  const int start_w = strided_slice::StartForAxis(params_copy, input_shape, 2);
  const int stop_w =
      strided_slice::StopForAxis(params_copy, input_shape, 2, start_w);
  const int start_d = strided_slice::StartForAxis(params_copy, input_shape, 3);
  const int stop_d =
      strided_slice::StopForAxis(params_copy, input_shape, 3, start_d);

  T* out_ptr = output_data;
  for (int in_b = start_b;
       !strided_slice::LoopCondition(in_b, stop_b, params_copy.strides[0]);
       in_b += params_copy.strides[0]) {
    for (int in_h = start_h;
         !strided_slice::LoopCondition(in_h, stop_h, params_copy.strides[1]);
         in_h += params_copy.strides[1]) {
      for (int in_w = start_w;
           !strided_slice::LoopCondition(in_w, stop_w, params_copy.strides[2]);
           in_w += params_copy.strides[2]) {
        for (int in_d = start_d; !strided_slice::LoopCondition(
                 in_d, stop_d, params_copy.strides[3]);
             in_d += params_copy.strides[3]) {
          *out_ptr++ = input_data[Offset(input_shape, in_b, in_h, in_w, in_d)];
        }
      }
    }
  }
}
}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/round.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_

#include <cmath>

namespace tflite {

// TODO(aselle): See if we can do this only on jdk. Also mikecase, check
// if you need this for java host build.
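// Added note (not upstream): the first branch below is taken on targets whose
// C++ standard library lacks std::round (older Android NDKs and the Arduino
// toolchain among them) and falls back to the C library's ::round; everywhere
// else std::round is used directly.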
#if defined(TF_LITE_USE_GLOBAL_ROUND) || \
    (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO)
template <class T>
inline float TfLiteRound(const float x) {
  return ::round(x);
}
inline double TfLiteRound(const double x) { return ::round(x); }
#else
template <class T>
inline T TfLiteRound(const T x) {
  return std::round(x);
}
#endif

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/tensor.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_

#include <complex>
#include <vector>

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/string_util.h"

namespace tflite {

inline RuntimeShape GetTensorShape(std::vector<int32_t> data) {
  return RuntimeShape(data.size(), data.data());
}

// A list of tensors in a format that can be used by kernels like split and
// concatenation.
template <class T>
class VectorOfTensors {
 public:
  // Build with the tensors in 'tensor_list'.
  VectorOfTensors(const TfLiteContext& context,
                  const TfLiteIntArray& tensor_list) {
    int num_tensors = tensor_list.size;

    all_data_.reserve(num_tensors);
    all_shape_.reserve(num_tensors);
    all_shape_ptr_.reserve(num_tensors);

    for (int i = 0; i < num_tensors; ++i) {
      TfLiteTensor* t = &context.tensors[tensor_list.data[i]];
      all_data_.push_back(GetTensorData<T>(t));
      all_shape_.push_back(GetTensorShape(t));
    }

    // Taking the pointer from inside a std::vector is only OK if the vector
    // is never modified, so we populate all_shape in the previous loop and
    // then we are free to grab iterators here.
    for (int i = 0; i < num_tensors; ++i) {
      all_shape_ptr_.push_back(&all_shape_[i]);
    }
  }
  // Return a pointer to the data pointers of all tensors in the list. For
  // example:
  //   float* const* f = v.data();
  //   f[0][1] is the second element of the first tensor.
  T* const* data() const { return all_data_.data(); }

  // Return a pointer to the shape pointers of all tensors in the list. For
  // example:
  //   const RuntimeShape* const* d = v.dims();
  //   dims[1] are the dimensions of the second tensor in the list.
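  // Usage sketch (added comment, not upstream), e.g. inside a concatenation
  // kernel where `node->inputs` lists the tensors to join:
  //
  //   VectorOfTensors<float> all_inputs(*context, *node->inputs);
  //   float* const* all_data = all_inputs.data();
  //   const RuntimeShape* const* all_shapes = all_inputs.shapes();
  //
  // all_data[i] and all_shapes[i] then describe the i-th listed tensor.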
  const RuntimeShape* const* shapes() const { return all_shape_ptr_.data(); }

 private:
  std::vector<T*> all_data_;
  std::vector<RuntimeShape> all_shape_;
  std::vector<RuntimeShape*> all_shape_ptr_;
};

// A list of quantized tensors in a format that can be used by kernels like
// split and concatenation.
class VectorOfQuantizedTensors : public VectorOfTensors<uint8> {
 public:
  // Build with the tensors in 'tensor_list'.
  VectorOfQuantizedTensors(const TfLiteContext& context,
                           const TfLiteIntArray& tensor_list)
      : VectorOfTensors<uint8>(context, tensor_list) {
    for (int i = 0; i < tensor_list.size; ++i) {
      TfLiteTensor* t = &context.tensors[tensor_list.data[i]];
      zero_point_.push_back(t->params.zero_point);
      scale_.push_back(t->params.scale);
    }
  }

  const float* scale() const { return scale_.data(); }
  const int32* zero_point() const { return zero_point_.data(); }

 private:
  std::vector<int32> zero_point_;
  std::vector<float> scale_;
};

// Writes randomly accessed values from `input` sequentially into `output`.
template <typename T>
class SequentialTensorWriter {
 public:
  SequentialTensorWriter(const TfLiteTensor* input, TfLiteTensor* output) {
    input_data_ = GetTensorData<T>(input);
    output_ptr_ = GetTensorData<T>(output);
  }
  SequentialTensorWriter(const T* input_data, T* output_data)
      : input_data_(input_data), output_ptr_(output_data) {}

  void Write(int position) { *output_ptr_++ = input_data_[position]; }
  void WriteN(int position, int len) {
    memcpy(output_ptr_, &input_data_[position], sizeof(T) * len);
    output_ptr_ += len;
  }

 private:
  const T* input_data_;
  T* output_ptr_;
};

template <>
class SequentialTensorWriter<string> {
 public:
  SequentialTensorWriter(const TfLiteTensor* input, TfLiteTensor* output)
      : input_(input), output_(output) {}
  ~SequentialTensorWriter() { buffer_.WriteToTensor(output_, nullptr); }

  void Write(int position) { this->WriteN(position, 1); }
  void WriteN(int position, int len) {
    for (int i = 0; i < len; i++) {
      buffer_.AddString(GetString(input_, position + i));
    }
  }

 private:
  const TfLiteTensor* input_;
  TfLiteTensor* output_;
  DynamicBuffer buffer_;
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

template <typename T>
inline T* GetTensorData(TfLiteTensor* tensor) {
  return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
}

template <typename T>
inline const T* GetTensorData(const TfLiteTensor* tensor) {
  return tensor != nullptr ? reinterpret_cast<const T*>(tensor->data.raw)
                           : nullptr;
}

inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) {
  if (tensor == nullptr) {
    return RuntimeShape();
  }

  TfLiteIntArray* dims = tensor->dims;
  const int dims_size = dims->size;
  const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims->data);
  return RuntimeShape(dims_size, dims_data);
}

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/op_macros.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
#define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_

// If we're on a platform without standard IO functions, fall back to a
// non-portable function.
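// Added note (not upstream): on microcontroller builds (TF_LITE_MCU_DEBUG_LOG
// defined) the macros below route diagnostics through DebugLog() and halt in
// an infinite loop on failure, while desktop builds print to stderr and
// abort(). For example,
//
//   TF_LITE_ASSERT_EQ(input->type, kTfLiteFloat32);
//
// logs the stringified expressions and then never returns if they differ.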
#ifdef TF_LITE_MCU_DEBUG_LOG

#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"

#define DEBUG_LOG(x) \
  do {               \
    DebugLog(x);     \
  } while (0)

inline void InfiniteLoop() {
  DEBUG_LOG("HALTED\n");
  while (1) {
  }
}
#define TFLITE_ASSERT_FALSE InfiniteLoop();
#define TFLITE_ABORT InfiniteLoop();

#else  // TF_LITE_MCU_DEBUG_LOG

#include <cassert>
#include <cstdio>
#include <cstdlib>

#define DEBUG_LOG(x)            \
  do {                          \
    fprintf(stderr, "%s", (x)); \
  } while (0)

#define TFLITE_ABORT abort()

#ifdef NDEBUG
#define TFLITE_ASSERT_FALSE (static_cast<void>(0))
#else
#define TFLITE_ASSERT_FALSE TFLITE_ABORT
#endif

#endif  // TF_LITE_MCU_DEBUG_LOG

#define TF_LITE_FATAL(msg)  \
  do {                      \
    DEBUG_LOG(msg);         \
    DEBUG_LOG("\nFATAL\n"); \
    TFLITE_ABORT;           \
  } while (0)

#define TF_LITE_ASSERT(x)        \
  do {                           \
    if (!(x)) TF_LITE_FATAL(#x); \
  } while (0)

#define TF_LITE_ASSERT_EQ(x, y)                            \
  do {                                                     \
    if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
  } while (0)

#endif  // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/kernels/padding.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_
#define TENSORFLOW_LITE_KERNELS_PADDING_H_

#include "tensorflow/lite/c/builtin_op_data.h"

namespace tflite {

// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover.
inline int ComputePadding(int stride, int dilation_rate, int in_size,
                          int filter_size, int out_size) {
  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  int padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2;
  return padding > 0 ? padding : 0;
}

// It's not guaranteed that padding is symmetric, so it's important to keep
// the offset for algorithms that need all paddings.
inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size,
                                    int filter_size, int out_size,
                                    int* offset) {
  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  int total_padding =
      ((out_size - 1) * stride + effective_filter_size - in_size);
  total_padding = total_padding > 0 ? total_padding : 0;
  *offset = total_padding % 2;
  return total_padding / 2;
}

// Matching GetWindowedOutputSize in TensorFlow.
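// Worked example (added comment, not upstream): for a 28-wide input, 3-wide
// filter, stride 1 and no dilation, ComputeOutSize() below gives
// (28 + 1 - 1) / 1 = 28 for kTfLitePaddingSame and (28 + 1 - 3) / 1 = 26 for
// kTfLitePaddingValid; ComputePaddingWithOffset() above then yields
// ((28 - 1) * 1 + 3 - 28) / 2 = 1 pixel of SAME padding per side, offset 0.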
inline int ComputeOutSize(TfLitePadding padding, int image_size,
                          int filter_size, int stride, int dilation_rate = 1) {
  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  switch (padding) {
    case kTfLitePaddingSame:
      return (image_size + stride - 1) / stride;
    case kTfLitePaddingValid:
      return (image_size + stride - effective_filter_size) / stride;
    default:
      return 0;
  }
}

inline TfLitePaddingValues ComputePaddingHeightWidth(
    int stride_height, int stride_width, int dilation_rate_height,
    int dilation_rate_width, int in_height, int in_width, int filter_height,
    int filter_width, TfLitePadding padding, int* out_height, int* out_width) {
  *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width,
                              dilation_rate_width);
  *out_height = ComputeOutSize(padding, in_height, filter_height,
                               stride_height, dilation_rate_height);

  TfLitePaddingValues padding_values;
  int offset = 0;
  padding_values.height =
      ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height,
                               filter_height, *out_height, &offset);
  padding_values.height_offset = offset;
  padding_values.width =
      ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width,
                               filter_width, *out_width, &offset);
  padding_values.width_offset = offset;
  return padding_values;
}
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_PADDING_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/string_type.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Abstract string. We don't want even absl at this level.
#ifndef TENSORFLOW_LITE_STRING_TYPE_H_
#define TENSORFLOW_LITE_STRING_TYPE_H_

#include <string>

namespace tflite {

using std::string;

}  // namespace tflite

#endif  // TENSORFLOW_LITE_STRING_TYPE_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/string_util.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Util methods to read and write String tensors.
// String tensors are considered to be char tensor with protocol.
//   [0, 3] 4 bytes: N, num of strings in the tensor in little endian.
//   [(i+1)*4, (i+1)*4+3] 4 bytes: offset of i-th string in little endian.
//   [(N+1)*4, (N+1)*4+3] 4 bytes: length of the whole char buffer.
//   [offset(i), offset(i+1) - 1] : content of i-th string.
// Example of a string tensor:
// [
//   2, 0, 0, 0,     # 2 strings.
//   16, 0, 0, 0,    # 0-th string starts from index 16.
//   18, 0, 0, 0,    # 1-st string starts from index 18.
//   18, 0, 0, 0,    # total length of array.
//   'A', 'B',       # 0-th string [16..17]: "AB"
// ]                 # 1-th string, empty
//
// A typical usage:
// In op.Eval(context, node):
//   DynamicBuffer buf;
//   # Add string "AB" to tensor, string is stored in dynamic buffer.
//   buf.AddString("AB", 2);
//   # Write content of DynamicBuffer to tensor in format of string tensor
//   # described above.
//   buf.WriteToTensor(tensor, nullptr)

#ifndef TENSORFLOW_LITE_STRING_UTIL_H_
#define TENSORFLOW_LITE_STRING_UTIL_H_

#include <vector>

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/string_type.h"

namespace tflite {

// Convenient structure to store string pointer and length.
typedef struct {
  const char* str;
  int len;
} StringRef;

// DynamicBuffer holds temporary buffer that will be used to create a dynamic
// tensor. A typical usage is to initialize a DynamicBuffer object, fill in
// content and call CreateStringTensor in op.Eval().
class DynamicBuffer {
 public:
  DynamicBuffer() : offset_({0}) {}

  // Add string to dynamic buffer by resizing the buffer and copying the data.
  void AddString(const StringRef& string);

  // Add string to dynamic buffer by resizing the buffer and copying the data.
  void AddString(const char* str, size_t len);

  // Join a list of string with separator, and add as a single string to the
  // buffer.
  void AddJoinedString(const std::vector<StringRef>& strings, char separator);

  // Fill content into a buffer and returns the number of bytes stored.
  // The function allocates space for the buffer but does NOT take ownership.
  int WriteToBuffer(char** buffer);

  // Fill content into a string tensor, with the given new_shape. The new shape
  // must match the number of strings in this object. Caller relinquishes
  // ownership of new_shape. If 'new_shape' is nullptr, keep the tensor's
  // existing shape.
  void WriteToTensor(TfLiteTensor* tensor, TfLiteIntArray* new_shape);

  // Fill content into a string tensor. Set shape to {num_strings}.
  void WriteToTensorAsVector(TfLiteTensor* tensor);

 private:
  // Data buffer to store contents of strings, not including headers.
  std::vector<char> data_;
  // Offset of the starting index of each string in data buffer.
  std::vector<int32_t> offset_;
};

// Return num of strings in a String tensor.
int GetStringCount(const void* raw_buffer);
int GetStringCount(const TfLiteTensor* tensor);

// Get String pointer and length of index-th string in tensor.
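// Usage sketch (added comment, not upstream): reading back the two-string
// example tensor described at the top of this header:
//
//   int n = GetStringCount(tensor);        // n == 2
//   StringRef ref = GetString(tensor, 0);  // ref.str -> "AB", ref.len == 2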
// NOTE: This will not create a copy of string data.
StringRef GetString(const void* raw_buffer, int string_index);
StringRef GetString(const TfLiteTensor* tensor, int string_index);
}  // namespace tflite

#endif  // TENSORFLOW_LITE_STRING_UTIL_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/type_to_tflitetype.h:
--------------------------------------------------------------------------------
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
#define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_

// Arduino build defines abs as a macro here. That is invalid C++, and breaks
// libc++'s <complex> header, undefine it.
#ifdef abs
#undef abs
#endif

#include <complex>
#include <string>

#include "tensorflow/lite/c/c_api_internal.h"

namespace tflite {

// Map statically from a c++ type to a TfLiteType. Used in interpreter for
// safe casts.
template <typename T>
constexpr TfLiteType typeToTfLiteType() {
  return kTfLiteNoType;
}
template <>
constexpr TfLiteType typeToTfLiteType<int>() {
  return kTfLiteInt32;
}
template <>
constexpr TfLiteType typeToTfLiteType<int16_t>() {
  return kTfLiteInt16;
}
template <>
constexpr TfLiteType typeToTfLiteType<int64_t>() {
  return kTfLiteInt64;
}
template <>
constexpr TfLiteType typeToTfLiteType<float>() {
  return kTfLiteFloat32;
}
template <>
constexpr TfLiteType typeToTfLiteType<unsigned char>() {
  return kTfLiteUInt8;
}
template <>
constexpr TfLiteType typeToTfLiteType<int8_t>() {
  return kTfLiteInt8;
}
template <>
constexpr TfLiteType typeToTfLiteType<bool>() {
  return kTfLiteBool;
}
template <>
constexpr TfLiteType typeToTfLiteType<std::complex<float>>() {
  return kTfLiteComplex64;
}
template <>
constexpr TfLiteType typeToTfLiteType<std::string>() {
  return kTfLiteString;
}
template <>
constexpr TfLiteType typeToTfLiteType<TfLiteFloat16>() {
  return kTfLiteFloat16;
}
}  // namespace tflite
#endif  // TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
--------------------------------------------------------------------------------
/lib/tfmicro/tensorflow/lite/version.h:
--------------------------------------------------------------------------------
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_VERSION_H_
#define TENSORFLOW_LITE_VERSION_H_

#include "tensorflow/core/public/version.h"

// The version number of the Schema. Ideally all changes will be backward
// compatible. If that ever changes, we must ensure that version is the first
// entry in the new tflite root so that we can see that version is not 1.
#define TFLITE_SCHEMA_VERSION (3)

// TensorFlow Lite Runtime version.
// This value is currently shared with that of TensorFlow.
#define TFLITE_VERSION_STRING TF_VERSION_STRING

#endif  // TENSORFLOW_LITE_VERSION_H_
--------------------------------------------------------------------------------
/platformio.ini:
--------------------------------------------------------------------------------
[env:esp32doit-devkit-v1]
platform = espressif32
board = esp32doit-devkit-v1
framework = arduino
board_build.partitions = custom.csv
lib_deps = tfmicro
--------------------------------------------------------------------------------
/src/main.cpp:
--------------------------------------------------------------------------------
/*=============================================================================
TensorFlow Lite Platformio Example

Author: Wezley Sherman
Referenced Authors: The TensorFlow Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <Arduino.h>
#include <math.h>
#include "tensorflow/lite/experimental/micro/kernels/all_ops_resolver.h"
#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
#include "tensorflow/lite/experimental/micro/micro_interpreter.h"
#include "sine_model_data.h"

// Create a memory pool for the nodes in the network
constexpr int tensor_pool_size = 2 * 1024;
uint8_t tensor_pool[tensor_pool_size];

// Define the model to be used
const tflite::Model* sine_model;

// Define the interpreter
tflite::MicroInterpreter* interpreter;

// Input/Output nodes for the network
TfLiteTensor* input;
TfLiteTensor* output;

// Set up the ESP32's environment.
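// Note (added comment, not in the original example): every tensor the
// interpreter needs is carved out of `tensor_pool` by AllocateTensors()
// below, so if allocation fails the usual fix is to enlarge
// tensor_pool_size. 2 KB suffices for this tiny sine model; bigger models
// need a bigger arena.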
void setup() {
  // Start serial at 115200 baud
  Serial.begin(115200);

  // Load the sample sine model
  Serial.println("Loading Tensorflow model....");
  sine_model = tflite::GetModel(g_sine_model_data);
  Serial.println("Sine model loaded!");

  // Define ops resolver and error reporting
  static tflite::ops::micro::AllOpsResolver resolver;

  static tflite::ErrorReporter* error_reporter;
  static tflite::MicroErrorReporter micro_error;
  error_reporter = &micro_error;

  // Instantiate the interpreter
  static tflite::MicroInterpreter static_interpreter(
    sine_model, resolver, tensor_pool, tensor_pool_size, error_reporter
  );

  interpreter = &static_interpreter;

  // Allocate the model's tensors in the memory pool that was created.
  Serial.println("Allocating tensors to memory pool");
  if(interpreter->AllocateTensors() != kTfLiteOk) {
    Serial.println("There was an error allocating the memory...ooof");
    return;
  }

  // Define input and output nodes
  input = interpreter->input(0);
  output = interpreter->output(0);
  Serial.println("Starting inferences... Input a number!");
}

// Logic loop for taking user input and outputting the sine
void loop() {
  // Wait for serial input to be made available and parse it as a float
  if(Serial.available() > 0) {
    float user_input = Serial.parseFloat();

    /* The sample model is only trained for values between 0 and 2*PI.
     * This keeps the user from inputting bad numbers.
     */
    if(user_input < 0.0f || user_input > (float)(2*M_PI)) {
      Serial.println("Your number must be greater than 0 and less than 2*PI");
      return;
    }

    // Set the input node to the user input
    input->data.f[0] = user_input;

    Serial.println("Running inference on input data...");

    // Run inference on the input data
    if(interpreter->Invoke() != kTfLiteOk) {
      Serial.println("There was an error invoking the interpreter!");
      return;
    }

    // Print the output of the model.
    Serial.print("Input: ");
    Serial.println(user_input);
    Serial.print("Output: ");
    Serial.println(output->data.f[0]);
    Serial.println("");
  }
}
--------------------------------------------------------------------------------