├── .gitignore ├── mfcc-nn ├── components │ ├── esp_adf_min │ │ ├── esp-adf │ │ ├── min │ │ │ ├── audio_hal │ │ │ ├── audio_sal │ │ │ ├── audio_board │ │ │ ├── audio_pipeline │ │ │ ├── display_service │ │ │ ├── esp_dispatcher │ │ │ └── esp_peripherals │ │ └── component.mk │ └── kws │ │ ├── tf │ │ ├── dataset │ │ ├── dcnn.tflite │ │ ├── dcnn.quant.tflite │ │ └── tfmicro │ │ │ └── tensorflow │ │ │ └── lite │ │ │ ├── string_type.h │ │ │ ├── micro │ │ │ ├── testing │ │ │ │ └── test_conv_model.h │ │ │ ├── debug_log.h │ │ │ ├── benchmarks │ │ │ │ └── keyword_scrambled_model_data.h │ │ │ ├── kernels │ │ │ │ ├── ethosu.cc │ │ │ │ ├── micro_utils.h │ │ │ │ ├── activation_utils.h │ │ │ │ └── floor.cc │ │ │ ├── micro_time.h │ │ │ ├── micro_error_reporter.h │ │ │ ├── micro_optional_debug_tools.h │ │ │ ├── compatibility.h │ │ │ ├── micro_error_reporter.cc │ │ │ ├── micro_string.h │ │ │ ├── all_ops_resolver.h │ │ │ ├── micro_profiler.cc │ │ │ ├── memory_helpers.h │ │ │ ├── memory_planner │ │ │ │ ├── linear_memory_planner.h │ │ │ │ └── linear_memory_planner.cc │ │ │ ├── micro_time.cc │ │ │ └── debug_log.cc │ │ │ ├── core │ │ │ └── api │ │ │ │ ├── tensor_utils.h │ │ │ │ ├── error_reporter.cc │ │ │ │ ├── tensor_utils.cc │ │ │ │ └── op_resolver.h │ │ │ ├── kernels │ │ │ ├── internal │ │ │ │ ├── max.h │ │ │ │ ├── min.h │ │ │ │ ├── reference │ │ │ │ │ ├── neg.h │ │ │ │ │ ├── ceil.h │ │ │ │ │ ├── floor.h │ │ │ │ │ ├── round.h │ │ │ │ │ └── quantize.h │ │ │ │ ├── cppmath.h │ │ │ │ ├── optimized │ │ │ │ │ └── neon_check.h │ │ │ │ └── tensor_ctypes.h │ │ │ └── op_macros.h │ │ │ └── version.h │ │ ├── include │ │ └── kws.h │ │ ├── kws_priv.h │ │ └── non-copyable.h ├── main │ └── component.mk ├── partitions.csv ├── Makefile └── sdkconfig.defaults ├── 2-mfcc-nn-streaming ├── components │ ├── esp_adf_min │ │ ├── esp-adf │ │ ├── min │ │ │ ├── audio_hal │ │ │ ├── audio_sal │ │ │ ├── audio_board │ │ │ ├── audio_pipeline │ │ │ ├── esp_dispatcher │ │ │ ├── display_service │ │ │ └── esp_peripherals │ │ └── component.mk │ └── kws │ │ ├── fe │ │ ├── tf │ │ ├── dcnn.ipynb │ │ ├── dcnn.en.tflite │ │ ├── dcnn.bin.libri.tflite │ │ ├── dcnn.bin.musan.tflite │ │ ├── tf-streaming.h │ │ ├── tfmicro │ │ │ └── tensorflow │ │ │ │ └── lite │ │ │ │ ├── string_type.h │ │ │ │ ├── micro │ │ │ │ ├── testing │ │ │ │ │ └── test_conv_model.h │ │ │ │ ├── debug_log.h │ │ │ │ ├── benchmarks │ │ │ │ │ └── keyword_scrambled_model_data.h │ │ │ │ ├── kernels │ │ │ │ │ ├── ethosu.cc │ │ │ │ │ ├── micro_utils.h │ │ │ │ │ ├── activation_utils.h │ │ │ │ │ └── floor.cc │ │ │ │ ├── micro_time.h │ │ │ │ ├── micro_error_reporter.h │ │ │ │ ├── micro_optional_debug_tools.h │ │ │ │ ├── compatibility.h │ │ │ │ ├── micro_error_reporter.cc │ │ │ │ ├── micro_string.h │ │ │ │ ├── all_ops_resolver.h │ │ │ │ ├── micro_profiler.cc │ │ │ │ ├── memory_helpers.h │ │ │ │ ├── memory_planner │ │ │ │ │ ├── linear_memory_planner.h │ │ │ │ │ └── linear_memory_planner.cc │ │ │ │ ├── micro_time.cc │ │ │ │ └── debug_log.cc │ │ │ │ ├── core │ │ │ │ └── api │ │ │ │ │ ├── tensor_utils.h │ │ │ │ │ ├── error_reporter.cc │ │ │ │ │ ├── tensor_utils.cc │ │ │ │ │ └── op_resolver.h │ │ │ │ ├── kernels │ │ │ │ ├── internal │ │ │ │ │ ├── max.h │ │ │ │ │ ├── min.h │ │ │ │ │ ├── reference │ │ │ │ │ │ ├── neg.h │ │ │ │ │ │ ├── ceil.h │ │ │ │ │ │ ├── floor.h │ │ │ │ │ │ ├── round.h │ │ │ │ │ │ └── quantize.h │ │ │ │ │ ├── cppmath.h │ │ │ │ │ ├── optimized │ │ │ │ │ │ └── neon_check.h │ │ │ │ │ └── tensor_ctypes.h │ │ │ │ └── op_macros.h │ │ │ │ └── version.h │ │ └── tf-streaming.cc │ │ ├── include 
│ │ └── kws.h │ │ ├── kws_priv.h │ │ ├── non-copyable.h │ │ └── Kconfig.projbuild ├── main │ └── component.mk ├── partitions.csv ├── Makefile └── sdkconfig.defaults ├── mfcc-nn-streaming ├── components │ ├── esp_adf_min │ │ ├── esp-adf │ │ ├── min │ │ │ ├── audio_hal │ │ │ ├── audio_sal │ │ │ ├── audio_board │ │ │ ├── audio_pipeline │ │ │ ├── display_service │ │ │ ├── esp_dispatcher │ │ │ └── esp_peripherals │ │ └── component.mk │ └── kws │ │ ├── fe │ │ ├── tf │ │ ├── dcnn.en.tflite │ │ ├── dcnn.ru.tflite │ │ └── tfmicro │ │ │ └── tensorflow │ │ │ └── lite │ │ │ ├── string_type.h │ │ │ ├── micro │ │ │ ├── testing │ │ │ │ └── test_conv_model.h │ │ │ ├── debug_log.h │ │ │ ├── benchmarks │ │ │ │ └── keyword_scrambled_model_data.h │ │ │ ├── kernels │ │ │ │ ├── ethosu.cc │ │ │ │ ├── micro_utils.h │ │ │ │ ├── activation_utils.h │ │ │ │ └── floor.cc │ │ │ ├── micro_time.h │ │ │ ├── micro_error_reporter.h │ │ │ ├── micro_optional_debug_tools.h │ │ │ ├── compatibility.h │ │ │ ├── micro_error_reporter.cc │ │ │ ├── micro_string.h │ │ │ ├── all_ops_resolver.h │ │ │ ├── micro_profiler.cc │ │ │ ├── memory_helpers.h │ │ │ ├── memory_planner │ │ │ │ ├── linear_memory_planner.h │ │ │ │ └── linear_memory_planner.cc │ │ │ ├── micro_time.cc │ │ │ └── debug_log.cc │ │ │ ├── core │ │ │ └── api │ │ │ │ ├── tensor_utils.h │ │ │ │ ├── error_reporter.cc │ │ │ │ ├── tensor_utils.cc │ │ │ │ └── op_resolver.h │ │ │ ├── kernels │ │ │ ├── internal │ │ │ │ ├── max.h │ │ │ │ ├── min.h │ │ │ │ ├── reference │ │ │ │ │ ├── neg.h │ │ │ │ │ ├── ceil.h │ │ │ │ │ ├── floor.h │ │ │ │ │ ├── round.h │ │ │ │ │ └── quantize.h │ │ │ │ ├── cppmath.h │ │ │ │ ├── optimized │ │ │ │ │ └── neon_check.h │ │ │ │ └── tensor_ctypes.h │ │ │ └── op_macros.h │ │ │ └── version.h │ │ ├── include │ │ └── kws.h │ │ ├── kws_priv.h │ │ ├── non-copyable.h │ │ └── Kconfig.projbuild ├── main │ └── component.mk ├── partitions.csv ├── Makefile └── sdkconfig.defaults ├── tl └── embedding.weights.h5 ├── .gitmodules └── Dockerfile /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | sdkconfig 3 | sdkconfig.old -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/esp-adf: -------------------------------------------------------------------------------- 1 | ../../../submodules/esp-adf/ -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/dataset: -------------------------------------------------------------------------------- 1 | ../../../../submodules/dataset/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/esp-adf: -------------------------------------------------------------------------------- 1 | ../../../submodules/esp-adf/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/esp-adf: -------------------------------------------------------------------------------- 1 | ../../../submodules/esp-adf/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/fe: -------------------------------------------------------------------------------- 1 | ../../../submodules/dataset/google_speech_commands/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/fe: -------------------------------------------------------------------------------- 1 
| ../../../submodules/dataset/google_speech_commands/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/audio_hal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_hal/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/audio_sal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_sal/ -------------------------------------------------------------------------------- /tl/embedding.weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/tl/embedding.weights.h5 -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/audio_board: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_board/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/audio_hal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_hal/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/audio_sal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_sal/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/dcnn.ipynb: -------------------------------------------------------------------------------- 1 | ../../../../mfcc-nn-streaming/components/kws/tf/dcnn.ipynb -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/audio_hal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_hal/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/audio_sal: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_sal/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/audio_pipeline: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_pipeline/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/display_service: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/display_service/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/esp_dispatcher: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/esp_dispatcher/ -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/min/esp_peripherals: -------------------------------------------------------------------------------- 1 | 
../../../../submodules/esp-adf/components/esp_peripherals/ -------------------------------------------------------------------------------- /mfcc-nn/main/component.mk: -------------------------------------------------------------------------------- 1 | # 2 | # Main Makefile. This is basically the same as a component makefile. 3 | # -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/audio_board: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_board/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/audio_board: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_board/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/audio_pipeline: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_pipeline/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/esp_dispatcher: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/esp_dispatcher/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/dcnn.en.tflite: -------------------------------------------------------------------------------- 1 | ../../../../mfcc-nn-streaming/components/kws/tf/dcnn.en.tflite -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/main/component.mk: -------------------------------------------------------------------------------- 1 | # 2 | # Main Makefile. This is basically the same as a component makefile. 3 | # -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/audio_pipeline: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/audio_pipeline/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/display_service: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/display_service/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/esp_dispatcher: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/esp_dispatcher/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/min/esp_peripherals: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/esp_peripherals/ -------------------------------------------------------------------------------- /mfcc-nn-streaming/main/component.mk: -------------------------------------------------------------------------------- 1 | # 2 | # Main Makefile. This is basically the same as a component makefile. 
3 | # -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/display_service: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/display_service/ -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/min/esp_peripherals: -------------------------------------------------------------------------------- 1 | ../../../../submodules/esp-adf/components/esp_peripherals/ -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/dcnn.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/mfcc-nn/components/kws/tf/dcnn.tflite -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/dcnn.quant.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/mfcc-nn/components/kws/tf/dcnn.quant.tflite -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/dcnn.en.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/mfcc-nn-streaming/components/kws/tf/dcnn.en.tflite -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/dcnn.ru.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/mfcc-nn-streaming/components/kws/tf/dcnn.ru.tflite -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/dcnn.bin.libri.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/2-mfcc-nn-streaming/components/kws/tf/dcnn.bin.libri.tflite -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/dcnn.bin.musan.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/42io/esp32_kws/HEAD/2-mfcc-nn-streaming/components/kws/tf/dcnn.bin.musan.tflite -------------------------------------------------------------------------------- /mfcc-nn/partitions.csv: -------------------------------------------------------------------------------- 1 | # Espressif ESP32 Partition Table 2 | # Name, Type, SubType, Offset, Size, Flags 3 | nvs, data, nvs, 0x9000, 24K, 4 | phy_init, data, phy, 0xf000, 4K, 5 | factory, app, factory, 0x10000, 3M, -------------------------------------------------------------------------------- /mfcc-nn-streaming/partitions.csv: -------------------------------------------------------------------------------- 1 | # Espressif ESP32 Partition Table 2 | # Name, Type, SubType, Offset, Size, Flags 3 | nvs, data, nvs, 0x9000, 24K, 4 | phy_init, data, phy, 0xf000, 4K, 5 | factory, app, factory, 0x10000, 3M, -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/partitions.csv: -------------------------------------------------------------------------------- 1 | # Espressif ESP32 Partition Table 2 | # Name, Type, SubType, Offset, Size, 
Flags 3 | nvs, data, nvs, 0x9000, 24K, 4 | phy_init, data, phy, 0xf000, 4K, 5 | factory, app, factory, 0x10000, 3M, -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | 2 | [submodule "submodules/esp-adf"] 3 | path = submodules/esp-adf 4 | url = https://github.com/espressif/esp-adf.git 5 | [submodule "submodules/dataset"] 6 | path = submodules/dataset 7 | url = https://github.com/42io/dataset.git 8 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/include/kws.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | void* kws_init(size_t rate, size_t channels, 7 | size_t sample_bits, size_t buf_sz, 8 | void (*callback)(int word)); 9 | 10 | void kws_detect(void); -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/include/kws.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | void* kws_init(size_t rate, size_t channels, 7 | size_t sample_bits, size_t buf_sz, 8 | void (*callback)(int word)); 9 | 10 | void kws_detect(void); -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/include/kws.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | void* kws_init(size_t rate, size_t channels, 7 | size_t sample_bits, size_t buf_sz, 8 | void (*callback)(int word)); 9 | 10 | void kws_detect(void); -------------------------------------------------------------------------------- /mfcc-nn/components/kws/kws_priv.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "kws.h" 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | struct guess_t; 9 | struct guess_t *guess_create(size_t inputs_sz); 10 | int guess_16b_16k_mono(struct guess_t *instance, float* feat); 11 | #ifdef __cplusplus 12 | } 13 | #endif -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/kws_priv.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "kws.h" 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | struct guess_t; 9 | struct guess_t *guess_create(size_t inputs_sz); 10 | int guess_16b_16k_mono(struct guess_t *instance, float* feat); 11 | #ifdef __cplusplus 12 | } 13 | #endif -------------------------------------------------------------------------------- /mfcc-nn/components/kws/non-copyable.h: -------------------------------------------------------------------------------- 1 | // C++ compile time check idiom: Non-Copyable Class 2 | // TODO: inheritance approach bloats the code size 3 | 4 | 5 | class NonCopyable { 6 | protected: 7 | NonCopyable(NonCopyable const&) = delete; 8 | NonCopyable& operator=(NonCopyable const&) = delete; 9 | NonCopyable() = default; 10 | }; -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/non-copyable.h: -------------------------------------------------------------------------------- 1 | // C++ compile time check idiom: Non-Copyable Class 2 | // TODO: inheritance approach bloats the code 
size 3 | 4 | 5 | class NonCopyable { 6 | protected: 7 | NonCopyable(NonCopyable const&) = delete; 8 | NonCopyable& operator=(NonCopyable const&) = delete; 9 | NonCopyable() = default; 10 | }; -------------------------------------------------------------------------------- /mfcc-nn/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # This is a project Makefile. It is assumed the directory this Makefile resides in is a 3 | # project subdirectory. 4 | # 5 | 6 | PROJECT_NAME := kws_mfcc_nn 7 | 8 | EXTRA_COMPONENT_DIRS += $(PROJECT_PATH)/components/esp_adf_min/min 9 | 10 | include $(IDF_PATH)/make/project.mk 11 | 12 | ifndef MAKE_RESTARTS 13 | $(info IDF_VER is $(IDF_VER)) 14 | endif -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # This is a project Makefile. It is assumed the directory this Makefile resides in is a 3 | # project subdirectory. 4 | # 5 | 6 | PROJECT_NAME := 2_kws_mfcc_nn_streaming 7 | 8 | EXTRA_COMPONENT_DIRS += $(PROJECT_PATH)/components/esp_adf_min/min 9 | 10 | include $(IDF_PATH)/make/project.mk 11 | 12 | ifndef MAKE_RESTARTS 13 | $(info IDF_VER is $(IDF_VER)) 14 | endif -------------------------------------------------------------------------------- /mfcc-nn-streaming/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # This is a project Makefile. It is assumed the directory this Makefile resides in is a 3 | # project subdirectory. 4 | # 5 | 6 | PROJECT_NAME := kws_mfcc_nn_streaming 7 | 8 | EXTRA_COMPONENT_DIRS += $(PROJECT_PATH)/components/esp_adf_min/min 9 | 10 | include $(IDF_PATH)/make/project.mk 11 | 12 | ifndef MAKE_RESTARTS 13 | $(info IDF_VER is $(IDF_VER)) 14 | endif -------------------------------------------------------------------------------- /mfcc-nn/sdkconfig.defaults: -------------------------------------------------------------------------------- 1 | CONFIG_PARTITION_TABLE_CUSTOM=y 2 | CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv" 3 | 4 | CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y 5 | 6 | CONFIG_ESP_LYRATD_MSC_V2_1_BOARD=y 7 | 8 | CONFIG_ESPTOOLPY_BAUD_921600B=y 9 | CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y 10 | 11 | CONFIG_HEAP_POISONING_DISABLED=y 12 | CONFIG_STACK_CHECK_NONE=y 13 | CONFIG_OPTIMIZATION_LEVEL_RELEASE=y 14 | CONFIG_ESP32_PANIC_PRINT_REBOOT=y 15 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/sdkconfig.defaults: -------------------------------------------------------------------------------- 1 | CONFIG_PARTITION_TABLE_CUSTOM=y 2 | CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv" 3 | 4 | CONFIG_ESP_LYRATD_MSC_V2_1_BOARD=y 5 | 6 | CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y 7 | 8 | CONFIG_NEWLIB_NANO_FORMAT=y 9 | 10 | CONFIG_ESPTOOLPY_BAUD_921600B=y 11 | CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y 12 | 13 | CONFIG_HEAP_POISONING_DISABLED=y 14 | CONFIG_STACK_CHECK_NONE=y 15 | CONFIG_OPTIMIZATION_LEVEL_RELEASE=y 16 | CONFIG_ESP32_PANIC_PRINT_REBOOT=y 17 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/sdkconfig.defaults: -------------------------------------------------------------------------------- 1 | CONFIG_PARTITION_TABLE_CUSTOM=y 2 | CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv" 3 | 4 | CONFIG_ESP_LYRATD_MSC_V2_1_BOARD=y 5 | 6 | CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y 7 | 8 | 
CONFIG_NEWLIB_NANO_FORMAT=y 9 | 10 | CONFIG_ESPTOOLPY_BAUD_921600B=y 11 | CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y 12 | 13 | CONFIG_HEAP_POISONING_DISABLED=y 14 | CONFIG_STACK_CHECK_NONE=y 15 | CONFIG_OPTIMIZATION_LEVEL_RELEASE=y 16 | CONFIG_ESP32_PANIC_PRINT_REBOOT=y 17 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/kws_priv.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "kws.h" 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | struct guess_t; 10 | struct guess_t *guess_create(size_t inputs_sz); 11 | int guess_16b_16k_mono(struct guess_t *instance, float* feat); 12 | 13 | struct confirm_t; 14 | struct confirm_t *confirm_create(size_t inputs_sz); 15 | float confirm_16b_16k_mono(struct confirm_t *instance, float* feat); 16 | 17 | #ifdef __cplusplus 18 | } 19 | #endif -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/non-copyable.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // C++ compile time check idiom: Non-Copyable Class 4 | // TODO: inheritance approach bloats the code size 5 | 6 | class NonCopyable { 7 | protected: 8 | NonCopyable( NonCopyable const& ) = delete; 9 | NonCopyable( NonCopyable && ) = delete; 10 | NonCopyable& operator = ( NonCopyable const& ) = delete; 11 | NonCopyable& operator = ( NonCopyable && ) = delete; 12 | NonCopyable() = default; 13 | }; -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/Kconfig.projbuild: -------------------------------------------------------------------------------- 1 | menu "KWS" 2 | 3 | choice 4 | prompt "KWS LANGUAGE" 5 | default KWS_LANG_EN 6 | 7 | config KWS_LANG_RU 8 | bool "ru" 9 | 10 | config KWS_LANG_EN 11 | bool "en" 12 | endchoice 13 | 14 | config KWS_MODEL_START 15 | string 16 | default "_binary_dcnn_en_tflite_start" if KWS_LANG_EN 17 | default "_binary_dcnn_ru_tflite_start" if KWS_LANG_RU 18 | 19 | config KWS_MODEL_OUT_NUM 20 | int 21 | default 12 22 | 23 | config KWS_TENSOR_ARENA_SZ 24 | int 25 | default 18048 26 | 27 | endmenu -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | ARG TOOL=xtensa-esp32-elf-linux64-1.22.0-96-g2852398-5.2.0.tar.gz 3 | RUN apt-get -qq update && \ 4 | apt-get -qq install -y gcc git wget make libncurses-dev \ 5 | flex bison gperf python python-pip python-setuptools \ 6 | python-serial python-cryptography python-future \ 7 | python-pyparsing libffi-dev libssl-dev > /dev/null && \ 8 | mkdir /home/esp && cd /home/esp && \ 9 | wget -q https://dl.espressif.com/dl/$TOOL && \ 10 | tar xzf $TOOL && rm $TOOL && \ 11 | git clone -q -b v3.3.4 --recursive --depth 1 \ 12 | https://github.com/espressif/esp-idf.git 13 | ENV PATH /home/esp/xtensa-esp32-elf/bin:$PATH 14 | ENV IDF_PATH /home/esp/esp-idf 15 | WORKDIR /home/src -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tf-streaming.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "non-copyable.h" 4 | #include "tensorflow/lite/c/common.h" 5 | 6 | class StreamingState : NonCopyable 7 | { 8 | char *i, *o, *r; 9 | size_t i_sz, o_sz, mv_sz; 10 | void
init(char* ii, size_t is, char* oo, size_t os); 11 | void (*strategy)(StreamingState* self) = &default_strategy_impl; 12 | static void default_strategy_impl(StreamingState* self); 13 | static void dilated_strategy_impl(StreamingState* self); 14 | 15 | public: 16 | StreamingState(TfLiteTensor* ii, TfLiteTensor* oo); 17 | StreamingState(TfLiteTensor* ii, size_t ss); 18 | StreamingState(TfLiteTensor* ii, TfLiteTensor* oo, size_t d); 19 | ~StreamingState() { delete[] r; } 20 | void stream() { strategy(this); } 21 | }; -------------------------------------------------------------------------------- /mfcc-nn/components/esp_adf_min/component.mk: -------------------------------------------------------------------------------- 1 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/audio_stream/include 2 | COMPONENT_SRCDIRS += esp-adf/components/audio_stream 3 | COMPONENT_OBJS += esp-adf/components/audio_stream/raw_stream.o \ 4 | esp-adf/components/audio_stream/i2s_stream.o 5 | 6 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/esp-adf-libs/esp_audio/include \ 7 | esp-adf/components/esp-adf-libs/esp_codec/include/codec \ 8 | esp-adf/components/esp-adf-libs/esp_codec/include/processing 9 | COMPONENT_SRCDIRS += esp-adf/components/esp-adf-libs/esp_codec 10 | COMPONENT_OBJS += esp-adf/components/esp-adf-libs/esp_codec/audio_alc.o \ 11 | esp-adf/components/esp-adf-libs/esp_codec/filter_resample.o 12 | 13 | COMPONENT_ADD_LDFLAGS += -L$(COMPONENT_PATH)/esp-adf/components/esp-adf-libs/esp_codec/lib/esp32 -lesp_processing -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/esp_adf_min/component.mk: -------------------------------------------------------------------------------- 1 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/audio_stream/include 2 | COMPONENT_SRCDIRS += esp-adf/components/audio_stream 3 | COMPONENT_OBJS += esp-adf/components/audio_stream/raw_stream.o \ 4 | esp-adf/components/audio_stream/i2s_stream.o 5 | 6 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/esp-adf-libs/esp_audio/include \ 7 | esp-adf/components/esp-adf-libs/esp_codec/include/codec \ 8 | esp-adf/components/esp-adf-libs/esp_codec/include/processing 9 | COMPONENT_SRCDIRS += esp-adf/components/esp-adf-libs/esp_codec 10 | COMPONENT_OBJS += esp-adf/components/esp-adf-libs/esp_codec/audio_alc.o \ 11 | esp-adf/components/esp-adf-libs/esp_codec/filter_resample.o 12 | 13 | COMPONENT_ADD_LDFLAGS += -L$(COMPONENT_PATH)/esp-adf/components/esp-adf-libs/esp_codec/lib/esp32 -lesp_processing -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/esp_adf_min/component.mk: -------------------------------------------------------------------------------- 1 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/audio_stream/include 2 | COMPONENT_SRCDIRS += esp-adf/components/audio_stream 3 | COMPONENT_OBJS += esp-adf/components/audio_stream/raw_stream.o \ 4 | esp-adf/components/audio_stream/i2s_stream.o 5 | 6 | COMPONENT_ADD_INCLUDEDIRS += esp-adf/components/esp-adf-libs/esp_audio/include \ 7 | esp-adf/components/esp-adf-libs/esp_codec/include/codec \ 8 | esp-adf/components/esp-adf-libs/esp_codec/include/processing 9 | COMPONENT_SRCDIRS += esp-adf/components/esp-adf-libs/esp_codec 10 | COMPONENT_OBJS += esp-adf/components/esp-adf-libs/esp_codec/audio_alc.o \ 11 | esp-adf/components/esp-adf-libs/esp_codec/filter_resample.o 12 | 13 | COMPONENT_ADD_LDFLAGS += -L$(COMPONENT_PATH)/esp-adf/components/esp-adf-libs/esp_codec/lib/esp32 
-lesp_processing -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/string_type.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Abstract string. We don't want even absl at this level. 16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_ 17 | #define TENSORFLOW_LITE_STRING_TYPE_H_ 18 | 19 | #include 20 | 21 | namespace tflite { 22 | 23 | using std::string; 24 | 25 | } // namespace tflite 26 | 27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_ 28 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/string_type.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Abstract string. We don't want even absl at this level. 16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_ 17 | #define TENSORFLOW_LITE_STRING_TYPE_H_ 18 | 19 | #include 20 | 21 | namespace tflite { 22 | 23 | using std::string; 24 | 25 | } // namespace tflite 26 | 27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_ 28 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/string_type.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Abstract string. 
We don't want even absl at this level. 16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_ 17 | #define TENSORFLOW_LITE_STRING_TYPE_H_ 18 | 19 | #include 20 | 21 | namespace tflite { 22 | 23 | using std::string; 24 | 25 | } // namespace tflite 26 | 27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_ 28 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/testing/test_conv_model.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 18 | 19 | // See generate_test_models.py for updating the contents of this model: 20 | extern const unsigned char kTestConvModelData[]; 21 | extern const unsigned int kTestConvModelDataSize; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 24 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/testing/test_conv_model.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
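The kws.h header shown earlier is the entire application-facing API of the kws component: kws_init() takes the PCM format, a buffer size and a detection callback, and kws_detect() runs the recognition loop. A minimal, hypothetical caller could look like the sketch below; the 16 kHz / mono / 16-bit parameters, the one-second buffer size and the app_run() entry point are illustrative assumptions, not the project's actual main code.

// Hypothetical usage sketch of the kws.h API (not the project's main.c).
// Assumes 16-bit, 16 kHz, mono PCM is written into the buffer by an audio task.
#include <cstdio>
#include "kws.h"

static void on_word(int word)            // index of the recognized keyword class
{
    printf("keyword #%d detected\n", word);
}

void app_run(void)                       // hypothetical entry point
{
    void *buf = kws_init(16000, 1, 16,   // rate, channels, bits per sample (assumed)
                         16000 * 2,      // roughly one second of audio (assumed)
                         on_word);
    (void)buf;                           // the capture pipeline would feed PCM here
    kws_detect();                        // runs detection, invoking on_word() on hits
}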
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 18 | 19 | // See generate_test_models.py for updating the contents of this model: 20 | extern const unsigned char kTestConvModelData[]; 21 | extern const unsigned int kTestConvModelDataSize; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 24 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/testing/test_conv_model.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 18 | 19 | // See generate_test_models.py for updating the contents of this model: 20 | extern const unsigned char kTestConvModelData[]; 21 | extern const unsigned int kTestConvModelDataSize; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 24 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 18 | 19 | extern const unsigned char g_keyword_scrambled_model_data[]; 20 | extern const unsigned int g_keyword_scrambled_model_data_length; 21 | 22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 23 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 18 | 19 | extern const unsigned char g_keyword_scrambled_model_data[]; 20 | extern const unsigned int g_keyword_scrambled_model_data_length; 21 | 22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 23 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 18 | 19 | extern const unsigned char g_keyword_scrambled_model_data[]; 20 | extern const unsigned int g_keyword_scrambled_model_data_length; 21 | 22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 23 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // 17 | // This is a stub file for non-Ethos platforms 18 | // 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace custom { 25 | TfLiteRegistration* Register_ETHOSU() { return nullptr; } 26 | 27 | const char* GetString_ETHOSU() { return ""; } 28 | 29 | } // namespace custom 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // 17 | // This is a stub file for non-Ethos platforms 18 | // 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace custom { 25 | TfLiteRegistration* Register_ETHOSU() { return nullptr; } 26 | 27 | const char* GetString_ETHOSU() { return ""; } 28 | 29 | } // namespace custom 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
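The debug_log.h headers above declare DebugLog(), the logging hook TensorFlow Lite Micro expects each target platform to provide (the repo ships its own debug_log.cc, visible in the tree). Purely as an illustration of how small such a port can be, and not taken from the repo, a stdout-based version is a one-liner:

// Illustrative TFLite Micro logging hook; the repo's debug_log.cc may differ.
// Anything that can emit a C string to a console or UART will do.
#include <cstdio>

extern "C" void DebugLog(const char* s) { fputs(s, stdout); }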
14 | ==============================================================================*/ 15 | 16 | // 17 | // This is a stub file for non-Ethos platforms 18 | // 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace custom { 25 | TfLiteRegistration* Register_ETHOSU() { return nullptr; } 26 | 27 | const char* GetString_ETHOSU() { return ""; } 28 | 29 | } // namespace custom 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__) 23 | inline float TfLiteMax(const float& x, const float& y) { 24 | return std::max(x, y); 25 | } 26 | #else 27 | template 28 | inline T TfLiteMax(const T& x, const T& y) { 29 | return std::fmax(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/min.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__) 23 | inline float TfLiteMin(const float& x, const float& y) { 24 | return std::min(x, y); 25 | } 26 | #else 27 | template 28 | inline T TfLiteMin(const T& x, const T& y) { 29 | return std::fmin(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | // These functions should be implemented by each target platform, and provide an 23 | // accurate tick count along with how many ticks there are per second. 24 | int32_t ticks_per_second(); 25 | 26 | // Return time in ticks. The meaning of a tick varies per platform. 27 | int32_t GetCurrentTimeTicks(); 28 | 29 | } // namespace tflite 30 | 31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 32 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 17 | 18 | #include <cmath> 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__) 23 | inline float TfLiteMax(const float& x, const float& y) { 24 | return std::max(x, y); 25 | } 26 | #else 27 | template <class T> 28 | inline T TfLiteMax(const T& x, const T& y) { 29 | return std::fmax(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 36 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/min.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 17 | 18 | #include <cmath> 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__) 23 | inline float TfLiteMin(const float& x, const float& y) { 24 | return std::min(x, y); 25 | } 26 | #else 27 | template <class T> 28 | inline T TfLiteMin(const T& x, const T& y) { 29 | return std::fmin(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 36 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 17 | 18 | #include <cstdint> 19 | 20 | namespace tflite { 21 | 22 | // These functions should be implemented by each target platform, and provide an 23 | // accurate tick count along with how many ticks there are per second. 24 | int32_t ticks_per_second(); 25 | 26 | // Return time in ticks. The meaning of a tick varies per platform.
27 | int32_t GetCurrentTimeTicks(); 28 | 29 | } // namespace tflite 30 | 31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 32 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__) 23 | inline float TfLiteMax(const float& x, const float& y) { 24 | return std::max(x, y); 25 | } 26 | #else 27 | template 28 | inline T TfLiteMax(const T& x, const T& y) { 29 | return std::fmax(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/min.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__) 23 | inline float TfLiteMin(const float& x, const float& y) { 24 | return std::min(x, y); 25 | } 26 | #else 27 | template 28 | inline T TfLiteMin(const T& x, const T& y) { 29 | return std::fmin(x, y); 30 | } 31 | #endif 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | // These functions should be implemented by each target platform, and provide an 23 | // accurate tick count along with how many ticks there are per second. 24 | int32_t ticks_per_second(); 25 | 26 | // Return time in ticks. The meaning of a tick varies per platform. 27 | int32_t GetCurrentTimeTicks(); 28 | 29 | } // namespace tflite 30 | 31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 32 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1. 23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1. 23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1. 23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | #include "tensorflow/lite/micro/compatibility.h" 22 | 23 | namespace tflite { 24 | 25 | class MicroErrorReporter : public ErrorReporter { 26 | public: 27 | ~MicroErrorReporter() override {} 28 | int Report(const char* format, va_list args) override; 29 | 30 | private: 31 | TF_LITE_REMOVE_VIRTUAL_DELETE 32 | }; 33 | 34 | } // namespace tflite 35 | 36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 37 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | #include "tensorflow/lite/micro/compatibility.h" 22 | 23 | namespace tflite { 24 | 25 | class MicroErrorReporter : public ErrorReporter { 26 | public: 27 | ~MicroErrorReporter() override {} 28 | int Report(const char* format, va_list args) override; 29 | 30 | private: 31 | TF_LITE_REMOVE_VIRTUAL_DELETE 32 | }; 33 | 34 | } // namespace tflite 35 | 36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 37 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include <cstdarg> 19 | 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | #include "tensorflow/lite/micro/compatibility.h" 22 | 23 | namespace tflite { 24 | 25 | class MicroErrorReporter : public ErrorReporter { 26 | public: 27 | ~MicroErrorReporter() override {} 28 | int Report(const char* format, va_list args) override; 29 | 30 | private: 31 | TF_LITE_REMOVE_VIRTUAL_DELETE 32 | }; 33 | 34 | } // namespace tflite 35 | 36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 37 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include <cstdarg> 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include <cstdarg> 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...)
{ 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 
20 | struct Greater { 21 | template <typename T> 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template <typename T> 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 20 | struct Greater { 21 | template <typename T> 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template <typename T> 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment.
20 | struct Greater { 21 | template 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_optional_debug_tools.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Optional debugging functionality. For small sized binaries, these are not 16 | // needed. 17 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 18 | #define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 19 | 20 | #include "tensorflow/lite/micro/micro_interpreter.h" 21 | 22 | namespace tflite { 23 | // Helper function to print model flatbuffer data. This function is not called 24 | // by default. Hence it's not linked in to the final binary code. 25 | void PrintModelData(const Model* model, ErrorReporter* error_reporter); 26 | // Prints a dump of what tensors and what nodes are in the interpreter. 27 | void PrintInterpreterState(MicroInterpreter* interpreter); 28 | } // namespace tflite 29 | 30 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 31 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_optional_debug_tools.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | // Optional debugging functionality. For small sized binaries, these are not 16 | // needed. 17 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 18 | #define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 19 | 20 | #include "tensorflow/lite/micro/micro_interpreter.h" 21 | 22 | namespace tflite { 23 | // Helper function to print model flatbuffer data. This function is not called 24 | // by default. Hence it's not linked in to the final binary code. 25 | void PrintModelData(const Model* model, ErrorReporter* error_reporter); 26 | // Prints a dump of what tensors and what nodes are in the interpreter. 27 | void PrintInterpreterState(MicroInterpreter* interpreter); 28 | } // namespace tflite 29 | 30 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 31 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_optional_debug_tools.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | // Optional debugging functionality. For small sized binaries, these are not 16 | // needed. 17 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 18 | #define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 19 | 20 | #include "tensorflow/lite/micro/micro_interpreter.h" 21 | 22 | namespace tflite { 23 | // Helper function to print model flatbuffer data. This function is not called 24 | // by default. Hence it's not linked in to the final binary code. 25 | void PrintModelData(const Model* model, ErrorReporter* error_reporter); 26 | // Prints a dump of what tensors and what nodes are in the interpreter. 27 | void PrintInterpreterState(MicroInterpreter* interpreter); 28 | } // namespace tflite 29 | 30 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_ 31 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | #include 19 | 20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 21 | #include "tensorflow/lite/micro/debug_log.h" 22 | #include "tensorflow/lite/micro/micro_string.h" 23 | #endif 24 | 25 | namespace tflite { 26 | 27 | int MicroErrorReporter::Report(const char* format, va_list args) { 28 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 29 | // Only pulling in the implementation of this function for builds where we 30 | // expect to make use of it to be extra cautious about not increasing the code 31 | // size. 32 | static constexpr int kMaxLogLen = 256; 33 | char log_buffer[kMaxLogLen]; 34 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args); 35 | DebugLog(log_buffer); 36 | DebugLog("\r\n"); 37 | #endif 38 | return 0; 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | #include 19 | 20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 21 | #include "tensorflow/lite/micro/debug_log.h" 22 | #include "tensorflow/lite/micro/micro_string.h" 23 | #endif 24 | 25 | namespace tflite { 26 | 27 | int MicroErrorReporter::Report(const char* format, va_list args) { 28 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 29 | // Only pulling in the implementation of this function for builds where we 30 | // expect to make use of it to be extra cautious about not increasing the code 31 | // size. 32 | static constexpr int kMaxLogLen = 256; 33 | char log_buffer[kMaxLogLen]; 34 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args); 35 | DebugLog(log_buffer); 36 | DebugLog("\r\n"); 37 | #endif 38 | return 0; 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
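A short sketch (not from the repository) of how the MicroErrorReporter shown above is typically wired up on the caller side; the message text and byte count are placeholders.

#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

void ReportExample() {
  // Report() formats into a 256-byte buffer and forwards it to DebugLog().
  static tflite::MicroErrorReporter micro_error_reporter;
  tflite::ErrorReporter* error_reporter = &micro_error_reporter;

  // Call sites normally reach Report() through this convenience macro.
  TF_LITE_REPORT_ERROR(error_reporter, "arena too small, need %d more bytes",
                       1024);
}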
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | #include 19 | 20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 21 | #include "tensorflow/lite/micro/debug_log.h" 22 | #include "tensorflow/lite/micro/micro_string.h" 23 | #endif 24 | 25 | namespace tflite { 26 | 27 | int MicroErrorReporter::Report(const char* format, va_list args) { 28 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 29 | // Only pulling in the implementation of this function for builds where we 30 | // expect to make use of it to be extra cautious about not increasing the code 31 | // size. 32 | static constexpr int kMaxLogLen = 256; 33 | char log_buffer[kMaxLogLen]; 34 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args); 35 | DebugLog(log_buffer); 36 | DebugLog("\r\n"); 37 | #endif 38 | return 0; 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_string.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 17 | 18 | #include 19 | 20 | // Implements simple string formatting for numeric types. Returns the number of 21 | // bytes written to output. 22 | extern "C" { 23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. 24 | // MicroSnprintf() is implemented using MicroVsnprintf(). 25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args); 26 | // Functionally equavalent to snprintf, trimmed down for TFLite Micro. 27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string 28 | // "int 10" in the buffer. 29 | // Floating point values are logged in exponent notation (1.XXX*2^N). 
30 | int MicroSnprintf(char* output, int len, const char* format, ...); 31 | } 32 | 33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 34 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/cppmath.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ 23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ 24 | defined(__ZEPHYR__) 25 | #define TF_LITE_GLOBAL_STD_PREFIX 26 | #else 27 | #define TF_LITE_GLOBAL_STD_PREFIX std 28 | #endif 29 | 30 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ 31 | template \ 32 | inline T tf_name(const T x) { \ 33 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ 34 | } 35 | 36 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); 37 | 38 | } // namespace tflite 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 41 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_string.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 17 | 18 | #include 19 | 20 | // Implements simple string formatting for numeric types. Returns the number of 21 | // bytes written to output. 22 | extern "C" { 23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. 24 | // MicroSnprintf() is implemented using MicroVsnprintf(). 25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args); 26 | // Functionally equavalent to snprintf, trimmed down for TFLite Micro. 27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string 28 | // "int 10" in the buffer. 
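For orientation, a small usage sketch of the formatting helpers declared above (not part of the repository); the buffer size and values are arbitrary.

#include "tensorflow/lite/micro/micro_string.h"

void FormatExample() {
  char buf[32];
  // Integers format as expected: buf becomes "int 10".
  MicroSnprintf(buf, sizeof(buf), "int %d", 10);
  // Floats use the 1.XXX*2^N exponent notation mentioned in the comment above.
  MicroSnprintf(buf, sizeof(buf), "float %f", 0.25f);
}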
29 | // Floating point values are logged in exponent notation (1.XXX*2^N). 30 | int MicroSnprintf(char* output, int len, const char* format, ...); 31 | } 32 | 33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 34 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_string.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 17 | 18 | #include 19 | 20 | // Implements simple string formatting for numeric types. Returns the number of 21 | // bytes written to output. 22 | extern "C" { 23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. 24 | // MicroSnprintf() is implemented using MicroVsnprintf(). 25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args); 26 | // Functionally equavalent to snprintf, trimmed down for TFLite Micro. 27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string 28 | // "int 10" in the buffer. 29 | // Floating point values are logged in exponent notation (1.XXX*2^N). 30 | int MicroSnprintf(char* output, int len, const char* format, ...); 31 | } 32 | 33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 34 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/cppmath.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ 23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ 24 | defined(__ZEPHYR__) 25 | #define TF_LITE_GLOBAL_STD_PREFIX 26 | #else 27 | #define TF_LITE_GLOBAL_STD_PREFIX std 28 | #endif 29 | 30 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ 31 | template \ 32 | inline T tf_name(const T x) { \ 33 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ 34 | } 35 | 36 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); 37 | 38 | } // namespace tflite 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 41 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/cppmath.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ 23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ 24 | defined(__ZEPHYR__) 25 | #define TF_LITE_GLOBAL_STD_PREFIX 26 | #else 27 | #define TF_LITE_GLOBAL_STD_PREFIX std 28 | #endif 29 | 30 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ 31 | template \ 32 | inline T tf_name(const T x) { \ 33 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ 34 | } 35 | 36 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); 37 | 38 | } // namespace tflite 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 41 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 
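Not from the repository — a sketch of what the cppmath.h switch above buys a kernel: the macro declares a thin template wrapper, so call sites do not care whether the toolchain puts round() in namespace std or in the global namespace (Android NDK, Arduino, Zephyr). The Quantize function below is illustrative only.

#include "tensorflow/lite/kernels/internal/cppmath.h"

// DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round) expands roughly to:
//   template <class T> inline T TfLiteRound(const T x) {
//     return TF_LITE_GLOBAL_STD_PREFIX::round(x);
//   }
float Quantize(float value, float scale) {
  return tflite::TfLiteRound(value / scale);
}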
11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | // The magic number in the template parameter is the maximum number of ops that 21 | // can be added to AllOpsResolver. It can be increased if needed. And most 22 | // applications that care about the memory footprint will want to directly use 23 | // MicroMutableOpResolver and have an application specific template parameter. 24 | // The examples directory has sample code for this. 25 | class AllOpsResolver : public MicroMutableOpResolver<128> { 26 | public: 27 | AllOpsResolver(); 28 | 29 | private: 30 | TF_LITE_REMOVE_VIRTUAL_DELETE 31 | }; 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 36 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | // The magic number in the template parameter is the maximum number of ops that 21 | // can be added to AllOpsResolver. It can be increased if needed. And most 22 | // applications that care about the memory footprint will want to directly use 23 | // MicroMutableOpResolver and have an application specific template parameter. 24 | // The examples directory has sample code for this. 25 | class AllOpsResolver : public MicroMutableOpResolver<128> { 26 | public: 27 | AllOpsResolver(); 28 | 29 | private: 30 | TF_LITE_REMOVE_VIRTUAL_DELETE 31 | }; 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 
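As the header above suggests, firmware that cares about flash and RAM usually registers only the ops its model needs instead of pulling in AllOpsResolver. A hedged sketch follows — not the kws component's actual code; the op list is illustrative and assumes the per-op Add helpers of this TFLM snapshot.

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

void SetupResolverExample() {
  // Five slots instead of AllOpsResolver's 128.
  static tflite::MicroMutableOpResolver<5> resolver;
  resolver.AddConv2D();
  resolver.AddDepthwiseConv2D();
  resolver.AddFullyConnected();
  resolver.AddReshape();
  resolver.AddSoftmax();
  // Pass `resolver` to tflite::MicroInterpreter together with the model,
  // tensor arena and error reporter.
}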
4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | // The magic number in the template parameter is the maximum number of ops that 21 | // can be added to AllOpsResolver. It can be increased if needed. And most 22 | // applications that care about the memory footprint will want to directly use 23 | // MicroMutableOpResolver and have an application specific template parameter. 24 | // The examples directory has sample code for this. 25 | class AllOpsResolver : public MicroMutableOpResolver<128> { 26 | public: 27 | AllOpsResolver(); 28 | 29 | private: 30 | TF_LITE_REMOVE_VIRTUAL_DELETE 31 | }; 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 36 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #include "NEON_2_SSE.h" 26 | #endif 27 | 28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 29 | // defined, PortableSomeFunc(args) otherwise. 30 | #ifdef USE_NEON 31 | // Always use Neon code 32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 33 | 34 | #else 35 | // No NEON available: Use Portable code 36 | #define NEON_OR_PORTABLE(funcname, ...) 
Portable##funcname(__VA_ARGS__) 37 | 38 | #endif // defined(USE_NEON) 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 41 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_profiler.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_profiler.h" 17 | 18 | #include "tensorflow/lite/kernels/internal/compatibility.h" 19 | #include "tensorflow/lite/micro/micro_time.h" 20 | 21 | namespace tflite { 22 | 23 | MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) 24 | : reporter_(reporter) {} 25 | 26 | uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, 27 | int64_t event_metadata1, 28 | int64_t event_metadata2) { 29 | start_time_ = GetCurrentTimeTicks(); 30 | TFLITE_DCHECK(tag != nullptr); 31 | event_tag_ = tag; 32 | return 0; 33 | } 34 | 35 | void MicroProfiler::EndEvent(uint32_t event_handle) { 36 | int32_t end_time = GetCurrentTimeTicks(); 37 | TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, 38 | end_time - start_time_); 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #include "NEON_2_SSE.h" 26 | #endif 27 | 28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 29 | // defined, PortableSomeFunc(args) otherwise. 
30 | #ifdef USE_NEON 31 | // Always use Neon code 32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 33 | 34 | #else 35 | // No NEON available: Use Portable code 36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) 37 | 38 | #endif // defined(USE_NEON) 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 41 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_profiler.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_profiler.h" 17 | 18 | #include "tensorflow/lite/kernels/internal/compatibility.h" 19 | #include "tensorflow/lite/micro/micro_time.h" 20 | 21 | namespace tflite { 22 | 23 | MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) 24 | : reporter_(reporter) {} 25 | 26 | uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, 27 | int64_t event_metadata1, 28 | int64_t event_metadata2) { 29 | start_time_ = GetCurrentTimeTicks(); 30 | TFLITE_DCHECK(tag != nullptr); 31 | event_tag_ = tag; 32 | return 0; 33 | } 34 | 35 | void MicroProfiler::EndEvent(uint32_t event_handle) { 36 | int32_t end_time = GetCurrentTimeTicks(); 37 | TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, 38 | end_time - start_time_); 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
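A usage sketch for the profiler implemented above (not from the repository). It assumes the EventType enum declared on the tflite::Profiler base class; the tag and the profiled work are placeholders. Note that this snapshot stores a single start time, so Begin/End pairs are not meant to nest.

#include <cstdint>

#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_profiler.h"

void ProfileExample() {
  static tflite::MicroErrorReporter error_reporter;
  tflite::MicroProfiler profiler(&error_reporter);

  // EndEvent() prints "<tag> took <N> cycles" through the error reporter.
  uint32_t handle =
      profiler.BeginEvent("mfcc", tflite::Profiler::EventType::DEFAULT, 0, 0);
  // ... run the work being measured ...
  profiler.EndEvent(handle);
}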
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include <arm_neon.h> 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #include "NEON_2_SSE.h" 26 | #endif 27 | 28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 29 | // defined, PortableSomeFunc(args) otherwise. 30 | #ifdef USE_NEON 31 | // Always use Neon code 32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 33 | 34 | #else 35 | // No NEON available: Use Portable code 36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) 37 | 38 | #endif // defined(USE_NEON) 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 41 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_profiler.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
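Not part of the repository — a self-contained sketch of how the NEON_OR_PORTABLE dispatch above is used. The ScaleBuffer kernel pair is hypothetical; a real Neon variant would use intrinsics instead of reusing the portable loop.

#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"

inline void PortableScaleBuffer(float* data, int size, float scale) {
  for (int i = 0; i < size; ++i) data[i] *= scale;
}

#ifdef USE_NEON
inline void NeonScaleBuffer(float* data, int size, float scale) {
  // Placeholder: delegates to the portable loop to stay self-contained.
  PortableScaleBuffer(data, size, scale);
}
#endif

inline void ScaleBuffer(float* data, int size, float scale) {
  // Expands to NeonScaleBuffer(...) when USE_NEON is defined,
  // PortableScaleBuffer(...) otherwise.
  NEON_OR_PORTABLE(ScaleBuffer, data, size, scale);
}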
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_profiler.h" 17 | 18 | #include "tensorflow/lite/kernels/internal/compatibility.h" 19 | #include "tensorflow/lite/micro/micro_time.h" 20 | 21 | namespace tflite { 22 | 23 | MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) 24 | : reporter_(reporter) {} 25 | 26 | uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, 27 | int64_t event_metadata1, 28 | int64_t event_metadata2) { 29 | start_time_ = GetCurrentTimeTicks(); 30 | TFLITE_DCHECK(tag != nullptr); 31 | event_tag_ = tag; 32 | return 0; 33 | } 34 | 35 | void MicroProfiler::EndEvent(uint32_t event_handle) { 36 | int32_t end_time = GetCurrentTimeTicks(); 37 | TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, 38 | end_time - start_time_); 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/Kconfig.projbuild: -------------------------------------------------------------------------------- 1 | menu "KWS" 2 | 3 | config KWS_CONFIRM_THRESHOLD 4 | int "Confidence Threshold" 5 | default 2 6 | 7 | choice 8 | prompt "Confirm Model" 9 | default KWS_LANG_LIBRI 10 | 11 | config KWS_LANG_MUSAN 12 | bool "Musan" 13 | config KWS_LANG_LIBRI 14 | bool "Libri" 15 | endchoice 16 | 17 | config KWS_GUESS_MODEL_START 18 | string 19 | default "_binary_dcnn_en_tflite_start" 20 | 21 | config KWS_GUESS_MODEL_OUT_NUM 22 | int 23 | default 12 24 | 25 | config KWS_GUESS_TENSOR_ARENA_SZ 26 | int 27 | default 18048 28 | 29 | choice 30 | prompt "CPU core that KWS runs on" 31 | default KWS_PINNED_TO_CORE_1 32 | config KWS_PINNED_TO_CORE_0 33 | bool "Core 0 (PRO CPU)" 34 | config KWS_PINNED_TO_CORE_1 35 | bool "Core 1 (APP CPU)" 36 | endchoice 37 | 38 | config KWS_PINNED_TO_CORE 39 | int 40 | default 0 if KWS_PINNED_TO_CORE_0 41 | default 1 if KWS_PINNED_TO_CORE_1 42 | 43 | config KWS_CONFIRM_MODEL_START 44 | string 45 | default "_binary_dcnn_bin_musan_tflite_start" if KWS_LANG_MUSAN 46 | default "_binary_dcnn_bin_libri_tflite_start" if KWS_LANG_LIBRI 47 | 48 | config KWS_CONFIRM_MODEL_ZERO_ASSERT 49 | hex 50 | default c0c20963 if KWS_LANG_MUSAN 51 | default c08265cc if KWS_LANG_LIBRI 52 | 53 | config KWS_CONFIRM_TENSOR_ARENA_SZ 54 | int 55 | default 10568 56 | 57 | choice 58 | prompt "CPU core that the confirm model runs on" 59 | default KWS_CONFIRM_PINNED_TO_CORE_0 60 | config KWS_CONFIRM_PINNED_TO_CORE_0 61 | bool "Core 0 (PRO CPU)" 62 | config KWS_CONFIRM_PINNED_TO_CORE_1 63 | bool "Core 1 (APP CPU)" 64 | endchoice 65 | 66 | config KWS_CONFIRM_PINNED_TO_CORE 67 | int 68 | default 0 if KWS_CONFIRM_PINNED_TO_CORE_0 69 | default 1 if KWS_CONFIRM_PINNED_TO_CORE_1 70 | 71 | endmenu -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
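The Kconfig entries above surface at build time as CONFIG_KWS_* macros in the generated sdkconfig.h (standard ESP-IDF behaviour). A hedged sketch of how they could be consumed follows; the functions, stack size and priority are illustrative, not the kws component's actual code.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "sdkconfig.h"

// "Confidence Threshold" becomes CONFIG_KWS_CONFIRM_THRESHOLD (default 2),
// e.g. require this many agreeing windows before reporting a detection.
static bool keyword_confirmed(int positive_windows) {
  return positive_windows >= CONFIG_KWS_CONFIRM_THRESHOLD;
}

// The core choice collapses to an integer used when pinning the KWS task.
static void start_kws_task(void (*task_fn)(void *)) {
  xTaskCreatePinnedToCore(task_fn, "kws", 4096, NULL, 5, NULL,
                          CONFIG_KWS_PINNED_TO_CORE);
}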
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/c/common.h" 21 | 22 | namespace tflite { 23 | 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 25 | if (!tensor->is_variable) { 26 | return kTfLiteOk; 27 | } 28 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 29 | // to the value of the buffer. 30 | int value = 0; 31 | if (tensor->type == kTfLiteInt8) { 32 | value = tensor->params.zero_point; 33 | } 34 | // TODO(b/139446230): Provide a platform header to better handle these 35 | // specific scenarios. 36 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 37 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 38 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 39 | memset(tensor->data.raw, value, tensor->bytes); 40 | #else 41 | char* raw_ptr = tensor->data.raw; 42 | for (size_t i = 0; i < tensor->bytes; ++i) { 43 | *raw_ptr = value; 44 | raw_ptr++; 45 | } 46 | #endif 47 | return kTfLiteOk; 48 | } 49 | 50 | } // namespace tflite 51 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? 
reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/c/common.h" 21 | 22 | namespace tflite { 23 | 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 25 | if (!tensor->is_variable) { 26 | return kTfLiteOk; 27 | } 28 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 29 | // to the value of the buffer. 30 | int value = 0; 31 | if (tensor->type == kTfLiteInt8) { 32 | value = tensor->params.zero_point; 33 | } 34 | // TODO(b/139446230): Provide a platform header to better handle these 35 | // specific scenarios. 36 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 37 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 38 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 39 | memset(tensor->data.raw, value, tensor->bytes); 40 | #else 41 | char* raw_ptr = tensor->data.raw; 42 | for (size_t i = 0; i < tensor->bytes; ++i) { 43 | *raw_ptr = value; 44 | raw_ptr++; 45 | } 46 | #endif 47 | return kTfLiteOk; 48 | } 49 | 50 | } // namespace tflite 51 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/c/common.h" 21 | 22 | namespace tflite { 23 | 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 25 | if (!tensor->is_variable) { 26 | return kTfLiteOk; 27 | } 28 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 29 | // to the value of the buffer. 30 | int value = 0; 31 | if (tensor->type == kTfLiteInt8) { 32 | value = tensor->params.zero_point; 33 | } 34 | // TODO(b/139446230): Provide a platform header to better handle these 35 | // specific scenarios. 36 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 37 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 38 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 39 | memset(tensor->data.raw, value, tensor->bytes); 40 | #else 41 | char* raw_ptr = tensor->data.raw; 42 | for (size_t i = 0; i < tensor->bytes; ++i) { 43 | *raw_ptr = value; 44 | raw_ptr++; 45 | } 46 | #endif 47 | return kTfLiteOk; 48 | } 49 | 50 | } // namespace tflite 51 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
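A brief sketch (not from the repository) of a kernel-style helper using GetTensorData and GetTensorShape from the header above; in a registered op the tensors would come from the TfLiteContext/TfLiteNode rather than being passed in directly.

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"

void CopyFloatTensor(const TfLiteTensor* input, TfLiteTensor* output) {
  const tflite::RuntimeShape shape = tflite::GetTensorShape(input);
  const float* in = tflite::GetTensorData<float>(input);
  float* out = tflite::GetTensorData<float>(output);
  for (int i = 0; i < shape.FlatSize(); ++i) {
    out[i] = in[i];
  }
}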
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/core/api/error_reporter.h" 23 | #include "tensorflow/lite/schema/schema_generated.h" 24 | 25 | namespace tflite { 26 | 27 | // Returns the next pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 29 | 30 | // Returns the previous pointer address aligned to the given alignment. 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 32 | 33 | // Returns an increased size that's a multiple of alignment. 34 | size_t AlignSizeUp(size_t size, size_t alignment); 35 | 36 | // Returns size in bytes for a given TfLiteType. 
37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 38 | ErrorReporter* reporter); 39 | 40 | // How many bytes are needed to hold a tensor's contents. 41 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 42 | size_t* bytes, size_t* type_size, 43 | ErrorReporter* error_reporter); 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 48 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/core/api/error_reporter.h" 23 | #include "tensorflow/lite/schema/schema_generated.h" 24 | 25 | namespace tflite { 26 | 27 | // Returns the next pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 29 | 30 | // Returns the previous pointer address aligned to the given alignment. 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 32 | 33 | // Returns an increased size that's a multiple of alignment. 34 | size_t AlignSizeUp(size_t size, size_t alignment); 35 | 36 | // Returns size in bytes for a given TfLiteType. 37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 38 | ErrorReporter* reporter); 39 | 40 | // How many bytes are needed to hold a tensor's contents. 41 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 42 | size_t* bytes, size_t* type_size, 43 | ErrorReporter* error_reporter); 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 48 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_helpers.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
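Not part of the repository — a numeric sketch of the alignment helpers declared above, assuming the usual round-up semantics (next multiple of the alignment):

#include <cstddef>

#include "tensorflow/lite/micro/memory_helpers.h"

//   AlignSizeUp(10, 4)  -> 12   (rounded up to the next multiple of 4)
//   AlignSizeUp(16, 16) -> 16   (already aligned)
// AlignPointerUp / AlignPointerDown apply the same idea to addresses inside
// the tensor arena.
size_t AlignedArenaBytes(size_t requested) {
  return tflite::AlignSizeUp(requested, 16);
}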
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/core/api/error_reporter.h" 23 | #include "tensorflow/lite/schema/schema_generated.h" 24 | 25 | namespace tflite { 26 | 27 | // Returns the next pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 29 | 30 | // Returns the previous pointer address aligned to the given alignment. 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 32 | 33 | // Returns an increased size that's a multiple of alignment. 34 | size_t AlignSizeUp(size_t size, size_t alignment); 35 | 36 | // Returns size in bytes for a given TfLiteType. 37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size, 38 | ErrorReporter* reporter); 39 | 40 | // How many bytes are needed to hold a tensor's contents. 41 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 42 | size_t* bytes, size_t* type_size, 43 | ErrorReporter* error_reporter); 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 48 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 
45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 21 | 22 | namespace tflite { 23 | 24 | // The simplest possible memory planner that just lays out all buffers at 25 | // increasing offsets without trying to reuse memory. 26 | class LinearMemoryPlanner : public MemoryPlanner { 27 | public: 28 | LinearMemoryPlanner(); 29 | ~LinearMemoryPlanner() override; 30 | 31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 32 | int first_time_used, int last_time_used) override; 33 | 34 | size_t GetMaximumMemorySize() override; 35 | int GetBufferCount() override; 36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 37 | int buffer_index, int* offset) override; 38 | 39 | private: 40 | static constexpr int kMaxBufferCount = 1024; 41 | size_t buffer_offsets_[kMaxBufferCount]; 42 | int current_buffer_count_; 43 | size_t next_free_offset_; 44 | 45 | TF_LITE_REMOVE_VIRTUAL_DELETE 46 | }; 47 | 48 | } // namespace tflite 49 | 50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 51 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
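A short worked example (not from the repository) of the round-half-to-even behaviour implemented by RoundToNearest in the header above, matching tf.round:

#include "tensorflow/lite/kernels/internal/reference/round.h"

// Ties go to the nearest even integer:
//   RoundToNearest(2.5f) -> 2.0f   (tie, 2 is even)
//   RoundToNearest(3.5f) -> 4.0f   (tie, 4 is even)
//   RoundToNearest(2.4f) -> 2.0f
//   RoundToNearest(2.6f) -> 3.0f
float RoundExample(float x) {
  return tflite::reference_ops::RoundToNearest(x);
}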
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 
45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 21 | 22 | namespace tflite { 23 | 24 | // The simplest possible memory planner that just lays out all buffers at 25 | // increasing offsets without trying to reuse memory. 26 | class LinearMemoryPlanner : public MemoryPlanner { 27 | public: 28 | LinearMemoryPlanner(); 29 | ~LinearMemoryPlanner() override; 30 | 31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 32 | int first_time_used, int last_time_used) override; 33 | 34 | size_t GetMaximumMemorySize() override; 35 | int GetBufferCount() override; 36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 37 | int buffer_index, int* offset) override; 38 | 39 | private: 40 | static constexpr int kMaxBufferCount = 1024; 41 | size_t buffer_offsets_[kMaxBufferCount]; 42 | int current_buffer_count_; 43 | size_t next_free_offset_; 44 | 45 | TF_LITE_REMOVE_VIRTUAL_DELETE 46 | }; 47 | 48 | } // namespace tflite 49 | 50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 51 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
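// A minimal sketch of how the LinearMemoryPlanner interface above is driven:
// buffers are registered with their size and lifetime, then their offsets
// inside the arena are queried. It assumes the MicroErrorReporter from
// micro_error_reporter.h; this is an illustration, not code from the project.
#include <cstddef>

#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

std::size_t PlanTwoBuffers() {
  tflite::MicroErrorReporter error_reporter;
  tflite::LinearMemoryPlanner planner;

  // The linear planner ignores the lifetimes and simply stacks buffers, so
  // the second buffer starts right after the first.
  planner.AddBuffer(&error_reporter, /*size=*/256, /*first_time_used=*/0,
                    /*last_time_used=*/1);
  planner.AddBuffer(&error_reporter, /*size=*/128, /*first_time_used=*/1,
                    /*last_time_used=*/2);

  int offset = 0;
  planner.GetOffsetForBuffer(&error_reporter, /*buffer_index=*/1, &offset);
  // Here offset == 256 and the total arena requirement is 384 bytes.
  return planner.GetMaximumMemorySize();
}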
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 21 | 22 | namespace tflite { 23 | 24 | // The simplest possible memory planner that just lays out all buffers at 25 | // increasing offsets without trying to reuse memory. 26 | class LinearMemoryPlanner : public MemoryPlanner { 27 | public: 28 | LinearMemoryPlanner(); 29 | ~LinearMemoryPlanner() override; 30 | 31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 32 | int first_time_used, int last_time_used) override; 33 | 34 | size_t GetMaximumMemorySize() override; 35 | int GetBufferCount() override; 36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 37 | int buffer_index, int* offset) override; 38 | 39 | private: 40 | static constexpr int kMaxBufferCount = 1024; 41 | size_t buffer_offsets_[kMaxBufferCount]; 42 | int current_buffer_count_; 43 | size_t next_free_offset_; 44 | 45 | TF_LITE_REMOVE_VIRTUAL_DELETE 46 | }; 47 | 48 | } // namespace tflite 49 | 50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 51 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/lite/c/builtin_op_data.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | #include "tensorflow/lite/kernels/internal/max.h" 25 | #include "tensorflow/lite/kernels/internal/min.h" 26 | 27 | namespace tflite { 28 | namespace ops { 29 | namespace micro { 30 | 31 | // Returns the floating point value for a fused activation: 32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 33 | switch (act) { 34 | case kTfLiteActNone: 35 | return a; 36 | case kTfLiteActRelu: 37 | return TfLiteMax(0.0f, a); 38 | case kTfLiteActReluN1To1: 39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f)); 40 | case kTfLiteActRelu6: 41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f)); 42 | case kTfLiteActTanh: 43 | return std::tanh(a); 44 | case kTfLiteActSignBit: 45 | return std::signbit(a); 46 | case kTfLiteActSigmoid: 47 | return 1.0f / (1.0f + std::exp(-a)); 48 | } 49 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused 50 | // activation is added to the enum and not handled here). 
51 | } 52 | 53 | } // namespace micro 54 | } // namespace ops 55 | } // namespace tflite 56 | 57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 58 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/lite/c/builtin_op_data.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | #include "tensorflow/lite/kernels/internal/max.h" 25 | #include "tensorflow/lite/kernels/internal/min.h" 26 | 27 | namespace tflite { 28 | namespace ops { 29 | namespace micro { 30 | 31 | // Returns the floating point value for a fused activation: 32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 33 | switch (act) { 34 | case kTfLiteActNone: 35 | return a; 36 | case kTfLiteActRelu: 37 | return TfLiteMax(0.0f, a); 38 | case kTfLiteActReluN1To1: 39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f)); 40 | case kTfLiteActRelu6: 41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f)); 42 | case kTfLiteActTanh: 43 | return std::tanh(a); 44 | case kTfLiteActSignBit: 45 | return std::signbit(a); 46 | case kTfLiteActSigmoid: 47 | return 1.0f / (1.0f + std::exp(-a)); 48 | } 49 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused 50 | // activation is added to the enum and not handled here). 
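// A minimal sketch of how kernels use ActivationValFloat() above: the fused
// activation is applied element by element after the main op has produced its
// output. The buffer and length here are arbitrary illustration values.
#include "tensorflow/lite/micro/kernels/activation_utils.h"

void ApplyFusedRelu6(float* data, int len) {
  for (int i = 0; i < len; ++i) {
    data[i] = tflite::ops::micro::ActivationValFloat(kTfLiteActRelu6, data[i]);
  }
}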
51 | } 52 | 53 | } // namespace micro 54 | } // namespace ops 55 | } // namespace tflite 56 | 57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 58 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tf-streaming.cc: -------------------------------------------------------------------------------- 1 | #include "tf-streaming.h" 2 | #include 3 | #include 4 | 5 | /*****************************************************************************/ 6 | 7 | void StreamingState::init(char* ii, size_t is, char* oo, size_t os) 8 | { 9 | o = oo, i = ii, i_sz = is, o_sz = os; 10 | configASSERT(i_sz > o_sz); 11 | memset(i, 0, i_sz); 12 | r = new char[i_sz](); 13 | configASSERT(r != nullptr); 14 | mv_sz = i_sz - o_sz; 15 | } 16 | 17 | /*****************************************************************************/ 18 | 19 | StreamingState::StreamingState(TfLiteTensor* ii, TfLiteTensor* oo) 20 | { 21 | configASSERT(oo->type == kTfLiteFloat32); 22 | configASSERT(ii->type == kTfLiteFloat32); 23 | configASSERT(ii->bytes % oo->bytes == 0); 24 | init(ii->data.raw, ii->bytes, oo->data.raw, oo->bytes); 25 | } 26 | 27 | /*****************************************************************************/ 28 | 29 | StreamingState::StreamingState(TfLiteTensor* ii, TfLiteTensor* oo, size_t d) 30 | { 31 | configASSERT(oo->type == kTfLiteFloat32); 32 | configASSERT(ii->type == kTfLiteFloat32); 33 | configASSERT(ii->bytes % oo->bytes == 0); 34 | configASSERT(ii->bytes / oo->bytes == 2); 35 | configASSERT(d > 1); 36 | size_t x = 1 + (1 << (d - 1)); 37 | init(ii->data.raw, x * oo->bytes, oo->data.raw, oo->bytes); 38 | strategy = &dilated_strategy_impl; 39 | } 40 | 41 | /*****************************************************************************/ 42 | 43 | void StreamingState::default_strategy_impl(StreamingState* self) 44 | { 45 | memmove(self->r, &self->r[self->o_sz], self->mv_sz); 46 | memcpy(&self->r[self->mv_sz], self->o, self->o_sz); 47 | memcpy(self->i, self->r, self->i_sz); 48 | } 49 | 50 | /*****************************************************************************/ 51 | 52 | void StreamingState::dilated_strategy_impl(StreamingState* self) 53 | { 54 | memmove(self->r, &self->r[self->o_sz], self->mv_sz); 55 | memcpy(&self->r[self->mv_sz], self->o, self->o_sz); 56 | memcpy(self->i, self->r, self->o_sz); 57 | memcpy(&self->i[self->o_sz], &self->r[self->mv_sz], self->o_sz); 58 | } 59 | 60 | /*****************************************************************************/ 61 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
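// A stand-alone simulation of what StreamingState::default_strategy_impl() in
// tf-streaming.cc above does between inferences, using plain floats instead of
// TfLiteTensor data: the ring buffer keeps the last i_sz/o_sz layer outputs,
// each new frame shifts it left by one output and appends the newest one, and
// the whole window is fed back as the layer's next input. Sizes are example
// values only.
#include <cstdio>
#include <cstring>

int main() {
  constexpr int kOut = 2;   // o_sz in elements
  constexpr int kIn = 6;    // i_sz in elements (3 stacked outputs)
  float ring[kIn] = {};     // r: state kept between frames
  float window[kIn] = {};   // i: what the layer sees next

  for (int frame = 1; frame <= 4; ++frame) {
    const float out[kOut] = {frame * 1.0f, frame * 1.0f + 0.5f};    // o
    std::memmove(ring, ring + kOut, (kIn - kOut) * sizeof(float));  // shift
    std::memcpy(ring + kIn - kOut, out, kOut * sizeof(float));      // append
    std::memcpy(window, ring, kIn * sizeof(float));                 // feed back
    std::printf("frame %d window:", frame);
    for (float v : window) std::printf(" %.1f", v);
    std::printf("\n");
  }
  return 0;
}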
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/lite/c/builtin_op_data.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | #include "tensorflow/lite/kernels/internal/max.h" 25 | #include "tensorflow/lite/kernels/internal/min.h" 26 | 27 | namespace tflite { 28 | namespace ops { 29 | namespace micro { 30 | 31 | // Returns the floating point value for a fused activation: 32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 33 | switch (act) { 34 | case kTfLiteActNone: 35 | return a; 36 | case kTfLiteActRelu: 37 | return TfLiteMax(0.0f, a); 38 | case kTfLiteActReluN1To1: 39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f)); 40 | case kTfLiteActRelu6: 41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f)); 42 | case kTfLiteActTanh: 43 | return std::tanh(a); 44 | case kTfLiteActSignBit: 45 | return std::signbit(a); 46 | case kTfLiteActSigmoid: 47 | return 1.0f / (1.0f + std::exp(-a)); 48 | } 49 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused 50 | // activation is added to the enum and not handled here). 51 | } 52 | 53 | } // namespace micro 54 | } // namespace ops 55 | } // namespace tflite 56 | 57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 58 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of timer functions. Platforms are not required to 17 | // implement these timer methods, but they are required to enable profiling. 18 | 19 | // On platforms that have a POSIX stack or C library, it can be written using 20 | // methods from or clock() from . 21 | 22 | // To add an equivalent function for your own platform, create your own 23 | // implementation file, and place it in a subfolder with named after the OS 24 | // you're targeting. For example, see the Cortex M bare metal version in 25 | // tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on 26 | // tensorflow/lite/micro/mbed/micro_time.cc. 27 | 28 | #include "tensorflow/lite/micro/micro_time.h" 29 | 30 | namespace tflite { 31 | 32 | // Reference implementation of the ticks_per_second() function that's required 33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling. 34 | // This returns 0 by default because timing is an optional feature that builds 35 | // without errors on platforms that do not need it. 
36 | int32_t ticks_per_second() { return 0; } 37 | 38 | // Reference implementation of the GetCurrentTimeTicks() function that's 39 | // required for a platform to support Tensorflow Lite for Microcontrollers 40 | // profiling. This returns 0 by default because timing is an optional feature 41 | // that builds without errors on platforms that do not need it. 42 | int32_t GetCurrentTimeTicks() { return 0; } 43 | 44 | } // namespace tflite 45 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of timer functions. Platforms are not required to 17 | // implement these timer methods, but they are required to enable profiling. 18 | 19 | // On platforms that have a POSIX stack or C library, it can be written using 20 | // methods from or clock() from . 21 | 22 | // To add an equivalent function for your own platform, create your own 23 | // implementation file, and place it in a subfolder with named after the OS 24 | // you're targeting. For example, see the Cortex M bare metal version in 25 | // tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on 26 | // tensorflow/lite/micro/mbed/micro_time.cc. 27 | 28 | #include "tensorflow/lite/micro/micro_time.h" 29 | 30 | namespace tflite { 31 | 32 | // Reference implementation of the ticks_per_second() function that's required 33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling. 34 | // This returns 0 by default because timing is an optional feature that builds 35 | // without errors on platforms that do not need it. 36 | int32_t ticks_per_second() { return 0; } 37 | 38 | // Reference implementation of the GetCurrentTimeTicks() function that's 39 | // required for a platform to support Tensorflow Lite for Microcontrollers 40 | // profiling. This returns 0 by default because timing is an optional feature 41 | // that builds without errors on platforms that do not need it. 42 | int32_t GetCurrentTimeTicks() { return 0; } 43 | 44 | } // namespace tflite 45 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/micro_time.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of timer functions. Platforms are not required to 17 | // implement these timer methods, but they are required to enable profiling. 18 | 19 | // On platforms that have a POSIX stack or C library, it can be written using 20 | // methods from or clock() from . 21 | 22 | // To add an equivalent function for your own platform, create your own 23 | // implementation file, and place it in a subfolder with named after the OS 24 | // you're targeting. For example, see the Cortex M bare metal version in 25 | // tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on 26 | // tensorflow/lite/micro/mbed/micro_time.cc. 27 | 28 | #include "tensorflow/lite/micro/micro_time.h" 29 | 30 | namespace tflite { 31 | 32 | // Reference implementation of the ticks_per_second() function that's required 33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling. 34 | // This returns 0 by default because timing is an optional feature that builds 35 | // without errors on platforms that do not need it. 36 | int32_t ticks_per_second() { return 0; } 37 | 38 | // Reference implementation of the GetCurrentTimeTicks() function that's 39 | // required for a platform to support Tensorflow Lite for Microcontrollers 40 | // profiling. This returns 0 by default because timing is an optional feature 41 | // that builds without errors on platforms that do not need it. 42 | int32_t GetCurrentTimeTicks() { return 0; } 43 | 44 | } // namespace tflite 45 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
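// A hedged sketch of a platform-specific replacement for the stubbed timer
// functions above, so that TFLM profiling reports real numbers on the ESP32
// target this project runs on. It relies on ESP-IDF's esp_timer_get_time()
// (microseconds since boot); where such a file would live in the component is
// an assumption, not something the repository prescribes.
#include "tensorflow/lite/micro/micro_time.h"

#include "esp_timer.h"

namespace tflite {

// One tick per microsecond.
int32_t ticks_per_second() { return 1000000; }

// Truncated to 32 bits, which is fine for the short intervals profiled here.
int32_t GetCurrentTimeTicks() {
  return static_cast<int32_t>(esp_timer_get_time());
}

}  // namespace tflite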
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace floor { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | reference_ops::Floor(GetTensorShape(input), GetTensorData(input), 35 | GetTensorShape(output), GetTensorData(output)); 36 | return kTfLiteOk; 37 | } 38 | } // namespace floor 39 | 40 | TfLiteRegistration* Register_FLOOR() { 41 | static TfLiteRegistration r = {/*init=*/nullptr, 42 | /*free=*/nullptr, 43 | /*prepare=*/nullptr, 44 | /*invoke=*/floor::Eval, 45 | /*profiling_string=*/nullptr, 46 | /*builtin_code=*/0, 47 | /*custom_name=*/nullptr, 48 | /*version=*/0}; 49 | return &r; 50 | } 51 | 52 | } // namespace micro 53 | } // namespace ops 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace floor { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | reference_ops::Floor(GetTensorShape(input), GetTensorData(input), 35 | GetTensorShape(output), GetTensorData(output)); 36 | return kTfLiteOk; 37 | } 38 | } // namespace floor 39 | 40 | TfLiteRegistration* Register_FLOOR() { 41 | static TfLiteRegistration r = {/*init=*/nullptr, 42 | /*free=*/nullptr, 43 | /*prepare=*/nullptr, 44 | /*invoke=*/floor::Eval, 45 | /*profiling_string=*/nullptr, 46 | /*builtin_code=*/0, 47 | /*custom_name=*/nullptr, 48 | /*version=*/0}; 49 | return &r; 50 | } 51 | 52 | } // namespace micro 53 | } // namespace ops 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
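// A hedged sketch of how Register_FLOOR() above actually gets used: an op
// resolver hands the registration to the interpreter, which then calls
// floor::Eval() for FLOOR nodes in the graph. The vendored all_ops_resolver.h
// registers every builtin this way. MicroInterpreter's constructor arguments
// have changed across TFLM versions, so treat this as an illustration rather
// than the project's actual wiring.
#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"

TfLiteStatus RunModelOnce(const void* model_data, uint8_t* arena,
                          size_t arena_size) {
  static tflite::MicroErrorReporter error_reporter;
  const tflite::Model* model = tflite::GetModel(model_data);

  // Maps BuiltinOperator_FLOOR (and the other builtins) to their kernels.
  static tflite::AllOpsResolver resolver;

  tflite::MicroInterpreter interpreter(model, resolver, arena, arena_size,
                                       &error_reporter);
  if (interpreter.AllocateTensors() != kTfLiteOk) {
    return kTfLiteError;
  }
  return interpreter.Invoke();
}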
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace floor { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 32 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 34 | reference_ops::Floor(GetTensorShape(input), GetTensorData(input), 35 | GetTensorShape(output), GetTensorData(output)); 36 | return kTfLiteOk; 37 | } 38 | } // namespace floor 39 | 40 | TfLiteRegistration* Register_FLOOR() { 41 | static TfLiteRegistration r = {/*init=*/nullptr, 42 | /*free=*/nullptr, 43 | /*prepare=*/nullptr, 44 | /*invoke=*/floor::Eval, 45 | /*profiling_string=*/nullptr, 46 | /*builtin_code=*/0, 47 | /*custom_name=*/nullptr, 48 | /*version=*/0}; 49 | return &r; 50 | } 51 | 52 | } // namespace micro 53 | } // namespace ops 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", 29 | kMaxBufferCount); 30 | return kTfLiteError; 31 | } 32 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 33 | next_free_offset_ += size; 34 | ++current_buffer_count_; 35 | return kTfLiteOk; 36 | } 37 | 38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 39 | 40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 41 | 42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 45 | TF_LITE_REPORT_ERROR(error_reporter, 46 | "buffer index %d is outside range 0 to %d", 47 | buffer_index, current_buffer_count_); 48 | return kTfLiteError; 49 | } 50 | *offset = buffer_offsets_[buffer_index]; 51 | return kTfLiteOk; 52 | } 53 | 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", 29 | kMaxBufferCount); 30 | return kTfLiteError; 31 | } 32 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 33 | next_free_offset_ += size; 34 | ++current_buffer_count_; 35 | return kTfLiteOk; 36 | } 37 | 38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 39 | 40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 41 | 42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 45 | TF_LITE_REPORT_ERROR(error_reporter, 46 | "buffer index %d is outside range 0 to %d", 47 | buffer_index, current_buffer_count_); 48 | return kTfLiteError; 49 | } 50 | *offset = buffer_offsets_[buffer_index]; 51 | return kTfLiteOk; 52 | } 53 | 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
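// A minimal sketch of the other side of the error_reporter parameter used by
// the planner above: TF_LITE_REPORT_ERROR ends up in ErrorReporter::Report().
// Subclassing ErrorReporter lets a target without a useful stderr redirect
// those messages; the buffer size and printf destination are arbitrary choices
// for the illustration.
#include <cstdarg>
#include <cstdio>

#include "tensorflow/lite/core/api/error_reporter.h"

class PrintfErrorReporter : public tflite::ErrorReporter {
 public:
  int Report(const char* format, va_list args) override {
    char buffer[128];
    const int written = vsnprintf(buffer, sizeof(buffer), format, args);
    std::printf("[tflite] %s\n", buffer);
    return written;
  }
};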
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", 29 | kMaxBufferCount); 30 | return kTfLiteError; 31 | } 32 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 33 | next_free_offset_ += size; 34 | ++current_buffer_count_; 35 | return kTfLiteOk; 36 | } 37 | 38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 39 | 40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 41 | 42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 45 | TF_LITE_REPORT_ERROR(error_reporter, 46 | "buffer index %d is outside range 0 to %d", 47 | buffer_index, current_buffer_count_); 48 | return kTfLiteError; 49 | } 50 | *offset = buffer_offsets_[buffer_index]; 51 | return kTfLiteOk; 52 | } 53 | 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 
33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
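// A minimal sketch of the OpResolver interface above: a resolver that knows
// only the FLOOR builtin, showing what the two FindOp() overloads are expected
// to return. Real applications use the vendored resolvers instead; the forward
// declaration matches the Register_FLOOR() kernel shown earlier in this tree.
#include "tensorflow/lite/core/api/op_resolver.h"

namespace tflite {
namespace ops {
namespace micro {
TfLiteRegistration* Register_FLOOR();  // defined in micro/kernels/floor.cc
}  // namespace micro
}  // namespace ops

class FloorOnlyResolver : public OpResolver {
 public:
  const TfLiteRegistration* FindOp(BuiltinOperator op,
                                   int version) const override {
    return op == BuiltinOperator_FLOOR ? ops::micro::Register_FLOOR() : nullptr;
  }

  const TfLiteRegistration* FindOp(const char* op, int version) const override {
    return nullptr;  // this sketch registers no custom ops
  }
};

}  // namespace tflite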
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/schema/schema_generated.h" 21 | 22 | namespace tflite { 23 | 24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 25 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 26 | /// model are mapped to executable function pointers (TfLiteRegistrations). 27 | class OpResolver { 28 | public: 29 | /// Finds the op registration for a builtin operator by enum code. 30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 31 | int version) const = 0; 32 | /// Finds the op registration of a custom operator by op name. 33 | virtual const TfLiteRegistration* FindOp(const char* op, 34 | int version) const = 0; 35 | virtual ~OpResolver() {} 36 | }; 37 | 38 | // Handles the logic for converting between an OperatorCode structure extracted 39 | // from a flatbuffer and information about a registered operator 40 | // implementation. 41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 42 | const OpResolver& op_resolver, 43 | ErrorReporter* error_reporter, 44 | const TfLiteRegistration** registration); 45 | 46 | } // namespace tflite 47 | 48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 49 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 17 | 18 | // If we're on a platform without standard IO functions, fall back to a 19 | // non-portable function. 
20 | #ifdef TF_LITE_MCU_DEBUG_LOG 21 | 22 | #include "tensorflow/lite/micro/debug_log.h" 23 | 24 | #define DEBUG_LOG(x) \ 25 | do { \ 26 | DebugLog(x); \ 27 | } while (0) 28 | 29 | inline void InfiniteLoop() { 30 | DEBUG_LOG("HALTED\n"); 31 | while (1) { 32 | } 33 | } 34 | 35 | #define TFLITE_ABORT InfiniteLoop(); 36 | 37 | #else // TF_LITE_MCU_DEBUG_LOG 38 | 39 | #include 40 | #include 41 | 42 | #define DEBUG_LOG(x) \ 43 | do { \ 44 | fprintf(stderr, "%s", (x)); \ 45 | } while (0) 46 | 47 | #define TFLITE_ABORT abort() 48 | 49 | #endif // TF_LITE_MCU_DEBUG_LOG 50 | 51 | #ifdef NDEBUG 52 | #define TFLITE_ASSERT_FALSE (static_cast(0)) 53 | #else 54 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT 55 | #endif 56 | 57 | #define TF_LITE_FATAL(msg) \ 58 | do { \ 59 | DEBUG_LOG(msg); \ 60 | DEBUG_LOG("\nFATAL\n"); \ 61 | TFLITE_ABORT; \ 62 | } while (0) 63 | 64 | #define TF_LITE_ASSERT(x) \ 65 | do { \ 66 | if (!(x)) TF_LITE_FATAL(#x); \ 67 | } while (0) 68 | 69 | #define TF_LITE_ASSERT_EQ(x, y) \ 70 | do { \ 71 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ 72 | } while (0) 73 | 74 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 75 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 17 | 18 | // If we're on a platform without standard IO functions, fall back to a 19 | // non-portable function. 
20 | #ifdef TF_LITE_MCU_DEBUG_LOG 21 | 22 | #include "tensorflow/lite/micro/debug_log.h" 23 | 24 | #define DEBUG_LOG(x) \ 25 | do { \ 26 | DebugLog(x); \ 27 | } while (0) 28 | 29 | inline void InfiniteLoop() { 30 | DEBUG_LOG("HALTED\n"); 31 | while (1) { 32 | } 33 | } 34 | 35 | #define TFLITE_ABORT InfiniteLoop(); 36 | 37 | #else // TF_LITE_MCU_DEBUG_LOG 38 | 39 | #include 40 | #include 41 | 42 | #define DEBUG_LOG(x) \ 43 | do { \ 44 | fprintf(stderr, "%s", (x)); \ 45 | } while (0) 46 | 47 | #define TFLITE_ABORT abort() 48 | 49 | #endif // TF_LITE_MCU_DEBUG_LOG 50 | 51 | #ifdef NDEBUG 52 | #define TFLITE_ASSERT_FALSE (static_cast(0)) 53 | #else 54 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT 55 | #endif 56 | 57 | #define TF_LITE_FATAL(msg) \ 58 | do { \ 59 | DEBUG_LOG(msg); \ 60 | DEBUG_LOG("\nFATAL\n"); \ 61 | TFLITE_ABORT; \ 62 | } while (0) 63 | 64 | #define TF_LITE_ASSERT(x) \ 65 | do { \ 66 | if (!(x)) TF_LITE_FATAL(#x); \ 67 | } while (0) 68 | 69 | #define TF_LITE_ASSERT_EQ(x, y) \ 70 | do { \ 71 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ 72 | } while (0) 73 | 74 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 75 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 17 | 18 | // If we're on a platform without standard IO functions, fall back to a 19 | // non-portable function. 
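// A brief sketch of how the assertion macros above are used inside kernel
// helper code. With TF_LITE_MCU_DEBUG_LOG defined (the MCU build) a failed
// check prints through DebugLog() and spins in InfiniteLoop(); in a host build
// it goes to stderr and abort(). The helper functions are illustration only.
#include "tensorflow/lite/kernels/op_macros.h"

float SafeInverse(float x) {
  TF_LITE_ASSERT(x != 0.0f);  // halts with the stringified condition on failure
  return 1.0f / x;
}

void RequireSameRank(int dims_a, int dims_b) {
  TF_LITE_ASSERT_EQ(dims_a, dims_b);  // reports "dims_a didn't equal dims_b"
}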
20 | #ifdef TF_LITE_MCU_DEBUG_LOG 21 | 22 | #include "tensorflow/lite/micro/debug_log.h" 23 | 24 | #define DEBUG_LOG(x) \ 25 | do { \ 26 | DebugLog(x); \ 27 | } while (0) 28 | 29 | inline void InfiniteLoop() { 30 | DEBUG_LOG("HALTED\n"); 31 | while (1) { 32 | } 33 | } 34 | 35 | #define TFLITE_ABORT InfiniteLoop(); 36 | 37 | #else // TF_LITE_MCU_DEBUG_LOG 38 | 39 | #include 40 | #include 41 | 42 | #define DEBUG_LOG(x) \ 43 | do { \ 44 | fprintf(stderr, "%s", (x)); \ 45 | } while (0) 46 | 47 | #define TFLITE_ABORT abort() 48 | 49 | #endif // TF_LITE_MCU_DEBUG_LOG 50 | 51 | #ifdef NDEBUG 52 | #define TFLITE_ASSERT_FALSE (static_cast(0)) 53 | #else 54 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT 55 | #endif 56 | 57 | #define TF_LITE_FATAL(msg) \ 58 | do { \ 59 | DEBUG_LOG(msg); \ 60 | DEBUG_LOG("\nFATAL\n"); \ 61 | TFLITE_ABORT; \ 62 | } while (0) 63 | 64 | #define TF_LITE_ASSERT(x) \ 65 | do { \ 66 | if (!(x)) TF_LITE_FATAL(#x); \ 67 | } while (0) 68 | 69 | #define TF_LITE_ASSERT_EQ(x, y) \ 70 | do { \ 71 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ 72 | } while (0) 73 | 74 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 75 | -------------------------------------------------------------------------------- /mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of the DebugLog() function that's required for a 17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is 18 | // the only function that's absolutely required to be available on a target 19 | // device, since it's used for communicating test results back to the host so 20 | // that we can verify the implementation is working correctly. 21 | // It's designed to be as easy as possible to supply an implementation though. 22 | // On platforms that have a POSIX stack or C library, it can be written as a 23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error 24 | // stream of the console, but if there's no OS or C library available, there's 25 | // almost always an equivalent way to write out a string to some serial 26 | // interface that can be used instead. For example on Arm M-series MCUs, calling 27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to 28 | // whatever debug serial connection is available. If you're running mbed, you 29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling 30 | // `pc.printf("%s", s)`. 31 | // To add an equivalent function for your own platform, create your own 32 | // implementation file, and place it in a subfolder with named after the OS 33 | // you're targeting. 
For example, see the Cortex M bare metal version in 34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on 35 | // tensorflow/lite/micro/mbed/debug_log.cc. 36 | 37 | #include "tensorflow/lite/micro/debug_log.h" 38 | 39 | #include 40 | 41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); } 42 | -------------------------------------------------------------------------------- /mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of the DebugLog() function that's required for a 17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is 18 | // the only function that's absolutely required to be available on a target 19 | // device, since it's used for communicating test results back to the host so 20 | // that we can verify the implementation is working correctly. 21 | // It's designed to be as easy as possible to supply an implementation though. 22 | // On platforms that have a POSIX stack or C library, it can be written as a 23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error 24 | // stream of the console, but if there's no OS or C library available, there's 25 | // almost always an equivalent way to write out a string to some serial 26 | // interface that can be used instead. For example on Arm M-series MCUs, calling 27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to 28 | // whatever debug serial connection is available. If you're running mbed, you 29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling 30 | // `pc.printf("%s", s)`. 31 | // To add an equivalent function for your own platform, create your own 32 | // implementation file, and place it in a subfolder with named after the OS 33 | // you're targeting. For example, see the Cortex M bare metal version in 34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on 35 | // tensorflow/lite/micro/mbed/debug_log.cc. 36 | 37 | #include "tensorflow/lite/micro/debug_log.h" 38 | 39 | #include 40 | 41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); } 42 | -------------------------------------------------------------------------------- /2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
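// DebugLog() is the single hook every TFLM port must provide. The reference
// version above writes to stderr; a hedged alternative for the ESP32 target of
// this project is to route it through the ESP-IDF logging macros instead, as
// sketched here. The tag string is an arbitrary choice.
#include "tensorflow/lite/micro/debug_log.h"

#include "esp_log.h"

extern "C" void DebugLog(const char* s) {
  static const char* kTag = "tflite";
  ESP_LOGI(kTag, "%s", s);
}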
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | 
16 | // Reference implementation of the DebugLog() function that's required for a
17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is
18 | // the only function that's absolutely required to be available on a target
19 | // device, since it's used for communicating test results back to the host so
20 | // that we can verify the implementation is working correctly.
21 | // It's designed to be as easy as possible to supply an implementation though.
22 | // On platforms that have a POSIX stack or C library, it can be written as a
23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error
24 | // stream of the console, but if there's no OS or C library available, there's
25 | // almost always an equivalent way to write out a string to some serial
26 | // interface that can be used instead. For example on Arm M-series MCUs, calling
27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to
28 | // whatever debug serial connection is available. If you're running mbed, you
29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling
30 | // `pc.printf("%s", s)`.
31 | // To add an equivalent function for your own platform, create your own
32 | // implementation file, and place it in a subfolder named after the OS
33 | // you're targeting. For example, see the Cortex M bare metal version in
34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one in
35 | // tensorflow/lite/micro/mbed/debug_log.cc.
36 | 
37 | #include "tensorflow/lite/micro/debug_log.h"
38 | 
39 | #include <cstdio>
40 | 
41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); }
42 | 
--------------------------------------------------------------------------------
/mfcc-nn/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17 | 
18 | #include <algorithm>
19 | #include <limits>
20 | 
21 | #include "tensorflow/lite/kernels/internal/common.h"
22 | #include "tensorflow/lite/kernels/internal/compatibility.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/types.h"
25 | 
26 | namespace tflite {
27 | 
28 | namespace reference_ops {
29 | 
30 | template <typename InputT, typename OutputT>
31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params,
32 |                            const RuntimeShape& input_shape,
33 |                            const InputT* input_data,
34 |                            const RuntimeShape& output_shape,
35 |                            OutputT* output_data) {
36 |   const int32 zero_point = op_params.zero_point;
37 |   const double scale = op_params.scale;
38 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
39 |   static constexpr int32 min_val = std::numeric_limits<OutputT>::min();
40 |   static constexpr int32 max_val = std::numeric_limits<OutputT>::max();
41 | 
42 |   for (int i = 0; i < flat_size; i++) {
43 |     const InputT val = input_data[i];
44 |     int32 unclamped =
45 |         static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) +
46 |         zero_point;
47 |     int32 clamped = std::min(std::max(unclamped, min_val), max_val);
48 |     output_data[i] = clamped;
49 |   }
50 | }
51 | 
52 | }  // namespace reference_ops
53 | 
54 | }  // namespace tflite
55 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
56 | 
--------------------------------------------------------------------------------
/mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17 | 
18 | #include <algorithm>
19 | #include <limits>
20 | 
21 | #include "tensorflow/lite/kernels/internal/common.h"
22 | #include "tensorflow/lite/kernels/internal/compatibility.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/types.h"
25 | 
26 | namespace tflite {
27 | 
28 | namespace reference_ops {
29 | 
30 | template <typename InputT, typename OutputT>
31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params,
32 |                            const RuntimeShape& input_shape,
33 |                            const InputT* input_data,
34 |                            const RuntimeShape& output_shape,
35 |                            OutputT* output_data) {
36 |   const int32 zero_point = op_params.zero_point;
37 |   const double scale = op_params.scale;
38 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
39 |   static constexpr int32 min_val = std::numeric_limits<OutputT>::min();
40 |   static constexpr int32 max_val = std::numeric_limits<OutputT>::max();
41 | 
42 |   for (int i = 0; i < flat_size; i++) {
43 |     const InputT val = input_data[i];
44 |     int32 unclamped =
45 |         static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) +
46 |         zero_point;
47 |     int32 clamped = std::min(std::max(unclamped, min_val), max_val);
48 |     output_data[i] = clamped;
49 |   }
50 | }
51 | 
52 | }  // namespace reference_ops
53 | 
54 | }  // namespace tflite
55 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
56 | 
--------------------------------------------------------------------------------
/2-mfcc-nn-streaming/components/kws/tf/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17 | 
18 | #include <algorithm>
19 | #include <limits>
20 | 
21 | #include "tensorflow/lite/kernels/internal/common.h"
22 | #include "tensorflow/lite/kernels/internal/compatibility.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/types.h"
25 | 
26 | namespace tflite {
27 | 
28 | namespace reference_ops {
29 | 
30 | template <typename InputT, typename OutputT>
31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params,
32 |                            const RuntimeShape& input_shape,
33 |                            const InputT* input_data,
34 |                            const RuntimeShape& output_shape,
35 |                            OutputT* output_data) {
36 |   const int32 zero_point = op_params.zero_point;
37 |   const double scale = op_params.scale;
38 |   const int flat_size = MatchingFlatSize(input_shape, output_shape);
39 |   static constexpr int32 min_val = std::numeric_limits<OutputT>::min();
40 |   static constexpr int32 max_val = std::numeric_limits<OutputT>::max();
41 | 
42 |   for (int i = 0; i < flat_size; i++) {
43 |     const InputT val = input_data[i];
44 |     int32 unclamped =
45 |         static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) +
46 |         zero_point;
47 |     int32 clamped = std::min(std::max(unclamped, min_val), max_val);
48 |     output_data[i] = clamped;
49 |   }
50 | }
51 | 
52 | }  // namespace reference_ops
53 | 
54 | }  // namespace tflite
55 | #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
56 | 
--------------------------------------------------------------------------------
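The AffineQuantize reference kernel above implements q = round(v / scale) + zero_point, clamped to the output type's range. As a rough illustration of how it can be driven, here is a minimal host-side sketch. It is not a file from this repository; the scale and zero_point values are invented for the example, while in practice they come from the quantization parameters stored in the quantized .tflite models (e.g. dcnn.quant.tflite).

// Hypothetical host-side usage sketch for the reference kernel above.
#include <cstdint>
#include <cstdio>

#include "tensorflow/lite/kernels/internal/reference/quantize.h"
#include "tensorflow/lite/kernels/internal/types.h"

int main() {
  tflite::QuantizationParams params;
  params.scale = 0.05;     // hypothetical quantization step size
  params.zero_point = -3;  // hypothetical zero point

  const float input[4] = {-1.0f, 0.0f, 0.25f, 10.0f};
  int8_t output[4] = {};

  const tflite::RuntimeShape shape({4});
  tflite::reference_ops::AffineQuantize(params, shape, input, shape, output);

  for (int i = 0; i < 4; ++i) {
    // round(10.0 / 0.05) + (-3) = 197, which exceeds int8 max and clamps to 127.
    printf("%f -> %d\n", input[i], output[i]);
  }
  return 0;
}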
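Similarly, the comment block in debug_log.cc describes the one hook a platform must provide: DebugLog(). On a target without stdio, one way to satisfy it is to forward each character to whatever byte-level transmit routine the board support code exposes. The sketch below is not a file from this repository, and uart_write_byte() is a made-up placeholder, not an API from ESP-IDF or TensorFlow.

// Hypothetical alternative debug_log.cc for a platform without a C library.
#include "tensorflow/lite/micro/debug_log.h"

extern void uart_write_byte(char c);  // assumed to be provided elsewhere

extern "C" void DebugLog(const char* s) {
  // Push the string out one byte at a time over the debug serial link.
  while (*s != '\0') {
    uart_write_byte(*s++);
  }
}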