├── .python-version ├── firmware ├── lib │ ├── audio_input │ │ ├── library.json │ │ ├── ADCSampler.h │ │ ├── I2SMicSampler.h │ │ ├── ADCSampler.cpp │ │ ├── I2SMicSampler.cpp │ │ ├── I2SSampler.h │ │ ├── RingBuffer.h │ │ └── I2SSampler.cpp │ ├── neural_network │ │ ├── library.json │ │ └── src │ │ │ ├── model.h │ │ │ ├── NeuralNetwork.h │ │ │ └── NeuralNetwork.cpp │ ├── audio_processor │ │ ├── library.json │ │ └── src │ │ │ ├── kissfft │ │ │ ├── CMakeLists.txt │ │ │ ├── COPYING │ │ │ ├── .travis.yml │ │ │ ├── tools │ │ │ │ ├── kiss_fftnd.h │ │ │ │ ├── kiss_fftr.h │ │ │ │ ├── kiss_fftndr.h │ │ │ │ ├── kfc.h │ │ │ │ ├── Makefile │ │ │ │ └── kfc.c │ │ │ ├── .gitignore │ │ │ ├── kiss_fft_log.h │ │ │ ├── LICENSES │ │ │ │ ├── Unlicense │ │ │ │ └── BSD-3-Clause │ │ │ ├── Makefile │ │ │ ├── TIPS │ │ │ └── README.simd │ │ │ ├── HammingWindow.h │ │ │ ├── HammingWindow.cpp │ │ │ └── AudioProcessor.h │ ├── tfmicro │ │ ├── library.json │ │ └── tensorflow │ │ │ └── lite │ │ │ ├── micro │ │ │ ├── testing │ │ │ │ └── test_conv_model.h │ │ │ ├── debug_log.h │ │ │ ├── benchmarks │ │ │ │ └── keyword_scrambled_model_data.h │ │ │ ├── kernels │ │ │ │ ├── ethosu.cc │ │ │ │ ├── micro_utils.h │ │ │ │ ├── kernel_util.cc │ │ │ │ ├── activation_utils.h │ │ │ │ ├── floor.cc │ │ │ │ ├── neg.cc │ │ │ │ ├── ceil.cc │ │ │ │ ├── round.cc │ │ │ │ ├── kernel_util.h │ │ │ │ ├── kernel_runner.h │ │ │ │ └── micro_ops.h │ │ │ ├── micro_time.h │ │ │ ├── micro_error_reporter.h │ │ │ ├── compatibility.h │ │ │ ├── micro_error_reporter.cc │ │ │ ├── micro_string.h │ │ │ ├── all_ops_resolver.h │ │ │ ├── micro_profiler.cc │ │ │ ├── memory_planner │ │ │ │ ├── linear_memory_planner.h │ │ │ │ ├── linear_memory_planner.cc │ │ │ │ └── memory_planner.h │ │ │ ├── micro_time.cc │ │ │ ├── debug_log.cc │ │ │ ├── all_ops_resolver.cc │ │ │ ├── memory_helpers.h │ │ │ ├── recording_simple_memory_allocator.h │ │ │ ├── micro_profiler.h │ │ │ ├── recording_micro_interpreter.h │ │ │ ├── micro_op_resolver.h │ │ │ └── 
recording_simple_memory_allocator.cc │ │ │ ├── core │ │ │ └── api │ │ │ │ ├── tensor_utils.h │ │ │ │ ├── error_reporter.cc │ │ │ │ ├── tensor_utils.cc │ │ │ │ ├── error_reporter.h │ │ │ │ ├── op_resolver.cc │ │ │ │ └── op_resolver.h │ │ │ ├── kernels │ │ │ ├── internal │ │ │ │ ├── max.h │ │ │ │ ├── min.h │ │ │ │ ├── reference │ │ │ │ │ ├── neg.h │ │ │ │ │ ├── ceil.h │ │ │ │ │ ├── floor.h │ │ │ │ │ ├── round.h │ │ │ │ │ ├── quantize.h │ │ │ │ │ ├── arg_min_max.h │ │ │ │ │ ├── maximum_minimum.h │ │ │ │ │ ├── requantize.h │ │ │ │ │ ├── integer_ops │ │ │ │ │ │ ├── l2normalization.h │ │ │ │ │ │ └── mean.h │ │ │ │ │ ├── dequantize.h │ │ │ │ │ ├── binary_function.h │ │ │ │ │ └── l2normalization.h │ │ │ │ ├── cppmath.h │ │ │ │ ├── optimized │ │ │ │ │ └── neon_check.h │ │ │ │ └── tensor_ctypes.h │ │ │ ├── op_macros.h │ │ │ └── padding.h │ │ │ ├── version.h │ │ │ └── portable_type_to_tflitetype.h │ └── README ├── .gitignore ├── .vscode │ ├── extensions.json │ └── settings.json ├── src │ ├── CommandProcessor.h │ ├── config.h │ ├── CommandDetector.h │ ├── CommandProcessor.cpp │ └── main.cpp ├── test │ └── README ├── platformio.ini ├── include │ └── README └── README.md ├── .github └── FUNDING.yml ├── 3dPrinting ├── 2-wheeled-robot.f3z └── 2-wheeled-robot.stl ├── model ├── requirements.txt ├── README.md └── Convert Trained Model To TFLite.ipynb ├── LICENSE ├── README.md └── .gitignore /.python-version: -------------------------------------------------------------------------------- 1 | 3.8.5 2 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "flags": "-Ofast" 4 | } 5 | } -------------------------------------------------------------------------------- /firmware/lib/neural_network/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | 
"flags": "-Ofast" 4 | } 5 | } -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [atomic14] 4 | ko_fi: atomic14 5 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "flags": "-Isrc/kissfft -Ofast" 4 | } 5 | } -------------------------------------------------------------------------------- /3dPrinting/2-wheeled-robot.f3z: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/atomic14/voice-controlled-robot/HEAD/3dPrinting/2-wheeled-robot.f3z -------------------------------------------------------------------------------- /3dPrinting/2-wheeled-robot.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/atomic14/voice-controlled-robot/HEAD/3dPrinting/2-wheeled-robot.stl -------------------------------------------------------------------------------- /firmware/.gitignore: -------------------------------------------------------------------------------- 1 | .pio 2 | .vscode/.browse.c_cpp.db* 3 | .vscode/c_cpp_properties.json 4 | .vscode/launch.json 5 | .vscode/ipch 6 | -------------------------------------------------------------------------------- /model/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow 2 | matplotlib 3 | pandas 4 | numpy 5 | jupyter 6 | scipy 7 | tensorflow-io 8 | tqdm 9 | pyaudio 10 | jupyter_contrib_nbextensions 11 | jupyter_nbextensions_configurator -------------------------------------------------------------------------------- /firmware/lib/neural_network/src/model.h: 
-------------------------------------------------------------------------------- 1 | #ifndef __converted_model_h__ 2 | #define __converted_model_h__ 3 | 4 | extern unsigned char converted_model_tflite[]; 5 | extern unsigned int converted_model_tflite_len; 6 | 7 | #endif -------------------------------------------------------------------------------- /firmware/.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | // See http://go.microsoft.com/fwlink/?LinkId=827846 3 | // for the documentation about the extensions.json format 4 | "recommendations": [ 5 | "platformio.platformio-ide" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "flags": "-Ithird_party/ruy -Ithird_party/gemmlowp -Ithird_party/flatbuffers/include -DNDEBUG -Ofast -Wno-unused-variable -Wno-strict-aliasing -Wno-return-type -Wno-strict-aliasing -Wno-return-type -Wno-strict-aliasing" 4 | } 5 | } -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | project(kissfft) 3 | 4 | add_library(kissfft 5 | kiss_fft.c) 6 | 7 | target_include_directories(kissfft PUBLIC 8 | $ 9 | $) 10 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/HammingWindow.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | class HammingWindow 4 | { 5 | private: 6 | float *m_coefficients; 7 | int m_window_size; 8 | 9 | public: 10 | HammingWindow(int window_size); 11 | ~HammingWindow(); 12 | void applyWindow(float *input); 13 | }; 
-------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2003-2010 Mark Borgerding . All rights reserved. 2 | 3 | KISS FFT is provided under: 4 | 5 | SPDX-License-Identifier: BSD-3-Clause 6 | 7 | Being under the terms of the BSD 3-clause "New" or "Revised" License, 8 | according with: 9 | 10 | LICENSES/BSD-3-Clause 11 | 12 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/ADCSampler.h: -------------------------------------------------------------------------------- 1 | #ifndef __adc_sampler_h__ 2 | #define __adc_sampler_h__ 3 | 4 | #include "I2SSampler.h" 5 | 6 | class ADCSampler : public I2SSampler 7 | { 8 | private: 9 | adc_unit_t m_adcUnit; 10 | adc1_channel_t m_adcChannel; 11 | 12 | protected: 13 | void configureI2S(); 14 | void processI2SData(uint8_t *i2sData, size_t bytesRead); 15 | 16 | public: 17 | ADCSampler(adc_unit_t adc_unit, adc1_channel_t adc_channel); 18 | }; 19 | 20 | #endif -------------------------------------------------------------------------------- /firmware/src/CommandProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef _intent_processor_h_ 2 | #define _intent_processor_h_ 3 | 4 | #include 5 | class StepperMotor; 6 | class Servo; 7 | 8 | class CommandProcessor 9 | { 10 | private: 11 | QueueHandle_t m_command_queue_handle; 12 | void processCommand(uint16_t commandIndex); 13 | 14 | public: 15 | CommandProcessor(); 16 | void queueCommand(uint16_t commandIndex, float score); 17 | friend void commandQueueProcessorTask(void *param); 18 | }; 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/.travis.yml: 
-------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "3.7" 5 | 6 | dist: bionic 7 | 8 | before_install: 9 | - sudo apt-get install -y libfftw3-dev 10 | 11 | addons: 12 | apt: 13 | update: true 14 | 15 | install: true 16 | 17 | jobs: 18 | include: 19 | - name: "build (make)" 20 | script: 21 | - make all 22 | - make testall 23 | - name: "build (cmake)" 24 | script: 25 | - mkdir build && cd build 26 | - cmake .. 27 | - make 28 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/I2SMicSampler.h: -------------------------------------------------------------------------------- 1 | #ifndef __i2s_mic_sampler_h__ 2 | #define __i2s_mic_sampler_h__ 3 | 4 | #include "I2SSampler.h" 5 | 6 | class I2SMicSampler : public I2SSampler 7 | { 8 | private: 9 | i2s_pin_config_t m_i2sPins; 10 | bool m_fixSPH0645; 11 | 12 | protected: 13 | void configureI2S(); 14 | void processI2SData(uint8_t *i2sData, size_t bytesRead); 15 | 16 | public: 17 | I2SMicSampler(i2s_pin_config_t &i2sPins, bool fixSPH0645 = false); 18 | void start(TaskHandle_t writerTaskHandle); 19 | }; 20 | 21 | #endif -------------------------------------------------------------------------------- /firmware/test/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for PlatformIO Unit Testing and project tests. 3 | 4 | Unit Testing is a software testing method by which individual units of 5 | source code, sets of one or more MCU program modules together with associated 6 | control data, usage procedures, and operating procedures, are tested to 7 | determine whether they are fit for use. Unit testing finds problems early 8 | in the development cycle. 
9 | 10 | More information about PlatformIO Unit Testing: 11 | - https://docs.platformio.org/page/plus/unit-testing.html 12 | -------------------------------------------------------------------------------- /firmware/src/config.h: -------------------------------------------------------------------------------- 1 | // are you using an I2S microphone - comment this out if you want to use an analog mic and ADC input 2 | #define USE_I2S_MIC_INPUT 3 | 4 | // I2S Microphone Settings 5 | // Which channel is the I2S microphone on? I2S_CHANNEL_FMT_ONLY_LEFT or I2S_CHANNEL_FMT_ONLY_RIGHT 6 | #define I2S_MIC_CHANNEL I2S_CHANNEL_FMT_ONLY_LEFT 7 | // #define I2S_MIC_CHANNEL I2S_CHANNEL_FMT_ONLY_RIGHT 8 | #define I2S_MIC_SERIAL_CLOCK GPIO_NUM_33 9 | #define I2S_MIC_LEFT_RIGHT_CLOCK GPIO_NUM_26 10 | #define I2S_MIC_SERIAL_DATA GPIO_NUM_25 11 | 12 | // Analog Microphone Settings - ADC1_CHANNEL_7 is GPIO35 13 | #define ADC_MIC_CHANNEL ADC1_CHANNEL_7 14 | -------------------------------------------------------------------------------- /firmware/platformio.ini: -------------------------------------------------------------------------------- 1 | ; PlatformIO Project Configuration File 2 | ; 3 | ; Build options: build flags, source filter 4 | ; Upload options: custom upload port, speed and extra flags 5 | ; Library options: dependencies, extra library storages 6 | ; Advanced options: extra scripting 7 | ; 8 | ; Please visit documentation for the other options and examples 9 | ; https://docs.platformio.org/page/projectconf.html 10 | 11 | [env:esp32doit-devkit-v1] 12 | platform = espressif32 13 | board = esp32doit-devkit-v1 14 | framework = arduino 15 | upload_port = /dev/cu.SLAB_USBtoUART 16 | monitor_port = /dev/cu.SLAB_USBtoUART 17 | monitor_speed = 115200 18 | build_flags = -Ofast 19 | monitor_filters = esp32_exception_decoder 20 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/kiss_fftnd.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information. 7 | */ 8 | 9 | #ifndef KISS_FFTND_H 10 | #define KISS_FFTND_H 11 | 12 | #include "kiss_fft.h" 13 | 14 | #ifdef __cplusplus 15 | extern "C" { 16 | #endif 17 | 18 | typedef struct kiss_fftnd_state * kiss_fftnd_cfg; 19 | 20 | kiss_fftnd_cfg kiss_fftnd_alloc(const int *dims,int ndims,int inverse_fft,void*mem,size_t*lenmem); 21 | void kiss_fftnd(kiss_fftnd_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout); 22 | 23 | #ifdef __cplusplus 24 | } 25 | #endif 26 | #endif 27 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/ADCSampler.cpp: -------------------------------------------------------------------------------- 1 | #include "ADCSampler.h" 2 | #include "driver/i2s.h" 3 | #include "driver/adc.h" 4 | 5 | ADCSampler::ADCSampler(adc_unit_t adcUnit, adc1_channel_t adcChannel) : I2SSampler() 6 | { 7 | m_adcUnit = adcUnit; 8 | m_adcChannel = adcChannel; 9 | } 10 | 11 | void ADCSampler::configureI2S() 12 | { 13 | //init ADC pad 14 | i2s_set_adc_mode(m_adcUnit, m_adcChannel); 15 | // enable the adc 16 | i2s_adc_enable(getI2SPort()); 17 | } 18 | 19 | /** 20 | * Process the raw data that have been read from the I2S peripherals into samples 21 | **/ 22 | void ADCSampler::processI2SData(uint8_t *i2sData, size_t bytesRead) 23 | { 24 | uint16_t *rawSamples = (uint16_t *)i2sData; 25 | for (int i = 0; i < bytesRead / 2; i++) 26 | { 27 | addSample((2048 - (rawSamples[i] & 0xfff)) * 15); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /firmware/src/CommandDetector.h: -------------------------------------------------------------------------------- 1 | 
#ifndef _detect_wake_word_state_h_ 2 | #define _detect_wake_word_state_h_ 3 | 4 | class I2SSampler; 5 | class NeuralNetwork; 6 | class AudioProcessor; 7 | class CommandProcessor; 8 | 9 | #define NUMBER_COMMANDS 5 10 | #define COMMAND_WINDOW 3 11 | 12 | class CommandDetector 13 | { 14 | private: 15 | CommandProcessor *m_command_processor; 16 | I2SSampler *m_sample_provider; 17 | NeuralNetwork *m_nn; 18 | AudioProcessor *m_audio_processor; 19 | float m_average_detect_time; 20 | int m_number_of_runs; 21 | float m_scores[COMMAND_WINDOW][NUMBER_COMMANDS]; 22 | int m_scores_index; 23 | unsigned long m_last_detection; 24 | 25 | public: 26 | CommandDetector(I2SSampler *sample_provider, CommandProcessor *command_processor); 27 | ~CommandDetector(); 28 | void run(); 29 | }; 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/HammingWindow.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "HammingWindow.h" 4 | 5 | HammingWindow::HammingWindow(int window_size) 6 | { 7 | m_window_size = window_size; 8 | m_coefficients = static_cast(malloc(sizeof(float) * m_window_size)); 9 | // create the constants for a hamming window 10 | const float arg = M_PI * 2.0 / window_size; 11 | for (int i = 0; i < window_size; i++) 12 | { 13 | float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5))); 14 | // Scale it to fixed point and round it. 
15 | m_coefficients[i] = float_value; 16 | } 17 | } 18 | 19 | HammingWindow::~HammingWindow() 20 | { 21 | free(m_coefficients); 22 | } 23 | 24 | void HammingWindow::applyWindow(float *input) 25 | { 26 | for (int i = 0; i < m_window_size; i++) 27 | { 28 | input[i] = input[i] * m_coefficients[i]; 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/I2SMicSampler.cpp: -------------------------------------------------------------------------------- 1 | #include "I2SMicSampler.h" 2 | #include "driver/i2s.h" 3 | #include "soc/i2s_reg.h" 4 | 5 | I2SMicSampler::I2SMicSampler(i2s_pin_config_t &i2sPins, bool fixSPH0645) : I2SSampler() 6 | { 7 | m_i2sPins = i2sPins; 8 | m_fixSPH0645 = fixSPH0645; 9 | } 10 | 11 | void I2SMicSampler::configureI2S() 12 | { 13 | if (m_fixSPH0645) 14 | { 15 | // FIXES for SPH0645 16 | REG_SET_BIT(I2S_TIMING_REG(getI2SPort()), BIT(9)); 17 | REG_SET_BIT(I2S_CONF_REG(getI2SPort()), I2S_RX_MSB_SHIFT); 18 | } 19 | 20 | i2s_set_pin(getI2SPort(), &m_i2sPins); 21 | } 22 | 23 | void I2SMicSampler::processI2SData(uint8_t *i2sData, size_t bytes_read) 24 | { 25 | int32_t *samples = (int32_t *)i2sData; 26 | for (int i = 0; i < bytes_read / 4; i++) 27 | { 28 | // addSample(samples[i] >> 11); 29 | float normalised = samples[i] / 2147483648.0f; 30 | addSample(32768 * normalised); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /firmware/lib/neural_network/src/NeuralNetwork.h: -------------------------------------------------------------------------------- 1 | #ifndef __NeuralNetwork__ 2 | #define __NeuralNetwork__ 3 | 4 | #include 5 | 6 | namespace tflite 7 | { 8 | template 9 | class MicroMutableOpResolver; 10 | class ErrorReporter; 11 | class Model; 12 | class MicroInterpreter; 13 | } // namespace tflite 14 | 15 | struct TfLiteTensor; 16 | 17 | typedef struct 18 | { 19 | float score; 20 | int index; 21 | } NNResult; 22 | 23 | class 
NeuralNetwork 24 | { 25 | private: 26 | tflite::MicroMutableOpResolver<10> *m_resolver; 27 | tflite::ErrorReporter *m_error_reporter; 28 | const tflite::Model *m_model; 29 | tflite::MicroInterpreter *m_interpreter; 30 | TfLiteTensor *input; 31 | TfLiteTensor *output; 32 | uint8_t *m_tensor_arena; 33 | 34 | public: 35 | NeuralNetwork(); 36 | ~NeuralNetwork(); 37 | float *getInputBuffer(); 38 | float *getOutputBuffer(); 39 | NNResult predict(); 40 | }; 41 | 42 | #endif -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.swp 3 | *.so 4 | *.a 5 | *.dylib 6 | test/testcpp 7 | test/bm_fftw_double 8 | test/bm_fftw_float 9 | test/bm_fftw_int16_t 10 | test/bm_fftw_int32_t 11 | test/bm_fftw_simd 12 | test/bm_kiss_double 13 | test/bm_kiss_float 14 | test/bm_kiss_int16_t 15 | test/bm_kiss_int32_t 16 | test/bm_kiss_simd 17 | test/st_double 18 | test/st_float 19 | test/st_int16_t 20 | test/st_int32_t 21 | test/st_simd 22 | test/tkfc_double 23 | test/tkfc_float 24 | test/tkfc_int16_t 25 | test/tkfc_int32_t 26 | test/tkfc_simd 27 | test/tr_double 28 | test/tr_float 29 | test/tr_int16_t 30 | test/tr_int32_t 31 | test/tr_simd 32 | tools/fastconv_double 33 | tools/fastconv_float 34 | tools/fastconv_int16_t 35 | tools/fastconv_int32_t 36 | tools/fastconv_simd 37 | tools/fastconvr_double 38 | tools/fastconvr_float 39 | tools/fastconvr_int16_t 40 | tools/fastconvr_int32_t 41 | tools/fastconvr_simd 42 | tools/fft_double 43 | tools/fft_float 44 | tools/fft_int16_t 45 | tools/fft_int32_t 46 | tools/fft_simd 47 | test/test_simd 48 | build 49 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/AudioProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef AUDIO_PROCESSOR 2 | #define AUDIO_PROCESSOR 3 | 4 | 
#include 5 | #include 6 | // #define FIXED_POINT (16) 7 | #include "./kissfft/tools/kiss_fftr.h" 8 | 9 | class HammingWindow; 10 | 11 | class RingBufferAccessor; 12 | 13 | class AudioProcessor 14 | { 15 | private: 16 | int m_audio_length; 17 | int m_window_size; 18 | int m_step_size; 19 | int m_pooling_size; 20 | size_t m_fft_size; 21 | float *m_fft_input; 22 | int m_energy_size; 23 | int m_pooled_energy_size; 24 | float *m_energy; 25 | kiss_fft_cpx *m_fft_output; 26 | kiss_fftr_cfg m_cfg; 27 | float m_smoothed_noise_floor; 28 | 29 | HammingWindow *m_hamming_window; 30 | 31 | void get_spectrogram_segment(float *output_spectrogram_row); 32 | 33 | public: 34 | AudioProcessor(int audio_length, int window_size, int step_size, int pooling_size); 35 | ~AudioProcessor(); 36 | bool get_spectrogram(RingBufferAccessor *reader, float *output_spectrogram); 37 | }; 38 | 39 | #endif -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 atomic14 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/kiss_fft_log.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information. 7 | */ 8 | 9 | #ifndef kiss_fft_log_h 10 | #define kiss_fft_log_h 11 | 12 | #define ERROR 1 13 | #define WARNING 2 14 | #define INFO 3 15 | #define DEBUG 4 16 | 17 | #define STRINGIFY(x) #x 18 | #define TOSTRING(x) STRINGIFY(x) 19 | 20 | #if defined(NDEBUG) 21 | # define KISS_FFT_LOG_MSG(severity, ...) ((void)0) 22 | #else 23 | # define KISS_FFT_LOG_MSG(severity, ...) \ 24 | fprintf(stderr, "[" #severity "] " __FILE__ ":" TOSTRING(__LINE__) " "); \ 25 | fprintf(stderr, __VA_ARGS__); \ 26 | fprintf(stderr, "\n") 27 | #endif 28 | 29 | #define KISS_FFT_ERROR(...) KISS_FFT_LOG_MSG(ERROR, __VA_ARGS__) 30 | #define KISS_FFT_WARNING(...) KISS_FFT_LOG_MSG(WARNING, __VA_ARGS__) 31 | #define KISS_FFT_INFO(...) KISS_FFT_LOG_MSG(INFO, __VA_ARGS__) 32 | #define KISS_FFT_DEBUG(...) KISS_FFT_LOG_MSG(DEBUG, __VA_ARGS__) 33 | 34 | 35 | 36 | #endif /* kiss_fft_log_h */ -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/testing/test_conv_model.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 18 | 19 | // See generate_test_models.py for updating the contents of this model: 20 | extern const unsigned char kTestConvModelData[]; 21 | extern const unsigned int kTestConvModelDataSize; 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ 24 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | 23 | // Resets a variable tensor to the default value. 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); 25 | 26 | } // namespace tflite 27 | 28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ 29 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/debug_log.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 17 | 18 | // This function should be implemented by each target platform, and provide a 19 | // way for strings to be output to some text stream. For more information, see 20 | // tensorflow/lite/micro/debug_log.cc. 
21 | extern "C" void DebugLog(const char* s); 22 | 23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ 24 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 18 | 19 | extern const unsigned char g_keyword_scrambled_model_data[]; 20 | extern const unsigned int g_keyword_scrambled_model_data_length; 21 | 22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ 23 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // 17 | // This is a stub file for non-Ethos platforms 18 | // 19 | #include "tensorflow/lite/c/common.h" 20 | 21 | namespace tflite { 22 | namespace ops { 23 | namespace micro { 24 | namespace custom { 25 | TfLiteRegistration* Register_ETHOSU() { return nullptr; } 26 | 27 | const char* GetString_ETHOSU() { return ""; } 28 | 29 | } // namespace custom 30 | } // namespace micro 31 | } // namespace ops 32 | } // namespace tflite 33 | -------------------------------------------------------------------------------- /firmware/lib/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project specific (private) libraries. 3 | PlatformIO will compile them to static libraries and link into executable file. 4 | 5 | The source code of each library should be placed in a an own separate directory 6 | ("lib/your_library_name/[here are source files]"). 
7 | 8 | For example, see a structure of the following two libraries `Foo` and `Bar`: 9 | 10 | |--lib 11 | | | 12 | | |--Bar 13 | | | |--docs 14 | | | |--examples 15 | | | |--src 16 | | | |- Bar.c 17 | | | |- Bar.h 18 | | | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html 19 | | | 20 | | |--Foo 21 | | | |- Foo.c 22 | | | |- Foo.h 23 | | | 24 | | |- README --> THIS FILE 25 | | 26 | |- platformio.ini 27 | |--src 28 | |- main.c 29 | 30 | and a contents of `src/main.c`: 31 | ``` 32 | #include 33 | #include 34 | 35 | int main (void) 36 | { 37 | ... 38 | } 39 | 40 | ``` 41 | 42 | PlatformIO Library Dependency Finder will find automatically dependent 43 | libraries scanning project source files. 44 | 45 | More information about PlatformIO Library Dependency Finder 46 | - https://docs.platformio.org/page/librarymanager/ldf.html 47 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_time.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 17 | 18 | #include <stdint.h> 19 | 20 | namespace tflite { 21 | 22 | // These functions should be implemented by each target platform, and provide an 23 | // accurate tick count along with how many ticks there are per second. 24 | int32_t ticks_per_second(); 25 | 26 | // Return time in ticks. The meaning of a tick varies per platform. 27 | int32_t GetCurrentTimeTicks(); 28 | 29 | } // namespace tflite 30 | 31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ 32 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 17 | 18 | #include <algorithm> 19 | 20 | namespace tflite 21 | { 22 | 23 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__) 24 | inline float TfLiteMax(const float &x, const float &y) 25 | { 26 | return std::max(x, y); 27 | } 28 | #else 29 | template <class T> 30 | inline T TfLiteMax(const T &x, const T &y) 31 | { 32 | return std::max(x, y); 33 | } 34 | #endif 35 | 36 | } // namespace tflite 37 | 38 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ 39 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/min.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 17 | 18 | #include <algorithm> 19 | 20 | namespace tflite 21 | { 22 | 23 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__) 24 | inline float TfLiteMin(const float &x, const float &y) 25 | { 26 | return std::min(x, y); 27 | } 28 | #else 29 | template <class T> 30 | inline T TfLiteMin(const T &x, const T &y) 31 | { 32 | return std::min(x, y); 33 | } 34 | #endif 35 | 36 | } // namespace tflite 37 | 38 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ 39 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/version.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_VERSION_H_ 16 | #define TENSORFLOW_LITE_VERSION_H_ 17 | 18 | #include "tensorflow/core/public/version.h" 19 | 20 | // The version number of the Schema. Ideally all changes will be backward 21 | // compatible. If that ever changes, we must ensure that version is the first 22 | // entry in the new tflite root so that we can see that version is not 1.
23 | #define TFLITE_SCHEMA_VERSION (3) 24 | 25 | // TensorFlow Lite Runtime version. 26 | // This value is currently shared with that of TensorFlow. 27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING 28 | 29 | #endif // TENSORFLOW_LITE_VERSION_H_ 30 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/kiss_fftr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information. 7 | */ 8 | 9 | #ifndef KISS_FTR_H 10 | #define KISS_FTR_H 11 | 12 | #include "kiss_fft.h" 13 | #ifdef __cplusplus 14 | extern "C" { 15 | #endif 16 | 17 | 18 | /* 19 | 20 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq. 21 | 22 | 23 | 24 | */ 25 | 26 | typedef struct kiss_fftr_state *kiss_fftr_cfg; 27 | 28 | 29 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); 30 | /* 31 | nfft must be even 32 | 33 | If you don't care to allocate space, use mem = lenmem = NULL 34 | */ 35 | 36 | 37 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); 38 | /* 39 | input timedata has nfft scalar points 40 | output freqdata has nfft/2+1 complex points 41 | */ 42 | 43 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); 44 | /* 45 | input freqdata has nfft/2+1 complex points 46 | output timedata has nfft scalar points 47 | */ 48 | 49 | #define kiss_fftr_free KISS_FFT_FREE 50 | 51 | #ifdef __cplusplus 52 | } 53 | #endif 54 | #endif 55 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_error_reporter.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 17 | 18 | #include <cstdarg> 19 | 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | #include "tensorflow/lite/micro/compatibility.h" 22 | 23 | namespace tflite { 24 | 25 | class MicroErrorReporter : public ErrorReporter { 26 | public: 27 | ~MicroErrorReporter() override {} 28 | int Report(const char* format, va_list args) override; 29 | 30 | private: 31 | TF_LITE_REMOVE_VIRTUAL_DELETE 32 | }; 33 | 34 | } // namespace tflite 35 | 36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ 37 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #include "tensorflow/lite/core/api/error_reporter.h" 16 | #include <cstdarg> 17 | 18 | namespace tflite { 19 | 20 | int ErrorReporter::Report(const char* format, ...) { 21 | va_list args; 22 | va_start(args, format); 23 | int code = Report(format, args); 24 | va_end(args); 25 | return code; 26 | } 27 | 28 | // TODO(aselle): Make the name of ReportError on context the same, so 29 | // we can use the ensure functions w/o a context and w/ a reporter. 30 | int ErrorReporter::ReportError(void*, const char* format, ...) { 31 | va_list args; 32 | va_start(args, format); 33 | int code = Report(format, args); 34 | va_end(args); 35 | return code; 36 | } 37 | 38 | } // namespace tflite 39 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/kiss_fftndr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information.
7 | */ 8 | 9 | #ifndef KISS_NDR_H 10 | #define KISS_NDR_H 11 | 12 | #include "kiss_fft.h" 13 | #include "kiss_fftr.h" 14 | #include "kiss_fftnd.h" 15 | 16 | #ifdef __cplusplus 17 | extern "C" { 18 | #endif 19 | 20 | typedef struct kiss_fftndr_state *kiss_fftndr_cfg; 21 | 22 | 23 | kiss_fftndr_cfg kiss_fftndr_alloc(const int *dims,int ndims,int inverse_fft,void*mem,size_t*lenmem); 24 | /* 25 | dims[0] must be even 26 | 27 | If you don't care to allocate space, use mem = lenmem = NULL 28 | */ 29 | 30 | 31 | void kiss_fftndr( 32 | kiss_fftndr_cfg cfg, 33 | const kiss_fft_scalar *timedata, 34 | kiss_fft_cpx *freqdata); 35 | /* 36 | input timedata has dims[0] X dims[1] X ... X dims[ndims-1] scalar points 37 | output freqdata has dims[0] X dims[1] X ... X dims[ndims-1]/2+1 complex points 38 | */ 39 | 40 | void kiss_fftndri( 41 | kiss_fftndr_cfg cfg, 42 | const kiss_fft_cpx *freqdata, 43 | kiss_fft_scalar *timedata); 44 | /* 45 | input and output dimensions are the exact opposite of kiss_fftndr 46 | */ 47 | 48 | 49 | #define kiss_fftndr_free free 50 | 51 | #ifdef __cplusplus 52 | } 53 | #endif 54 | 55 | #endif 56 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 
11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 14 | namespace tflite { 15 | namespace ops { 16 | namespace micro { 17 | 18 | // Same as gtl::Greater but defined here to reduce dependencies and 19 | // binary size for micro environment. 20 | struct Greater { 21 | template <typename T> 22 | bool operator()(const T& x, const T& y) const { 23 | return x > y; 24 | } 25 | }; 26 | 27 | struct Less { 28 | template <typename T> 29 | bool operator()(const T& x, const T& y) const { 30 | return x < y; 31 | } 32 | }; 33 | 34 | } // namespace micro 35 | } // namespace ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ 38 | -------------------------------------------------------------------------------- /firmware/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "new": "cpp", 4 | "array": "cpp", 5 | "*.tcc": "cpp", 6 | "bitset": "cpp", 7 | "cctype": "cpp", 8 | "clocale": "cpp", 9 | "cmath": "cpp", 10 | "complex": "cpp", 11 | "cstdarg": "cpp", 12 | "cstddef": "cpp", 13 | "cstdint": "cpp", 14 | "cstdio": "cpp", 15 | "cstdlib": "cpp", 16 | "cstring": "cpp", 17 | "ctime": "cpp", 18 | "cwchar": "cpp", 19 | "cwctype": "cpp", 20 | "deque": "cpp", 21 | "unordered_map": "cpp", 22 | "unordered_set": "cpp", 23 | "vector": "cpp", 24 | "exception": "cpp", 25 | "algorithm": "cpp", 26 | "functional": "cpp", 27 | "string_view": "cpp", 28 | "system_error": "cpp", 29 | "tuple": "cpp", 30 | "type_traits": "cpp", 31 | "fstream": "cpp", 32 | "initializer_list": "cpp", 33 | "iomanip": "cpp", 34 | "iosfwd": "cpp", 35 | "iostream": "cpp", 36 | "istream": "cpp", 37 | "limits": "cpp", 38 | "memory": "cpp", 39 | "ostream": "cpp", 40 | "numeric": "cpp", 41 | "sstream": "cpp", 42 | "stdexcept": "cpp", 43 | "streambuf": "cpp", 44 | "cinttypes": "cpp",
45 | "regex": "cpp", 46 | "utility": "cpp", 47 | "typeinfo": "cpp", 48 | "chrono": "cpp", 49 | "string": "cpp" 50 | } 51 | } -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template <typename T> 25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data, 26 | const RuntimeShape& output_shape, T* output_data) { 27 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 28 | 29 | for (int i = 0; i < flat_size; ++i) { 30 | output_data[i] = -input_data[i]; 31 | } 32 | } 33 | 34 | } // namespace reference_ops 35 | } // namespace tflite 36 | 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ 38 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h:
-------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 17 | 18 | #include <cmath> 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; ++i) { 31 | output_data[i] = std::ceil(input_data[i]); 32 | } 33 | } 34 | 35 | } // namespace reference_ops 36 | } // namespace tflite 37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ 38 | -------------------------------------------------------------------------------- /firmware/include/README: -------------------------------------------------------------------------------- 1 | 2 | This directory is intended for project header files. 3 | 4 | A header file is a file containing C declarations and macro definitions 5 | to be shared between several project source files.
You request the use of a 6 | header file in your project source file (C, C++, etc) located in `src` folder 7 | by including it, with the C preprocessing directive `#include'. 8 | 9 | ```src/main.c 10 | 11 | #include "header.h" 12 | 13 | int main (void) 14 | { 15 | ... 16 | } 17 | ``` 18 | 19 | Including a header file produces the same results as copying the header file 20 | into each source file that needs it. Such copying would be time-consuming 21 | and error-prone. With a header file, the related declarations appear 22 | in only one place. If they need to be changed, they can be changed in one 23 | place, and programs that include the header file will automatically use the 24 | new version when next recompiled. The header file eliminates the labor of 25 | finding and changing all the copies as well as the risk that a failure to 26 | find one copy will result in inconsistencies within a program. 27 | 28 | In C, the usual convention is to give header files names that end with `.h'. 29 | It is most portable to use only letters, digits, dashes, and underscores in 30 | header file names, and at most one dot. 31 | 32 | Read more about using header files in official GCC documentation: 33 | 34 | * Include Syntax 35 | * Include Operation 36 | * Once-Only Headers 37 | * Computed Includes 38 | 39 | https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html 40 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 17 | 18 | #include <cmath> 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data, 27 | const RuntimeShape& output_shape, float* output_data) { 28 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 29 | 30 | for (int i = 0; i < flat_size; i++) { 31 | int offset = i; 32 | output_data[offset] = std::floor(input_data[offset]); 33 | } 34 | } 35 | 36 | } // namespace reference_ops 37 | } // namespace tflite 38 | 39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ 40 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/compatibility.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 17 | 18 | // C++ will automatically create class-specific delete operators for virtual 19 | // objects, which by default call the global delete function. For embedded 20 | // applications we want to avoid this, and won't be calling new/delete on these 21 | // objects, so we need to override the default implementation with one that does 22 | // nothing to avoid linking in ::delete(). 23 | // This macro needs to be included in all subclasses of a virtual base class in 24 | // the private section. 25 | #ifdef TF_LITE_STATIC_MEMORY 26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \ 27 | void operator delete(void* p) {} 28 | #else 29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE 30 | #endif 31 | 32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ 33 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_error_reporter.h" 17 | 18 | #include <cstdarg> 19 | 20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 21 | #include "tensorflow/lite/micro/debug_log.h" 22 | #include "tensorflow/lite/micro/micro_string.h" 23 | #endif 24 | 25 | namespace tflite { 26 | 27 | int MicroErrorReporter::Report(const char* format, va_list args) { 28 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 29 | // Only pulling in the implementation of this function for builds where we 30 | // expect to make use of it to be extra cautious about not increasing the code 31 | // size.
32 | static constexpr int kMaxLogLen = 256; 33 | char log_buffer[kMaxLogLen]; 34 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args); 35 | DebugLog(log_buffer); 36 | DebugLog("\r\n"); 37 | #endif 38 | return 0; 39 | } 40 | 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/I2SSampler.h: -------------------------------------------------------------------------------- 1 | #ifndef __sampler_base_h__ 2 | #define __sampler_base_h__ 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "RingBuffer.h" 9 | 10 | #define AUDIO_BUFFER_COUNT 11 11 | 12 | /** 13 | * Base Class for both the ADC and I2S sampler 14 | **/ 15 | class I2SSampler 16 | { 17 | private: 18 | // audio buffers 19 | AudioBuffer **m_audio_buffers; 20 | RingBufferAccessor *m_write_ring_buffer_accessor; 21 | // current audio buffer 22 | int m_current_audio_buffer; 23 | // I2S reader task 24 | TaskHandle_t m_reader_task_handle; 25 | // processor task 26 | TaskHandle_t m_processor_task_handle; 27 | // i2s reader queue 28 | QueueHandle_t m_i2s_queue; 29 | // i2s port 30 | i2s_port_t m_i2s_port; 31 | 32 | protected: 33 | void addSample(int16_t sample); 34 | virtual void configureI2S() = 0; 35 | virtual void processI2SData(uint8_t *i2sData, size_t bytesRead) = 0; 36 | i2s_port_t getI2SPort() 37 | { 38 | return m_i2s_port; 39 | } 40 | 41 | public: 42 | I2SSampler(); 43 | void start(i2s_port_t i2s_port, i2s_config_t &i2s_config, TaskHandle_t processor_task_handle); 44 | 45 | RingBufferAccessor *getRingBufferReader(); 46 | 47 | int getCurrentWritePosition() 48 | { 49 | return m_write_ring_buffer_accessor->getIndex(); 50 | } 51 | int getRingBufferSize() 52 | { 53 | return AUDIO_BUFFER_COUNT * SAMPLE_BUFFER_SIZE; 54 | } 55 | 56 | friend void i2sReaderTask(void *param); 57 | }; 58 | 59 | #endif -------------------------------------------------------------------------------- 
/firmware/lib/tfmicro/tensorflow/lite/micro/micro_string.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 17 | 18 | #include <cstdarg> 19 | 20 | // Implements simple string formatting for numeric types. Returns the number of 21 | // bytes written to output. 22 | extern "C" { 23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. 24 | // MicroSnprintf() is implemented using MicroVsnprintf(). 25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args); 26 | // Functionally equivalent to snprintf, trimmed down for TFLite Micro. 27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string 28 | // "int 10" in the buffer. 29 | // Floating point values are logged in exponent notation (1.XXX*2^N).
30 | int MicroSnprintf(char* output, int len, const char* format, ...); 31 | } 32 | 33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ 34 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/cppmath.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 17 | 18 | #include <cmath> 19 | 20 | namespace tflite { 21 | 22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ 23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ 24 | defined(__ZEPHYR__) 25 | #define TF_LITE_GLOBAL_STD_PREFIX 26 | #else 27 | #define TF_LITE_GLOBAL_STD_PREFIX std 28 | #endif 29 | 30 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ 31 | template <class T> \ 32 | inline T tf_name(const T x) { \ 33 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ 34 | } 35 | 36 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); 37 | 38 | } // namespace tflite 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ 41 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/all_ops_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License.
11 | ==============================================================================*/ 12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 14 | 15 | #include "tensorflow/lite/micro/compatibility.h" 16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" 17 | 18 | namespace tflite { 19 | 20 | // The magic number in the template parameter is the maximum number of ops that 21 | // can be added to AllOpsResolver. It can be increased if needed. And most 22 | // applications that care about the memory footprint will want to directly use 23 | // MicroMutableOpResolver and have an application specific template parameter. 24 | // The examples directory has sample code for this. 25 | class AllOpsResolver : public MicroMutableOpResolver<128> { 26 | public: 27 | AllOpsResolver(); 28 | 29 | private: 30 | TF_LITE_REMOVE_VIRTUAL_DELETE 31 | }; 32 | 33 | } // namespace tflite 34 | 35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ 36 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/kernel_util.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/kernels/kernel_util.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | 20 | namespace tflite { 21 | namespace micro { 22 | 23 | bool HaveSameShapes(const TfLiteEvalTensor* input1, 24 | const TfLiteEvalTensor* input2) { 25 | TFLITE_DCHECK(input1 != nullptr); 26 | TFLITE_DCHECK(input2 != nullptr); 27 | return TfLiteIntArrayEqual(input1->dims, input2->dims); 28 | } 29 | 30 | const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { 31 | if (tensor == nullptr || tensor->dims == nullptr) { 32 | return RuntimeShape(); 33 | } 34 | TfLiteIntArray* dims = tensor->dims; 35 | const int dims_size = dims->size; 36 | const int32_t* dims_data = reinterpret_cast(dims->data); 37 | return RuntimeShape(dims_size, dims_data); 38 | } 39 | 40 | } // namespace micro 41 | } // namespace tflite 42 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/LICENSES/Unlicense: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: Unlicense 2 | SPDX-URL: https://spdx.org/licenses/Unlicense.html 3 | Usage-Guide: 4 | To use the Unlicense put the following SPDX tag/value pair into a 5 | comment according to the placement guidelines in the licensing rules 6 | documentation: 7 | SPDX-License-Identifier: Unlicense 8 | License-Text: 9 | 10 | This is free and unencumbered software released into the public domain. 11 | 12 | Anyone is free to copy, modify, publish, use, compile, sell, or distribute 13 | this software, either in source code form or as a compiled binary, for any 14 | purpose, commercial or non-commercial, and by any means. 15 | 16 | In jurisdictions that recognize copyright laws, the author or authors of this 17 | software dedicate any and all copyright interest in the software to the public 18 | domain. 
We make this dedication for the benefit of the public at large and 19 | to the detriment of our heirs and successors. We intend this dedication to be 20 | an overt act of relinquishment in perpetuity of all present and future rights 21 | to this software under copyright law. 22 | 23 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 25 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 26 | BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 27 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH 28 | THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 29 | 30 | For more information, please refer to 31 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 17 | 18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON) 19 | #define USE_NEON 20 | #include 21 | #endif 22 | 23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON 24 | #define USE_NEON 25 | #include "NEON_2_SSE.h" 26 | #endif 27 | 28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is 29 | // defined, PortableSomeFunc(args) otherwise. 30 | #ifdef USE_NEON 31 | // Always use Neon code 32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) 33 | 34 | #else 35 | // No NEON available: Use Portable code 36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) 37 | 38 | #endif // defined(USE_NEON) 39 | 40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ 41 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_profiler.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/micro_profiler.h" 17 | 18 | #include "tensorflow/lite/kernels/internal/compatibility.h" 19 | #include "tensorflow/lite/micro/micro_time.h" 20 | 21 | namespace tflite { 22 | 23 | MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) 24 | : reporter_(reporter) {} 25 | 26 | uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, 27 | int64_t event_metadata1, 28 | int64_t event_metadata2) { 29 | start_time_ = GetCurrentTimeTicks(); 30 | TFLITE_DCHECK(tag != nullptr); 31 | event_tag_ = tag; 32 | return 0; 33 | } 34 | 35 | void MicroProfiler::EndEvent(uint32_t event_handle) { 36 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 37 | int32_t end_time = GetCurrentTimeTicks(); 38 | TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, 39 | end_time - start_time_); 40 | #endif 41 | } 42 | } // namespace tflite 43 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/kfc.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information. 7 | */ 8 | 9 | #ifndef KFC_H 10 | #define KFC_H 11 | #include "kiss_fft.h" 12 | 13 | #ifdef __cplusplus 14 | extern "C" { 15 | #endif 16 | 17 | /* 18 | KFC -- Kiss FFT Cache 19 | 20 | Not needing to deal with kiss_fft_alloc and a config 21 | object may be handy for a lot of programs. 22 | 23 | KFC uses the underlying KISS FFT functions, but caches the config object. 24 | The first time kfc_fft or kfc_ifft for a given FFT size, the cfg 25 | object is created for it. All subsequent calls use the cached 26 | configuration object. 
27 | 28 | NOTE: 29 | You should probably not use this if your program will be using a lot 30 | of various sizes of FFTs. There is a linear search through the 31 | cached objects. If you are only using one or two FFT sizes, this 32 | will be negligible. Otherwise, you may want to use another method 33 | of managing the cfg objects. 34 | 35 | There is no automated cleanup of the cached objects. This could lead 36 | to large memory usage in a program that uses a lot of *DIFFERENT* 37 | sized FFTs. If you want to force all cached cfg objects to be freed, 38 | call kfc_cleanup. 39 | 40 | */ 41 | 42 | /*forward complex FFT */ 43 | void kfc_fft(int nfft, const kiss_fft_cpx * fin,kiss_fft_cpx * fout); 44 | /*reverse complex FFT */ 45 | void kfc_ifft(int nfft, const kiss_fft_cpx * fin,kiss_fft_cpx * fout); 46 | 47 | /*free all cached objects*/ 48 | void kfc_cleanup(void); 49 | 50 | #ifdef __cplusplus 51 | } 52 | #endif 53 | 54 | #endif 55 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/tensor_utils.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/c/common.h" 21 | 22 | namespace tflite { 23 | 24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { 25 | if (!tensor->is_variable) { 26 | return kTfLiteOk; 27 | } 28 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it 29 | // to the value of the buffer. 30 | int value = 0; 31 | if (tensor->type == kTfLiteInt8) { 32 | value = tensor->params.zero_point; 33 | } 34 | // TODO(b/139446230): Provide a platform header to better handle these 35 | // specific scenarios. 36 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ 37 | defined(__i386) || defined(__x86__) || defined(__X86__) || \ 38 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64) 39 | memset(tensor->data.raw, value, tensor->bytes); 40 | #else 41 | char* raw_ptr = tensor->data.raw; 42 | for (size_t i = 0; i < tensor->bytes; ++i) { 43 | *raw_ptr = value; 44 | raw_ptr++; 45 | } 46 | #endif 47 | return kTfLiteOk; 48 | } 49 | 50 | } // namespace tflite 51 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | 23 | template 24 | inline T* GetTensorData(TfLiteTensor* tensor) { 25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 26 | } 27 | 28 | template 29 | inline const T* GetTensorData(const TfLiteTensor* tensor) { 30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) 31 | : nullptr; 32 | } 33 | 34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { 35 | if (tensor == nullptr) { 36 | return RuntimeShape(); 37 | } 38 | 39 | TfLiteIntArray* dims = tensor->dims; 40 | const int dims_size = dims->size; 41 | const int32_t* dims_data = reinterpret_cast(dims->data); 42 | return RuntimeShape(dims_size, dims_data); 43 | } 44 | 45 | } // namespace tflite 46 | 47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ 48 | -------------------------------------------------------------------------------- /firmware/README.md: -------------------------------------------------------------------------------- 1 | # Voice Command Recognition Firmware 2 | 3 | This folder contains the firmware for the voice-controlled robot. 4 | 5 | We are using Platform.io to build the firmware. 6 | 7 | To understand the code the best place to start is `src/main.cpp`. This creates our `CommandDetector` and `CommandProcessor` objects and also creates a task to service the command detector as audio samples come in. 8 | 9 | From there you can look at `src/CommandDetector.cpp` and `src/CommandProcessor.cpp`. 10 | 11 | The code should be well commented and hopefully easy to understand. 
12 | 13 | ## Config options 14 | 15 | To set things up for yourself, edit the `config.h` file and fill in your WiFi details. 16 | 17 | There are a number of options in this file that you can modify to suit your own setup. 18 | 19 | If you want to use an analog microphone instead of I2S then you need to comment out this line: 20 | 21 | ``` 22 | // are you using an I2S microphone - comment this out if you want to use an analog mic and ADC input 23 | #define USE_I2S_MIC_INPUT 24 | ``` 25 | 26 | And you will need to select the appropriate ADC channel to read data from: 27 | 28 | ``` 29 | // Analog Microphone Settings - ADC1_CHANNEL_7 is GPIO35 30 | #define ADC_MIC_CHANNEL ADC1_CHANNEL_7 31 | ``` 32 | 33 | If you are using an I2S Microphone then you need to tell the system which channel you have configure the microphone on (left or right - generally these devices default to left). 34 | 35 | ``` 36 | // Which channel is the I2S microphone on? I2S_CHANNEL_FMT_ONLY_LEFT or I2S_CHANNEL_FMT_ONLY_RIGHT 37 | #define I2S_MIC_CHANNEL I2S_CHANNEL_FMT_ONLY_LEFT 38 | // #define I2S_MIC_CHANNEL I2S_CHANNEL_FMT_ONLY_RIGHT 39 | ``` 40 | 41 | And you will need to tell it which pins you have connected to the microphone: 42 | 43 | ``` 44 | #define I2S_MIC_SERIAL_CLOCK GPIO_NUM_33 45 | #define I2S_MIC_LEFT_RIGHT_CLOCK GPIO_NUM_26 46 | #define I2S_MIC_SERIAL_DATA GPIO_NUM_25 47 | ``` 48 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/Makefile: -------------------------------------------------------------------------------- 1 | WARNINGS=-W -Wall -Wstrict-prototypes -Wmissing-prototypes -Waggregate-return \ 2 | -Wcast-align -Wcast-qual -Wnested-externs -Wshadow -Wbad-function-cast \ 3 | -Wwrite-strings 4 | 5 | ifeq "$(DATATYPE)" "" 6 | DATATYPE=float 7 | endif 8 | 9 | ifeq "$(DATATYPE)" "int32_t" 10 | TYPEFLAGS=-DFIXED_POINT=32 11 | endif 12 | 13 | ifeq "$(DATATYPE)" "int16_t" 14 | TYPEFLAGS=-DFIXED_POINT=16 
15 | endif 16 | 17 | ifeq "$(DATATYPE)" "simd" 18 | TYPEFLAGS=-DUSE_SIMD=1 -msse 19 | endif 20 | 21 | ifeq "$(TYPEFLAGS)" "" 22 | TYPEFLAGS=-Dkiss_fft_scalar=$(DATATYPE) 23 | endif 24 | 25 | ifneq ("$(KISS_FFT_USE_ALLOCA)","") 26 | CFLAGS+= -DKISS_FFT_USE_ALLOCA=1 27 | endif 28 | CFLAGS+= $(CFLAGADD) 29 | 30 | 31 | FFTUTIL=fft_$(DATATYPE) 32 | FASTFILT=fastconv_$(DATATYPE) 33 | FASTFILTREAL=fastconvr_$(DATATYPE) 34 | PSDPNG=psdpng_$(DATATYPE) 35 | DUMPHDR=dumphdr_$(DATATYPE) 36 | 37 | all: $(FFTUTIL) $(FASTFILT) $(FASTFILTREAL) 38 | # $(PSDPNG) 39 | # $(DUMPHDR) 40 | 41 | #CFLAGS=-Wall -O3 -pedantic -march=pentiumpro -ffast-math -fomit-frame-pointer $(WARNINGS) 42 | # If the above flags do not work, try the following 43 | CFLAGS=-Wall -O3 $(WARNINGS) 44 | # tip: try -openmp or -fopenmp to use multiple cores 45 | 46 | $(FASTFILTREAL): ../kiss_fft.c kiss_fastfir.c kiss_fftr.c 47 | $(CC) -o $@ $(CFLAGS) -I.. $(TYPEFLAGS) -DREAL_FASTFIR $+ -DFAST_FILT_UTIL -lm 48 | 49 | $(FASTFILT): ../kiss_fft.c kiss_fastfir.c 50 | $(CC) -o $@ $(CFLAGS) -I.. $(TYPEFLAGS) $+ -DFAST_FILT_UTIL -lm 51 | 52 | $(FFTUTIL): ../kiss_fft.c fftutil.c kiss_fftnd.c kiss_fftr.c kiss_fftndr.c 53 | $(CC) -o $@ $(CFLAGS) -I.. $(TYPEFLAGS) $+ -lm 54 | 55 | $(PSDPNG): ../kiss_fft.c psdpng.c kiss_fftr.c 56 | $(CC) -o $@ $(CFLAGS) -I.. $(TYPEFLAGS) $+ -lpng -lm 57 | 58 | $(DUMPHDR): ../kiss_fft.c dumphdr.c 59 | $(CC) -o $@ $(CFLAGS) -I.. 
$(TYPEFLAGS) $+ -lm 60 | 61 | clean: 62 | rm -f *~ fft fft_* fastconv fastconv_* fastconvr fastconvr_* psdpng psdpng_* 63 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/LICENSES/BSD-3-Clause: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: BSD-3-Clause 2 | SPDX-URL: https://spdx.org/licenses/BSD-3-Clause.html 3 | Usage-Guide: 4 | To use the BSD 3-clause "New" or "Revised" License put the following SPDX 5 | tag/value pair into a comment according to the placement guidelines in 6 | the licensing rules documentation: 7 | SPDX-License-Identifier: BSD-3-Clause 8 | License-Text: 9 | 10 | Copyright (c) . All rights reserved. 11 | 12 | Redistribution and use in source and binary forms, with or without modification, 13 | are permitted provided that the following conditions are met: 14 | 15 | 1. Redistributions of source code must retain the above copyright notice, 16 | this list of conditions and the following disclaimer. 17 | 18 | 2. Redistributions in binary form must reproduce the above copyright notice, 19 | this list of conditions and the following disclaimer in the documentation 20 | and/or other materials provided with the distribution. 21 | 22 | 3. Neither the name of the copyright holder nor the names of its contributors 23 | may be used to endorse or promote products derived from this software without 24 | specific prior written permission. 25 | 26 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 27 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 30 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 32 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 33 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 34 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 35 | USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/round.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | 24 | namespace reference_ops { 25 | 26 | inline float RoundToNearest(float value) { 27 | auto floor_val = std::floor(value); 28 | auto diff = value - floor_val; 29 | if ((diff < 0.5f) || 30 | ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { 31 | return floor_val; 32 | } else { 33 | return floor_val = floor_val + 1.0f; 34 | } 35 | } 36 | 37 | inline void Round(const RuntimeShape& input_shape, const float* input_data, 38 | const RuntimeShape& output_shape, float* output_data) { 39 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 40 | for (int i = 0; i < flat_size; ++i) { 41 | // Note that this implementation matches that of tensorFlow tf.round 42 | // and corresponds to the bankers rounding method. 43 | // cfenv (for fesetround) is not yet supported universally on Android, so 44 | // using a work around. 45 | output_data[i] = RoundToNearest(input_data[i]); 46 | } 47 | } 48 | 49 | } // namespace reference_ops 50 | } // namespace tflite 51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ 52 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h" 21 | 22 | namespace tflite { 23 | 24 | // The simplest possible memory planner that just lays out all buffers at 25 | // increasing offsets without trying to reuse memory. 26 | class LinearMemoryPlanner : public MemoryPlanner { 27 | public: 28 | LinearMemoryPlanner(); 29 | ~LinearMemoryPlanner() override; 30 | 31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, 32 | int first_time_used, int last_time_used) override; 33 | 34 | size_t GetMaximumMemorySize() override; 35 | int GetBufferCount() override; 36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 37 | int buffer_index, int* offset) override; 38 | 39 | private: 40 | static constexpr int kMaxBufferCount = 1024; 41 | size_t buffer_offsets_[kMaxBufferCount]; 42 | int current_buffer_count_; 43 | size_t next_free_offset_; 44 | 45 | TF_LITE_REMOVE_VIRTUAL_DELETE 46 | }; 47 | 48 | } // namespace tflite 49 | 50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ 51 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/lite/c/builtin_op_data.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | #include "tensorflow/lite/kernels/internal/max.h" 25 | #include "tensorflow/lite/kernels/internal/min.h" 26 | 27 | namespace tflite { 28 | namespace ops { 29 | namespace micro { 30 | 31 | // Returns the floating point value for a fused activation: 32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) { 33 | switch (act) { 34 | case kTfLiteActNone: 35 | return a; 36 | case kTfLiteActRelu: 37 | return TfLiteMax(0.0f, a); 38 | case kTfLiteActReluN1To1: 39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f)); 40 | case kTfLiteActRelu6: 41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f)); 42 | case kTfLiteActTanh: 43 | return std::tanh(a); 44 | case kTfLiteActSignBit: 45 | return std::signbit(a); 46 | case kTfLiteActSigmoid: 47 | return 1.0f / (1.0f + std::exp(-a)); 48 | } 49 | return 0.0f; // To indicate an unsupported activation (i.e. 
when a new fused 50 | // activation is added to the enum and not handled here). 51 | } 52 | 53 | } // namespace micro 54 | } // namespace ops 55 | } // namespace tflite 56 | 57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ 58 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_time.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of timer functions. Platforms are not required to 17 | // implement these timer methods, but they are required to enable profiling. 18 | 19 | // On platforms that have a POSIX stack or C library, it can be written using 20 | // methods from or clock() from . 21 | 22 | // To add an equivalent function for your own platform, create your own 23 | // implementation file, and place it in a subfolder with named after the OS 24 | // you're targeting. For example, see the Cortex M bare metal version in 25 | // tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on 26 | // tensorflow/lite/micro/mbed/micro_time.cc. 
27 | 28 | #include "tensorflow/lite/micro/micro_time.h" 29 | 30 | namespace tflite { 31 | 32 | // Reference implementation of the ticks_per_second() function that's required 33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling. 34 | // This returns 0 by default because timing is an optional feature that builds 35 | // without errors on platforms that do not need it. 36 | int32_t ticks_per_second() { return 0; } 37 | 38 | // Reference implementation of the GetCurrentTimeTicks() function that's 39 | // required for a platform to support Tensorflow Lite for Microcontrollers 40 | // profiling. This returns 0 by default because timing is an optional feature 41 | // that builds without errors on platforms that do not need it. 42 | int32_t GetCurrentTimeTicks() { return 0; } 43 | 44 | } // namespace tflite 45 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/floor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/micro/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace floor { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteEvalTensor* input = 32 | tflite::micro::GetEvalInput(context, node, kInputTensor); 33 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 34 | TfLiteEvalTensor* output = 35 | tflite::micro::GetEvalOutput(context, node, kOutputTensor); 36 | reference_ops::Floor(tflite::micro::GetTensorShape(input), 37 | tflite::micro::GetTensorData(input), 38 | tflite::micro::GetTensorShape(output), 39 | tflite::micro::GetTensorData(output)); 40 | return kTfLiteOk; 41 | } 42 | } // namespace floor 43 | 44 | TfLiteRegistration Register_FLOOR() { 45 | return {/*init=*/nullptr, 46 | /*free=*/nullptr, 47 | /*prepare=*/nullptr, 48 | /*invoke=*/floor::Eval, 49 | /*profiling_string=*/nullptr, 50 | /*builtin_code=*/0, 51 | /*custom_name=*/nullptr, 52 | /*version=*/0}; 53 | } 54 | 55 | } // namespace micro 56 | } // namespace ops 57 | } // namespace tflite 58 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/Makefile: -------------------------------------------------------------------------------- 1 | KFVER=131 2 | 3 | DATATYPE ?= float 4 | 5 | PREFIX ?= /usr/local 6 | LIBDIR ?= $(PREFIX)/lib 7 | 8 | INSTALL ?= install 9 | 10 | ifeq ($(shell uname -s),Darwin) 11 | SHARED_NAME := libkissfft.dylib 12 | SHARED_FLAGS := -Wl,-install_name,$(SHARED_NAME) 13 | else 14 | SHARED_NAME := libkissfft.so 15 | SHARED_FLAGS := 
-Wl,-soname,$(SHARED_NAME) 16 | endif 17 | 18 | all: 19 | gcc -Wall -fPIC -c *.c -Dkiss_fft_scalar=$(DATATYPE) -o kiss_fft.o 20 | ar crus libkissfft.a kiss_fft.o 21 | gcc -shared $(SHARED_FLAGS) -o $(SHARED_NAME) kiss_fft.o 22 | 23 | install: all 24 | $(INSTALL) -Dt $(LIBDIR) $(SHARED_NAME) 25 | 26 | doc: 27 | @echo "Start by reading the README file. If you want to build and test lots of stuff, do a 'make testall'" 28 | @echo "but be aware that 'make testall' has dependencies that the basic kissfft software does not." 29 | @echo "It is generally unneeded to run these tests yourself, unless you plan on changing the inner workings" 30 | @echo "of kissfft and would like to make use of its regression tests." 31 | 32 | testall: 33 | # The simd and int32_t types may or may not work on your machine 34 | make -C test testcpp && test/testcpp 35 | make -C test DATATYPE=simd CFLAGADD="$(CFLAGADD)" test 36 | make -C test DATATYPE=int32_t CFLAGADD="$(CFLAGADD)" test 37 | make -C test DATATYPE=int16_t CFLAGADD="$(CFLAGADD)" test 38 | make -C test DATATYPE=float CFLAGADD="$(CFLAGADD)" test 39 | make -C test DATATYPE=double CFLAGADD="$(CFLAGADD)" test 40 | make -C test testsse 41 | echo "all tests passed" 42 | 43 | tarball: clean 44 | git archive --prefix=kissfft/ -o kissfft$(KFVER).tar.gz v$(KFVER) 45 | git archive --prefix=kissfft/ -o kissfft$(KFVER).zip v$(KFVER) 46 | 47 | clean: 48 | cd test && make clean 49 | cd tools && make clean 50 | rm -f kiss_fft*.tar.gz *~ *.pyc kiss_fft*.zip 51 | 52 | asm: kiss_fft.s 53 | 54 | kiss_fft.s: kiss_fft.c kiss_fft.h _kiss_fft_guts.h 55 | [ -e kiss_fft.s ] && mv kiss_fft.s kiss_fft.s~ || true 56 | gcc -S kiss_fft.c -O3 -mtune=native -ffast-math -fomit-frame-pointer -unroll-loops -dA -fverbose-asm 57 | gcc -o kiss_fft_short.s -S kiss_fft.c -O3 -mtune=native -ffast-math -fomit-frame-pointer -dA -fverbose-asm -DFIXED_POINT 58 | [ -e kiss_fft.s~ ] && diff kiss_fft.s~ kiss_fft.s || true 59 | 
-------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/TIPS: -------------------------------------------------------------------------------- 1 | Speed: 2 | * If you want to use multiple cores, then compile with -openmp or -fopenmp (see your compiler docs). 3 | Realize that larger FFTs will reap more benefit than smaller FFTs. This generally uses more CPU time, but 4 | less wall time. 5 | 6 | * experiment with compiler flags 7 | Special thanks to Oscar Lesta. He suggested some compiler flags 8 | for gcc that make a big difference. They shave 10-15% off 9 | execution time on some systems. Try some combination of: 10 | -march=pentiumpro 11 | -ffast-math 12 | -fomit-frame-pointer 13 | 14 | * If the input data has no imaginary component, use the kiss_fftr code under tools/. 15 | Real ffts are roughly twice as fast as complex. 16 | 17 | * If you can rearrange your code to do 4 FFTs in parallel and you are on a recent Intel or AMD machine, 18 | then you might want to experiment with the USE_SIMD code. See README.simd 19 | 20 | 21 | Reducing code size: 22 | * remove some of the butterflies. There are currently butterflies optimized for radices 23 | 2,3,4,5. It is worth mentioning that you can still use FFT sizes that contain 24 | other factors, they just won't be quite as fast. You can decide for yourself 25 | whether to keep radix 2 or 4. If you do some work in this area, let me 26 | know what you find. 27 | 28 | * For platforms where ROM/code space is more plentiful than RAM, 29 | consider creating a hardcoded kiss_fft_state. In other words, decide which 30 | FFT size(s) you want and make a structure with the correct factors and twiddles. 31 | 32 | * Frank van der Hulst offered numerous suggestions for smaller code size and correct operation 33 | on embedded targets. 
"I'm happy to help anyone who is trying to implement KISSFFT on a micro" 34 | 35 | Some of these were rolled into the mainline code base: 36 | - using long casts to promote intermediate results of short*short multiplication 37 | - delaying allocation of buffers that are sometimes unused. 38 | In some cases, it may be desirable to limit capability in order to better suit the target: 39 | - predefining the twiddle tables for the desired fft size. 40 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" 17 | 18 | namespace tflite { 19 | 20 | LinearMemoryPlanner::LinearMemoryPlanner() 21 | : current_buffer_count_(0), next_free_offset_(0) {} 22 | LinearMemoryPlanner::~LinearMemoryPlanner() {} 23 | 24 | TfLiteStatus LinearMemoryPlanner::AddBuffer( 25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used, 26 | int last_time_used) { 27 | if (current_buffer_count_ >= kMaxBufferCount) { 28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", 29 | kMaxBufferCount); 30 | return kTfLiteError; 31 | } 32 | buffer_offsets_[current_buffer_count_] = next_free_offset_; 33 | next_free_offset_ += size; 34 | ++current_buffer_count_; 35 | return kTfLiteOk; 36 | } 37 | 38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } 39 | 40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } 41 | 42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( 43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { 45 | TF_LITE_REPORT_ERROR(error_reporter, 46 | "buffer index %d is outside range 0 to %d", 47 | buffer_index, current_buffer_count_); 48 | return kTfLiteError; 49 | } 50 | *offset = buffer_offsets_[buffer_index]; 51 | return kTfLiteOk; 52 | } 53 | 54 | } // namespace tflite 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/Z8Z734F5Y) 2 | # Voice-Controlled Robot With the ESP32 3 | 4 | Build your own voice-controlled robot - all you will need is an ESP32 and Microphone board (and some motors etc...). 
5 | 6 | You can watch a video of the robot in action [here (YouTube)](https://www.youtube.com/watch?v=cp2qRrhaZRA): 7 | 8 | [![Demo Video](https://img.youtube.com/vi/cp2qRrhaZRA/0.jpg)](https://www.youtube.com/watch?v=cp2qRrhaZRA) 9 | 10 | I'm using a microphone breakout board that I've built myself based around the ICS-43434 - but any microphone board will work. The code has been written so that you can either use an I2S microphone or an analogue microphone using the built-in ADC. 11 | 12 | I would recommend using an I2S microphone if you have one as they have a lot better noise characteristics. 13 | 14 | My board is available on [eBay](https://www.ebay.co.uk/itm/154115095985) and [Tindie](https://www.tindie.com/products/atomic14/ics-43434-i2s-mems-microphone-breakout-board/) 15 | 16 | Other I2S microphones are equally suitable. Boards based around the INMP441 work very well. 17 | 18 | The voice recognition is carried out using a model trained with TensorFlow and runs on the ESP32 using TensorFlow Lite. A pre-trained model is included in the Firmware folder so you can get up and running straight away. 19 | 20 | There are two folders in this repo `model` and `firmware` check the `README.md` file in each one for complete details. 21 | 22 | ## Model 23 | 24 | Jupyter notebooks for creating a TensorFlow Lite model for "wake word" recognition. 25 | 26 | A pre-trained model has already been generated and added to the firmware folder. 27 | 28 | If you want to train your own, I added a couple of extra folders to the training data they are available here: 29 | 30 | ## Firmware 31 | 32 | ESP32 firmware built using Platform.io. This runs the neural network trying to detect the words `Left`, `Right`, `Forward` and `Backward`. 33 | 34 | The code assumes there are two continuous servos attached to the board such as the FS90R servo - these are readily available from various suppliers and you can normally buy them with wheels as well. 
35 | 36 | # 3dPrinting 37 | 38 | This contains the 3d models that I used to build my 2 wheeled robot. It's designed around the FS90R continuous servos and a cylindrical power bank. 39 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/debug_log.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Reference implementation of the DebugLog() function that's required for a 17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is 18 | // the only function that's absolutely required to be available on a target 19 | // device, since it's used for communicating test results back to the host so 20 | // that we can verify the implementation is working correctly. 21 | // It's designed to be as easy as possible to supply an implementation though. 22 | // On platforms that have a POSIX stack or C library, it can be written as a 23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error 24 | // stream of the console, but if there's no OS or C library available, there's 25 | // almost always an equivalent way to write out a string to some serial 26 | // interface that can be used instead. 
For example on Arm M-series MCUs, calling 27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to 28 | // whatever debug serial connection is available. If you're running mbed, you 29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling 30 | // `pc.printf("%s", s)`. 31 | // To add an equivalent function for your own platform, create your own 32 | // implementation file, and place it in a subfolder with named after the OS 33 | // you're targeting. For example, see the Cortex M bare metal version in 34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on 35 | // tensorflow/lite/micro/mbed/debug_log.cc. 36 | 37 | #include "tensorflow/lite/micro/debug_log.h" 38 | 39 | #include 40 | 41 | extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); } 42 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/RingBuffer.h: -------------------------------------------------------------------------------- 1 | #ifndef _ring_buffer_h_ 2 | #define _ring_buffer_h_ 3 | 4 | #include 5 | 6 | #define SAMPLE_BUFFER_SIZE 1600 7 | 8 | class AudioBuffer 9 | { 10 | public: 11 | int16_t samples[SAMPLE_BUFFER_SIZE]; 12 | AudioBuffer() 13 | { 14 | memset(samples, 0, SAMPLE_BUFFER_SIZE * sizeof(int16_t)); 15 | } 16 | }; 17 | 18 | class RingBufferAccessor 19 | { 20 | private: 21 | AudioBuffer **m_audio_buffers; 22 | int m_number_audio_buffers; 23 | AudioBuffer *m_current_buffer; 24 | int m_buffer_pos; 25 | int m_buffer_idx; 26 | int m_total_size; 27 | 28 | public: 29 | RingBufferAccessor(AudioBuffer **audioBuffers, int number_audio_buffers) 30 | { 31 | m_buffer_pos = 0; 32 | m_buffer_idx = 0; 33 | m_total_size = number_audio_buffers * SAMPLE_BUFFER_SIZE; 34 | m_audio_buffers = audioBuffers; 35 | m_number_audio_buffers = number_audio_buffers; 36 | m_current_buffer = audioBuffers[0]; 37 | } 38 | int getIndex() 39 | { 40 | return m_buffer_idx * SAMPLE_BUFFER_SIZE + 
m_buffer_pos; 41 | } 42 | void setIndex(int index) 43 | { 44 | // handle negative indexes 45 | index = (index + m_total_size) % m_total_size; 46 | // work out which buffer 47 | m_buffer_idx = (index / SAMPLE_BUFFER_SIZE) % m_number_audio_buffers; 48 | // and where we are in the buffer 49 | m_buffer_pos = index % SAMPLE_BUFFER_SIZE; 50 | m_current_buffer = m_audio_buffers[m_buffer_idx]; 51 | } 52 | inline int16_t getCurrentSample() 53 | { 54 | return m_current_buffer->samples[m_buffer_pos]; 55 | } 56 | inline void setCurrentSample(int16_t sample) 57 | { 58 | m_current_buffer->samples[m_buffer_pos] = sample; 59 | } 60 | inline void rewind(int samples) { 61 | setIndex(getIndex() - samples); 62 | } 63 | inline bool moveToNextSample() 64 | { 65 | m_buffer_pos++; 66 | if (m_buffer_pos == SAMPLE_BUFFER_SIZE) 67 | { 68 | m_buffer_pos = 0; 69 | m_buffer_idx++; 70 | if (m_buffer_idx == m_number_audio_buffers) 71 | { 72 | m_buffer_idx = 0; 73 | } 74 | m_current_buffer = m_audio_buffers[m_buffer_idx]; 75 | return true; 76 | } 77 | return false; 78 | } 79 | }; 80 | 81 | #endif -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow/lite/kernels/internal/common.h" 22 | #include "tensorflow/lite/kernels/internal/compatibility.h" 23 | #include "tensorflow/lite/kernels/internal/cppmath.h" 24 | #include "tensorflow/lite/kernels/internal/types.h" 25 | 26 | namespace tflite { 27 | 28 | namespace reference_ops { 29 | 30 | template 31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params, 32 | const RuntimeShape& input_shape, 33 | const InputT* input_data, 34 | const RuntimeShape& output_shape, 35 | OutputT* output_data) { 36 | const int32_t zero_point = op_params.zero_point; 37 | const double scale = op_params.scale; 38 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 39 | static constexpr int32_t min_val = std::numeric_limits::min(); 40 | static constexpr int32_t max_val = std::numeric_limits::max(); 41 | 42 | for (int i = 0; i < flat_size; i++) { 43 | const InputT val = input_data[i]; 44 | int32_t unclamped = 45 | static_cast(TfLiteRound(val / static_cast(scale))) + 46 | zero_point; 47 | int32_t clamped = std::min(std::max(unclamped, min_val), max_val); 48 | output_data[i] = clamped; 49 | } 50 | } 51 | 52 | } // namespace reference_ops 53 | 54 | } // namespace tflite 55 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ 56 | -------------------------------------------------------------------------------- /model/README.md: -------------------------------------------------------------------------------- 1 | # Voice-controlled robot model 2 | 3 | This folder contains the Jupyter notebooks for creating the training data, training the model, and exporting the model to TensorFlow Lite. 
4 | 5 | ## Setup 6 | 7 | You will need python3 installed - follow the instructions for your platform to get this set up and then create a virtual environment. 8 | You may have to install the `portaudio` library manually by using the following command `sudo apt-get install portaudio19-dev`. 9 | ``` 10 | python3 -m venv venv 11 | . ./venv/bin/activate 12 | pip install -r requirements.txt 13 | ``` 14 | 15 | ## Running the notebooks 16 | 17 | ``` 18 | . ./venv/bin/activate 19 | jupyter notebook . 20 | ``` 21 | 22 | # The notebooks 23 | 24 | ## Generate Training Data.pynb 25 | 26 | We make use of the speech commands dataset available from here: 27 | 28 | [https://storage.cloud.google.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz](https://storage.cloud.google.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz) 29 | 30 | Download and expand the data using: 31 | 32 | ``` 33 | tar -xzf data_speech_commands_v0.02.tar.gz -C speech_data 34 | ``` 35 | 36 | For my training, I add a lot more data to the `_background_noise_` folder. I also created a `_problem_noise_` folder and recorded sounds that seemed to confuse the model - low frequency humming noises around 100Hz seem to cause problems. 37 | 38 | You can get this data from here: 39 | 40 | * [\_background\_noise.zip](https://data.atomic14.com/_background_noise_.zip) 41 | * [\_problem_noise\_.zip](https://data.atomic14.com/_problem_noise_.zip) 42 | 43 | The notebook will run through all these samples and output files for the training step. You will need about 15GB of free space to save these files. 44 | 45 | ## Train Model.ipynb 46 | 47 | This will train a model against the training data. This will train on a CPU in 2-3 hours. If you have a suitable GPU this training will be considerably faster. 48 | 49 | The training will output a file called `checkpoint.model` every time is sees an improvement in the validation performance and a file called `trained.model` on training completion. 
50 | 51 | You can optionally take these and train them on the complete dataset. 52 | 53 | ## Convert Trained Model To TFLite.ipynb 54 | 55 | This will take the TensorFlow model and convert it for use in TensorFlow lite. 56 | 57 | Copy the output of this workbook into `firmware/lib/nerual_network/model.cc`. 58 | 59 | A pre-trained model has already been generated and placed in that location. 60 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/neg.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/neg.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/micro/kernels/kernel_util.h" 21 | 22 | namespace tflite { 23 | namespace ops { 24 | namespace micro { 25 | namespace neg { 26 | 27 | constexpr int kInputTensor = 0; 28 | constexpr int kOutputTensor = 0; 29 | 30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 31 | const TfLiteEvalTensor* input = 32 | tflite::micro::GetEvalInput(context, node, kInputTensor); 33 | TfLiteEvalTensor* output = 34 | tflite::micro::GetEvalOutput(context, node, kOutputTensor); 35 | switch (input->type) { 36 | // TODO(wangtz): handle for kTfLiteInt8 37 | case kTfLiteFloat32: 38 | reference_ops::Negate(tflite::micro::GetTensorShape(input), 39 | tflite::micro::GetTensorData(input), 40 | tflite::micro::GetTensorShape(output), 41 | tflite::micro::GetTensorData(output)); 42 | break; 43 | default: 44 | TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", 45 | TfLiteTypeGetName(input->type), input->type); 46 | return kTfLiteError; 47 | } 48 | return kTfLiteOk; 49 | } 50 | 51 | } // namespace neg 52 | 53 | TfLiteRegistration Register_NEG() { 54 | return {/*init=*/nullptr, 55 | /*free=*/nullptr, 56 | /*prepare=*/nullptr, 57 | /*invoke=*/neg::Eval, 58 | /*profiling_string=*/nullptr, 59 | /*builtin_code=*/0, 60 | /*custom_name=*/nullptr, 61 | /*version=*/0}; 62 | } 63 | 64 | } // namespace micro 65 | } // namespace ops 66 | } // namespace tflite 67 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/error_reporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 17 | 18 | #include 19 | 20 | namespace tflite { 21 | 22 | /// A functor that reports error to supporting system. Invoked similar to 23 | /// printf. 24 | /// 25 | /// Usage: 26 | /// ErrorReporter foo; 27 | /// foo.Report("test %d", 5); 28 | /// or 29 | /// va_list args; 30 | /// foo.Report("test %d", args); // where args is va_list 31 | /// 32 | /// Subclass ErrorReporter to provide another reporting destination. 33 | /// For example, if you have a GUI program, you might redirect to a buffer 34 | /// that drives a GUI error log box. 35 | class ErrorReporter { 36 | public: 37 | virtual ~ErrorReporter() {} 38 | virtual int Report(const char* format, va_list args) = 0; 39 | int Report(const char* format, ...); 40 | int ReportError(void*, const char* format, ...); 41 | }; 42 | 43 | } // namespace tflite 44 | 45 | // You should not make bare calls to the error reporter, instead use the 46 | // TF_LITE_REPORT_ERROR macro, since this allows message strings to be 47 | // stripped when the binary size has to be optimized. If you are looking to 48 | // reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and 49 | // every call will be stubbed out, taking no memory. 
50 | #ifndef TF_LITE_STRIP_ERROR_STRINGS 51 | #define TF_LITE_REPORT_ERROR(reporter, ...) \ 52 | do { \ 53 | static_cast(reporter)->Report(__VA_ARGS__); \ 54 | } while (false) 55 | #else // TF_LITE_STRIP_ERROR_STRINGS 56 | #define TF_LITE_REPORT_ERROR(reporter, ...) 57 | #endif // TF_LITE_STRIP_ERROR_STRINGS 58 | 59 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ 60 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | #include "tensorflow/lite/micro/all_ops_resolver.h" 14 | 15 | #include "tensorflow/lite/micro/kernels/micro_ops.h" 16 | 17 | namespace tflite { 18 | namespace ops { 19 | namespace micro { 20 | namespace custom { 21 | TfLiteRegistration* Register_ETHOSU(); 22 | const char* GetString_ETHOSU(); 23 | } // namespace custom 24 | } // namespace micro 25 | } // namespace ops 26 | 27 | AllOpsResolver::AllOpsResolver() { 28 | // Please keep this list of Builtin Operators in alphabetical order. 
29 | AddAbs(); 30 | AddAdd(); 31 | AddArgMax(); 32 | AddArgMin(); 33 | AddAveragePool2D(); 34 | AddCeil(); 35 | AddConcatenation(); 36 | AddConv2D(); 37 | AddCos(); 38 | AddDepthwiseConv2D(); 39 | AddDequantize(); 40 | AddEqual(); 41 | AddFloor(); 42 | AddFullyConnected(); 43 | AddGreater(); 44 | AddGreaterEqual(); 45 | AddHardSwish(); 46 | AddL2Normalization(); 47 | AddLess(); 48 | AddLessEqual(); 49 | AddLog(); 50 | AddLogicalAnd(); 51 | AddLogicalNot(); 52 | AddLogicalOr(); 53 | AddLogistic(); 54 | AddMaximum(); 55 | AddMaxPool2D(); 56 | AddMean(); 57 | AddMinimum(); 58 | AddMul(); 59 | AddNeg(); 60 | AddNotEqual(); 61 | AddPack(); 62 | AddPad(); 63 | AddPadV2(); 64 | AddPrelu(); 65 | AddQuantize(); 66 | AddReduceMax(); 67 | AddRelu(); 68 | AddRelu6(); 69 | AddReshape(); 70 | AddResizeNearestNeighbor(); 71 | AddRound(); 72 | AddRsqrt(); 73 | AddSin(); 74 | AddSoftmax(); 75 | AddSplit(); 76 | AddSplitV(); 77 | AddSqrt(); 78 | AddSquare(); 79 | AddStridedSlice(); 80 | AddSub(); 81 | AddSvdf(); 82 | AddTanh(); 83 | AddUnpack(); 84 | 85 | // TODO(b/159644355): Figure out if custom Ops belong in AllOpsResolver. 86 | TfLiteRegistration* registration = 87 | tflite::ops::micro::custom::Register_ETHOSU(); 88 | if (registration) { 89 | AddCustom(tflite::ops::micro::custom::GetString_ETHOSU(), registration); 90 | } 91 | } 92 | 93 | } // namespace tflite 94 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/core/api/op_resolver.h" 17 | 18 | #include "flatbuffers/flatbuffers.h" // from @flatbuffers 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | 22 | namespace tflite { 23 | 24 | TfLiteStatus GetRegistrationFromOpCode( 25 | const OperatorCode* opcode, const OpResolver& op_resolver, 26 | ErrorReporter* error_reporter, const TfLiteRegistration** registration) { 27 | TfLiteStatus status = kTfLiteOk; 28 | *registration = nullptr; 29 | auto builtin_code = opcode->builtin_code(); 30 | int version = opcode->version(); 31 | 32 | if (builtin_code > BuiltinOperator_MAX || 33 | builtin_code < BuiltinOperator_MIN) { 34 | TF_LITE_REPORT_ERROR( 35 | error_reporter, 36 | "Op builtin_code out of range: %d. 
Are you using old TFLite binary " 37 | "with newer model?", 38 | builtin_code); 39 | status = kTfLiteError; 40 | } else if (builtin_code != BuiltinOperator_CUSTOM) { 41 | *registration = op_resolver.FindOp(builtin_code, version); 42 | if (*registration == nullptr) { 43 | TF_LITE_REPORT_ERROR( 44 | error_reporter, 45 | "Didn't find op for builtin opcode '%s' version '%d'\n", 46 | EnumNameBuiltinOperator(builtin_code), version); 47 | status = kTfLiteError; 48 | } 49 | } else if (!opcode->custom_code()) { 50 | TF_LITE_REPORT_ERROR( 51 | error_reporter, 52 | "Operator with CUSTOM builtin_code has no custom_code.\n"); 53 | status = kTfLiteError; 54 | } else { 55 | const char* name = opcode->custom_code()->c_str(); 56 | *registration = op_resolver.FindOp(name, version); 57 | if (*registration == nullptr) { 58 | // Do not report error for unresolved custom op, we do the final check 59 | // while preparing ops. 60 | status = kTfLiteError; 61 | } 62 | } 63 | return status; 64 | } 65 | 66 | } // namespace tflite 67 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/README.simd: -------------------------------------------------------------------------------- 1 | If you are reading this, it means you think you may be interested in using the SIMD extensions in kissfft 2 | to do 4 *separate* FFTs at once. 3 | 4 | Beware! Beyond here there be dragons! 5 | 6 | This API is not easy to use, is not well documented, and breaks the KISS principle. 7 | 8 | 9 | Still reading? Okay, you may get rewarded for your patience with a considerable speedup 10 | (2-3x) on intel x86 machines with SSE if you are willing to jump through some hoops. 11 | 12 | The basic idea is to use the packed 4 float __m128 data type as a scalar element. 13 | This means that the format is pretty convoluted. It performs 4 FFTs per fft call on signals A,B,C,D. 
14 | 15 | For complex data, the data is interlaced as follows: 16 | rA0,rB0,rC0,rD0, iA0,iB0,iC0,iD0, rA1,rB1,rC1,rD1, iA1,iB1,iC1,iD1 ... 17 | where "rA0" is the real part of the zeroth sample for signal A 18 | 19 | Real-only data is laid out: 20 | rA0,rB0,rC0,rD0, rA1,rB1,rC1,rD1, ... 21 | 22 | Compile with gcc flags something like 23 | -O3 -mpreferred-stack-boundary=4 -DUSE_SIMD=1 -msse 24 | 25 | Be aware of SIMD alignment. This is the most likely cause of segfaults. 26 | The code within kissfft uses scratch variables on the stack. 27 | With SIMD, these must have addresses on 16 byte boundaries. 28 | Search on "SIMD alignment" for more info. 29 | 30 | 31 | 32 | Robin at Divide Concept was kind enough to share his code for formatting to/from the SIMD kissfft. 33 | I have not run it -- use it at your own risk. It appears to do 4xN and Nx4 transpositions 34 | (out of place). 35 | 36 | void SSETools::pack128(float* target, float* source, unsigned long size128) 37 | { 38 | __m128* pDest = (__m128*)target; 39 | __m128* pDestEnd = pDest+size128; 40 | float* source0=source; 41 | float* source1=source0+size128; 42 | float* source2=source1+size128; 43 | float* source3=source2+size128; 44 | 45 | while(pDest 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/core/api/error_reporter.h" 23 | #include "tensorflow/lite/schema/schema_generated.h" 24 | 25 | namespace tflite { 26 | 27 | // Returns the next pointer address aligned to the given alignment. 28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); 29 | 30 | // Returns the previous pointer address aligned to the given alignment. 31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); 32 | 33 | // Returns an increased size that's a multiple of alignment. 34 | size_t AlignSizeUp(size_t size, size_t alignment); 35 | 36 | // Returns size in bytes for a given TfLiteType. 
37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size); 38 | 39 | // How many bytes are needed to hold a tensor's contents. 40 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, 41 | size_t* bytes, size_t* type_size, 42 | ErrorReporter* error_reporter); 43 | 44 | // How many bytes are used in a TfLiteEvalTensor instance. The byte length is 45 | // returned in out_bytes. 46 | TfLiteStatus TfLiteEvalTensorByteLength(const TfLiteEvalTensor* eval_tensor, 47 | size_t* out_bytes); 48 | 49 | // Deduce output dimensions from input and allocate given size. 50 | // Useful for operators with two inputs where the largest input should equal the 51 | // output dimension. 52 | TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context, 53 | const TfLiteTensor* input1, 54 | const TfLiteTensor* input2, 55 | TfLiteTensor* output); 56 | 57 | } // namespace tflite 58 | 59 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ 60 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/recording_simple_memory_allocator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ 17 | #define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ 18 | 19 | #include "tensorflow/lite/micro/compatibility.h" 20 | #include "tensorflow/lite/micro/simple_memory_allocator.h" 21 | 22 | namespace tflite { 23 | 24 | // Utility class used to log allocations of a SimpleMemoryAllocator. Should only 25 | // be used in debug/evaluation settings or unit tests to evaluate allocation 26 | // usage. 27 | class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { 28 | public: 29 | RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter, 30 | uint8_t* buffer_head, size_t buffer_size); 31 | // TODO(b/157615197): Cleanup constructors/destructor and use factory 32 | // functions. 33 | ~RecordingSimpleMemoryAllocator() override; 34 | 35 | static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter, 36 | uint8_t* buffer_head, 37 | size_t buffer_size); 38 | 39 | // Returns the number of bytes requested from the head or tail. 40 | size_t GetRequestedBytes() const; 41 | 42 | // Returns the number of bytes actually allocated from the head or tail. This 43 | // value will be >= to the number of requested bytes due to padding and 44 | // alignment. 45 | size_t GetUsedBytes() const; 46 | 47 | // Returns the number of alloc calls from the head or tail. 
48 | size_t GetAllocatedCount() const; 49 | 50 | TfLiteStatus EnsureHeadSize(size_t size, size_t alignment) override; 51 | uint8_t* AllocateFromTail(size_t size, size_t alignment) override; 52 | 53 | private: 54 | size_t requested_head_bytes_; 55 | size_t requested_tail_bytes_; 56 | size_t used_bytes_; 57 | size_t alloc_count_; 58 | 59 | TF_LITE_REMOVE_VIRTUAL_DELETE 60 | }; 61 | 62 | } // namespace tflite 63 | 64 | #endif // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ 65 | -------------------------------------------------------------------------------- /firmware/lib/audio_input/I2SSampler.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "I2SSampler.h" 3 | #include 4 | #include 5 | #include "RingBuffer.h" 6 | 7 | void I2SSampler::addSample(int16_t sample) 8 | { 9 | // store the sample 10 | m_write_ring_buffer_accessor->setCurrentSample(sample); 11 | if (m_write_ring_buffer_accessor->moveToNextSample()) 12 | { 13 | // trigger the processor task as we've filled a buffer 14 | xTaskNotify(m_processor_task_handle, 1, eSetBits); 15 | } 16 | } 17 | 18 | void i2sReaderTask(void *param) 19 | { 20 | I2SSampler *sampler = (I2SSampler *)param; 21 | while (true) 22 | { 23 | // wait for some data to arrive on the queue 24 | i2s_event_t evt; 25 | if (xQueueReceive(sampler->m_i2s_queue, &evt, portMAX_DELAY) == pdPASS) 26 | { 27 | if (evt.type == I2S_EVENT_RX_DONE) 28 | { 29 | size_t bytesRead = 0; 30 | do 31 | { 32 | // read data from the I2S peripheral 33 | uint8_t i2sData[1024]; 34 | // read from i2s 35 | i2s_read(sampler->getI2SPort(), i2sData, 1024, &bytesRead, 10); 36 | // process the raw data 37 | sampler->processI2SData(i2sData, bytesRead); 38 | } while (bytesRead > 0); 39 | } 40 | } 41 | } 42 | } 43 | 44 | I2SSampler::I2SSampler() 45 | { 46 | m_audio_buffers = (AudioBuffer **)malloc(sizeof(AudioBuffer **) * AUDIO_BUFFER_COUNT); 47 | 48 | // allocate the audio buffers 49 | for (int i = 0; i < 
AUDIO_BUFFER_COUNT; i++) 50 | { 51 | m_audio_buffers[i] = new AudioBuffer(); 52 | } 53 | m_write_ring_buffer_accessor = new RingBufferAccessor(m_audio_buffers, AUDIO_BUFFER_COUNT); 54 | } 55 | 56 | void I2SSampler::start(i2s_port_t i2s_port, i2s_config_t &i2s_config, TaskHandle_t processor_task_handle) 57 | { 58 | Serial.println("Starting i2s"); 59 | m_i2s_port = i2s_port; 60 | m_processor_task_handle = processor_task_handle; 61 | //install and start i2s driver 62 | i2s_driver_install(m_i2s_port, &i2s_config, 4, &m_i2s_queue); 63 | // set up the I2S configuration from the subclass 64 | configureI2S(); 65 | // start a task to read samples 66 | xTaskCreatePinnedToCore(i2sReaderTask, "i2s Reader Task", 4096, this, 1, &m_reader_task_handle, 0); 67 | } 68 | 69 | RingBufferAccessor *I2SSampler::getRingBufferReader() 70 | { 71 | RingBufferAccessor *reader = new RingBufferAccessor(m_audio_buffers, AUDIO_BUFFER_COUNT); 72 | // place the reaader at the same position as the writer - clients can move it around as required 73 | reader->setIndex(m_write_ring_buffer_accessor->getIndex()); 74 | return reader; 75 | } 76 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/core/api/op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/c/common.h" 21 | #include "tensorflow/lite/core/api/error_reporter.h" 22 | #include "tensorflow/lite/schema/schema_generated.h" 23 | 24 | namespace tflite { 25 | 26 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom 27 | /// op names. This is the mechanism that ops being referenced in the flatbuffer 28 | /// model are mapped to executable function pointers (TfLiteRegistrations). 29 | class OpResolver { 30 | public: 31 | /// Finds the op registration for a builtin operator by enum code. 32 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, 33 | int version) const = 0; 34 | /// Finds the op registration of a custom operator by op name. 35 | virtual const TfLiteRegistration* FindOp(const char* op, 36 | int version) const = 0; 37 | 38 | // Returns optional delegates for resolving and handling ops in the flatbuffer 39 | // model. This may be used in addition to the standard TfLiteRegistration 40 | // lookup for graph resolution. 41 | using TfLiteDelegatePtrVector = 42 | std::vector>; 43 | virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const { 44 | return TfLiteDelegatePtrVector(); 45 | } 46 | 47 | virtual ~OpResolver() {} 48 | }; 49 | 50 | // Handles the logic for converting between an OperatorCode structure extracted 51 | // from a flatbuffer and information about a registered operator 52 | // implementation. 
53 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, 54 | const OpResolver& op_resolver, 55 | ErrorReporter* error_reporter, 56 | const TfLiteRegistration** registration); 57 | 58 | } // namespace tflite 59 | 60 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ 61 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/types.h" 19 | 20 | namespace tflite { 21 | 22 | namespace reference_ops { 23 | 24 | template 25 | void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, 26 | const T3* input2_data, const RuntimeShape& output_shape, 27 | T2* output_data, const Cmp& cmp) { 28 | TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0); 29 | TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1, 30 | output_shape.DimensionsCount()); 31 | int axis = input2_data[0]; 32 | if (axis < 0) { 33 | axis += input1_shape.DimensionsCount(); 34 | } 35 | const int axis_size = input1_shape.Dims(axis); 36 | 37 | int outer_size = 1; 38 | for (int i = 0; i < axis; ++i) { 39 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i)); 40 | outer_size *= input1_shape.Dims(i); 41 | } 42 | 43 | int inner_size = 1; 44 | const int dims_count = input1_shape.DimensionsCount(); 45 | for (int i = axis + 1; i < dims_count; ++i) { 46 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1)); 47 | inner_size *= input1_shape.Dims(i); 48 | } 49 | for (int outer = 0; outer < outer_size; ++outer) { 50 | for (int inner = 0; inner < inner_size; ++inner) { 51 | auto min_max_value = input1_data[outer * axis_size * inner_size + inner]; 52 | T2 min_max_index = 0; 53 | for (int i = 1; i < axis_size; ++i) { 54 | const auto& curr_value = 55 | input1_data[(outer * axis_size + i) * inner_size + inner]; 56 | if (cmp(curr_value, min_max_value)) { 57 | min_max_value = curr_value; 58 | min_max_index = static_cast(i); 59 | } 60 | } 61 | output_data[outer * inner_size + inner] = min_max_index; 62 | } 63 | } 64 | } 65 | } // namespace reference_ops 66 | } // namespace tflite 67 | 68 | #endif // 
TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ 69 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/op_macros.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 17 | 18 | // If we're on a platform without standard IO functions, fall back to a 19 | // non-portable function. 20 | #ifdef TF_LITE_MCU_DEBUG_LOG 21 | 22 | #include "tensorflow/lite/micro/debug_log.h" 23 | 24 | #define DEBUG_LOG(x) \ 25 | do { \ 26 | DebugLog(x); \ 27 | } while (0) 28 | 29 | inline void InfiniteLoop() { 30 | DEBUG_LOG("HALTED\n"); 31 | while (1) { 32 | } 33 | } 34 | 35 | #define TFLITE_ABORT InfiniteLoop(); 36 | 37 | #else // TF_LITE_MCU_DEBUG_LOG 38 | 39 | #include 40 | #include 41 | 42 | #define DEBUG_LOG(x) \ 43 | do { \ 44 | fprintf(stderr, "%s", (x)); \ 45 | } while (0) 46 | 47 | // Report Error for unsupported type by op 'op_name' and returns kTfLiteError. 
48 | #define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name) \ 49 | do { \ 50 | TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \ 51 | __FILE__, __LINE__, TfLiteTypeGetName(type), \ 52 | (op_name)); \ 53 | return kTfLiteError; \ 54 | } while (0) 55 | 56 | #define TFLITE_ABORT abort() 57 | 58 | #endif // TF_LITE_MCU_DEBUG_LOG 59 | 60 | #ifdef NDEBUG 61 | #define TFLITE_ASSERT_FALSE (static_cast(0)) 62 | #else 63 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT 64 | #endif 65 | 66 | #define TF_LITE_FATAL(msg) \ 67 | do { \ 68 | DEBUG_LOG(msg); \ 69 | DEBUG_LOG("\nFATAL\n"); \ 70 | TFLITE_ABORT; \ 71 | } while (0) 72 | 73 | #define TF_LITE_ASSERT(x) \ 74 | do { \ 75 | if (!(x)) TF_LITE_FATAL(#x); \ 76 | } while (0) 77 | 78 | #define TF_LITE_ASSERT_EQ(x, y) \ 79 | do { \ 80 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ 81 | } while (0) 82 | 83 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ 84 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_profiler.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ 18 | 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/core/api/profiler.h" 21 | #include "tensorflow/lite/micro/compatibility.h" 22 | 23 | namespace tflite { 24 | 25 | // MicroProfiler creates a common way to gain fine-grained insight into runtime 26 | // performance. Bottleck operators can be identified along with slow code 27 | // sections. This can be used in conjunction with running the relevant micro 28 | // benchmark to evaluate end-to-end performance. 29 | // 30 | // Usage example: 31 | // MicroProfiler profiler(error_reporter); 32 | // { 33 | // ScopedProfile scoped_profile(profiler, tag); 34 | // work_to_profile(); 35 | // } 36 | // 37 | // This will call the following methods in order: 38 | // int event_handle = profiler->BeginEvent(op_name, EventType::DEFAULT, 0) 39 | // work_to_profile(); 40 | // profiler->EndEvent(event_handle) 41 | class MicroProfiler : public tflite::Profiler { 42 | public: 43 | explicit MicroProfiler(tflite::ErrorReporter* reporter); 44 | ~MicroProfiler() override = default; 45 | 46 | // AddEvent is unused for Tf Micro. 47 | void AddEvent(const char* tag, EventType event_type, uint64_t start, 48 | uint64_t end, int64_t event_metadata1, 49 | int64_t event_metadata2) override{}; 50 | 51 | // BeginEvent followed by code followed by EndEvent will profile the code 52 | // enclosed. Multiple concurrent events are unsupported, so the return value 53 | // is always 0. Event_metadata1 and event_metadata2 are unused. The tag 54 | // pointer must be valid until EndEvent is called. 55 | uint32_t BeginEvent(const char* tag, EventType event_type, 56 | int64_t event_metadata1, 57 | int64_t event_metadata2) override; 58 | 59 | // Event_handle is ignored since TF Micro does not support concurrent events. 
60 | void EndEvent(uint32_t event_handle) override; 61 | 62 | private: 63 | tflite::ErrorReporter* reporter_; 64 | int32_t start_time_; 65 | const char* event_tag_; 66 | TF_LITE_REMOVE_VIRTUAL_DELETE 67 | }; 68 | 69 | } // namespace tflite 70 | 71 | #endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ 72 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | #include "tensorflow/lite/kernels/internal/types.h" 20 | 21 | namespace tflite { 22 | namespace reference_ops { 23 | 24 | template 25 | void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape, 26 | const T* input1_data, 27 | const RuntimeShape& unextended_input2_shape, 28 | const T* input2_data, 29 | const RuntimeShape& unextended_output_shape, 30 | T* output_data, Op op) { 31 | // Uses element-wise calculation if broadcast is not required. 
32 | if (unextended_input1_shape == unextended_input2_shape) { 33 | const int flat_size = 34 | MatchingElementsSize(unextended_input1_shape, unextended_input2_shape, 35 | unextended_output_shape); 36 | for (int i = 0; i < flat_size; ++i) { 37 | output_data[i] = op(input1_data[i], input2_data[i]); 38 | } 39 | } else { 40 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); 41 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); 42 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); 43 | 44 | NdArrayDesc desc1; 45 | NdArrayDesc desc2; 46 | NdArrayDesc output_desc; 47 | NdArrayDescsForElementwiseBroadcast( 48 | unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); 49 | CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), 50 | &output_desc); 51 | 52 | auto maxmin_func = [&](int indexes[N]) { 53 | output_data[SubscriptToIndex(output_desc, indexes)] = 54 | op(input1_data[SubscriptToIndex(desc1, indexes)], 55 | input2_data[SubscriptToIndex(desc2, indexes)]); 56 | }; 57 | NDOpsHelper(output_desc, maxmin_func); 58 | } 59 | } 60 | 61 | } // namespace reference_ops 62 | } // namespace tflite 63 | 64 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ 65 | -------------------------------------------------------------------------------- /firmware/lib/audio_processor/src/kissfft/tools/kfc.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. 3 | * This file is part of KISS FFT - https://github.com/mborgerding/kissfft 4 | * 5 | * SPDX-License-Identifier: BSD-3-Clause 6 | * See COPYING file for more information. 
7 | */ 8 | 9 | #include "kfc.h" 10 | 11 | typedef struct cached_fft *kfc_cfg; 12 | 13 | struct cached_fft 14 | { 15 | int nfft; 16 | int inverse; 17 | kiss_fft_cfg cfg; 18 | kfc_cfg next; 19 | }; 20 | 21 | static kfc_cfg cache_root=NULL; 22 | static int ncached=0; 23 | 24 | static kiss_fft_cfg find_cached_fft(int nfft,int inverse) 25 | { 26 | size_t len; 27 | kfc_cfg cur=cache_root; 28 | kfc_cfg prev=NULL; 29 | while ( cur ) { 30 | if ( cur->nfft == nfft && inverse == cur->inverse ) 31 | break;/*found the right node*/ 32 | prev = cur; 33 | cur = prev->next; 34 | } 35 | if (cur== NULL) { 36 | /* no cached node found, need to create a new one*/ 37 | kiss_fft_alloc(nfft,inverse,0,&len); 38 | #ifdef USE_SIMD 39 | int padding = (16-sizeof(struct cached_fft)) & 15; 40 | // make sure the cfg aligns on a 16 byte boundary 41 | len += padding; 42 | #endif 43 | cur = (kfc_cfg)KISS_FFT_MALLOC((sizeof(struct cached_fft) + len )); 44 | if (cur == NULL) 45 | return NULL; 46 | cur->cfg = (kiss_fft_cfg)(cur+1); 47 | #ifdef USE_SIMD 48 | cur->cfg = (kiss_fft_cfg) ((char*)(cur+1)+padding); 49 | #endif 50 | kiss_fft_alloc(nfft,inverse,cur->cfg,&len); 51 | cur->nfft=nfft; 52 | cur->inverse=inverse; 53 | cur->next = NULL; 54 | if ( prev ) 55 | prev->next = cur; 56 | else 57 | cache_root = cur; 58 | ++ncached; 59 | } 60 | return cur->cfg; 61 | } 62 | 63 | void kfc_cleanup(void) 64 | { 65 | kfc_cfg cur=cache_root; 66 | kfc_cfg next=NULL; 67 | while (cur){ 68 | next = cur->next; 69 | free(cur); 70 | cur=next; 71 | } 72 | ncached=0; 73 | cache_root = NULL; 74 | } 75 | void kfc_fft(int nfft, const kiss_fft_cpx * fin,kiss_fft_cpx * fout) 76 | { 77 | kiss_fft( find_cached_fft(nfft,0),fin,fout ); 78 | } 79 | 80 | void kfc_ifft(int nfft, const kiss_fft_cpx * fin,kiss_fft_cpx * fout) 81 | { 82 | kiss_fft( find_cached_fft(nfft,1),fin,fout ); 83 | } 84 | 85 | #ifdef KFC_TEST 86 | static void check(int nc) 87 | { 88 | if (ncached != nc) { 89 | fprintf(stderr,"ncached should be %d,but it is 
%d\n",nc,ncached); 90 | exit(1); 91 | } 92 | } 93 | 94 | int main(void) 95 | { 96 | kiss_fft_cpx buf1[1024],buf2[1024]; 97 | memset(buf1,0,sizeof(buf1)); 98 | check(0); 99 | kfc_fft(512,buf1,buf2); 100 | check(1); 101 | kfc_fft(512,buf1,buf2); 102 | check(1); 103 | kfc_ifft(512,buf1,buf2); 104 | check(2); 105 | kfc_cleanup(); 106 | check(0); 107 | return 0; 108 | } 109 | #endif 110 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | share/python-wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .nox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | *.py,cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | cover/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | .pybuilder/ 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | # For a library or package, you might want to ignore these files since the code is 86 | # intended to run in multiple environments; otherwise, check them in: 87 | # .python-version 88 | 89 | # pipenv 90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 93 | # install all needed dependencies. 94 | #Pipfile.lock 95 | 96 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 97 | __pypackages__/ 98 | 99 | # Celery stuff 100 | celerybeat-schedule 101 | celerybeat.pid 102 | 103 | # SageMath parsed files 104 | *.sage.py 105 | 106 | # Environments 107 | .env 108 | .venv 109 | env/ 110 | venv/ 111 | ENV/ 112 | env.bak/ 113 | venv.bak/ 114 | 115 | # Spyder project settings 116 | .spyderproject 117 | .spyproject 118 | 119 | # Rope project settings 120 | .ropeproject 121 | 122 | # mkdocs documentation 123 | /site 124 | 125 | # mypy 126 | .mypy_cache/ 127 | .dmypy.json 128 | dmypy.json 129 | 130 | # Pyre type checker 131 | .pyre/ 132 | 133 | # pytype static type analyzer 134 | .pytype/ 135 | 136 | # Cython debug symbols 137 | cython_debug/ 138 | 139 | # training data 140 | training_data 141 | testing_data 142 | all_data 143 | 144 | # fonts 145 | fonts/google/* 146 | fonts/vi/* 147 | fonts/win/* 148 | 149 | # tensorboard 150 | logs/* 151 | 152 | 153 | # ignore the speech data 154 | speech_data 155 | *.npz 156 | 157 | # models 158 | *.model 159 | *.tflite 160 | logs 161 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/recording_micro_interpreter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ 17 | #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ 18 | 19 | #include "tensorflow/lite/micro/micro_interpreter.h" 20 | #include "tensorflow/lite/micro/recording_micro_allocator.h" 21 | 22 | namespace tflite { 23 | 24 | // Utility subclass that enables internal recordings of the MicroInterpreter. 25 | // This class should be used to audit and analyze memory arena usage for a given 26 | // model and interpreter. 27 | // 28 | // After construction and the first Invoke() or AllocateTensors() call - the 29 | // memory usage is recorded and available through the GetMicroAllocator() 30 | // function. See RecordingMicroAlloctor for more details on what is currently 31 | // recorded from arena allocations. 32 | // 33 | // It is recommended for users to increase the tensor arena size by at least 1kb 34 | // to ensure enough additional memory is available for internal recordings. 
35 | class RecordingMicroInterpreter : public MicroInterpreter { 36 | public: 37 | RecordingMicroInterpreter(const Model* model, 38 | const MicroOpResolver& op_resolver, 39 | uint8_t* tensor_arena, size_t tensor_arena_size, 40 | ErrorReporter* error_reporter) 41 | : MicroInterpreter(model, op_resolver, 42 | RecordingMicroAllocator::Create( 43 | tensor_arena, tensor_arena_size, error_reporter), 44 | error_reporter), 45 | recording_micro_allocator_( 46 | static_cast(allocator())) {} 47 | 48 | RecordingMicroInterpreter(const Model* model, 49 | const MicroOpResolver& op_resolver, 50 | RecordingMicroAllocator* allocator, 51 | ErrorReporter* error_reporter) 52 | : MicroInterpreter(model, op_resolver, allocator, error_reporter), 53 | recording_micro_allocator_(*allocator) {} 54 | 55 | const RecordingMicroAllocator& GetMicroAllocator() const { 56 | return recording_micro_allocator_; 57 | } 58 | 59 | private: 60 | const RecordingMicroAllocator& recording_micro_allocator_; 61 | }; 62 | 63 | } // namespace tflite 64 | 65 | #endif // TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ 66 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/ceil.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/ceil.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | #include "tensorflow/lite/micro/kernels/kernel_util.h" 22 | 23 | namespace tflite { 24 | namespace ops { 25 | namespace micro { 26 | namespace ceil { 27 | 28 | constexpr int kInputTensor = 0; 29 | constexpr int kOutputTensor = 0; 30 | 31 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 32 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 33 | TF_LITE_ENSURE(context, input != nullptr); 34 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 35 | TF_LITE_ENSURE(context, output != nullptr); 36 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 37 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 38 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 39 | TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); 40 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 41 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 42 | for (int i = 0; i < output->dims->size; ++i) { 43 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 44 | } 45 | return kTfLiteOk; 46 | } 47 | 48 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 49 | const TfLiteEvalTensor* input = 50 | tflite::micro::GetEvalInput(context, node, kInputTensor); 51 | TfLiteEvalTensor* output = 52 | tflite::micro::GetEvalOutput(context, node, kOutputTensor); 53 | 54 | reference_ops::Ceil(tflite::micro::GetTensorShape(input), 55 | tflite::micro::GetTensorData(input), 56 | tflite::micro::GetTensorShape(output), 57 | tflite::micro::GetTensorData(output)); 58 | 59 | return kTfLiteOk; 60 | } 61 | } // namespace ceil 62 | 63 | TfLiteRegistration Register_CEIL() { 64 | return 
{/*init=*/nullptr, 65 | /*free=*/nullptr, 66 | /*prepare=*/ceil::Prepare, 67 | /*invoke=*/ceil::Eval, 68 | /*profiling_string=*/nullptr, 69 | /*builtin_code=*/0, 70 | /*custom_name=*/nullptr, 71 | /*version=*/0}; 72 | } 73 | 74 | } // namespace micro 75 | } // namespace ops 76 | } // namespace tflite 77 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/round.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/kernels/internal/reference/round.h" 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" 20 | #include "tensorflow/lite/kernels/kernel_util.h" 21 | #include "tensorflow/lite/micro/kernels/kernel_util.h" 22 | 23 | namespace tflite { 24 | namespace ops { 25 | namespace micro { 26 | namespace round { 27 | 28 | constexpr int kInputTensor = 0; 29 | constexpr int kOutputTensor = 0; 30 | 31 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 32 | const TfLiteTensor* input = GetInput(context, node, kInputTensor); 33 | TF_LITE_ENSURE(context, input != nullptr); 34 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor); 35 | TF_LITE_ENSURE(context, output != nullptr); 36 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); 37 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); 38 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); 39 | TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); 40 | TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); 41 | TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); 42 | for (int i = 0; i < output->dims->size; ++i) { 43 | TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); 44 | } 45 | return kTfLiteOk; 46 | } 47 | 48 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { 49 | const TfLiteEvalTensor* input = 50 | tflite::micro::GetEvalInput(context, node, kInputTensor); 51 | TfLiteEvalTensor* output = 52 | tflite::micro::GetEvalOutput(context, node, kOutputTensor); 53 | 54 | reference_ops::Round(tflite::micro::GetTensorShape(input), 55 | tflite::micro::GetTensorData(input), 56 | tflite::micro::GetTensorShape(output), 57 | tflite::micro::GetTensorData(output)); 58 | 59 | return kTfLiteOk; 60 | } 61 | } // namespace round 62 | 63 | TfLiteRegistration Register_ROUND() { 64 | 
return {/*init=*/nullptr, 65 | /*free=*/nullptr, 66 | /*prepare=*/round::Prepare, 67 | /*invoke=*/round::Eval, 68 | /*profiling_string=*/nullptr, 69 | /*builtin_code=*/0, 70 | /*custom_name=*/nullptr, 71 | /*version=*/0}; 72 | } 73 | 74 | } // namespace micro 75 | } // namespace ops 76 | } // namespace tflite 77 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 17 | 18 | #include "ruy/profiler/instrumentation.h" // from @ruy 19 | #include "tensorflow/lite/kernels/internal/common.h" 20 | #include "tensorflow/lite/kernels/internal/types.h" 21 | 22 | namespace tflite { 23 | namespace reference_ops { 24 | 25 | template 26 | inline void Requantize(const input_type* input_data, int32_t size, 27 | int32_t effective_scale_multiplier, 28 | int32_t effective_scale_shift, int32_t input_zeropoint, 29 | int32_t output_zeropoint, output_type* output_data) { 30 | ruy::profiler::ScopeLabel label("Requantize"); 31 | const bool same_scale = 32 | (effective_scale_multiplier == 1 << 30 && effective_scale_shift == 1); 33 | if (same_scale) { 34 | const bool mixed_type_int8_uint8 = 35 | std::is_same::value && 36 | std::is_same::value; 37 | const bool mixed_type_uint8_int8 = 38 | std::is_same::value && 39 | std::is_same::value; 40 | const int32_t zero_point_diff = input_zeropoint - output_zeropoint; 41 | // Fast path to do requantization for the case when just a shift of 128 is 42 | // needed. 
43 | if ((mixed_type_int8_uint8 && zero_point_diff == -128) || 44 | (mixed_type_uint8_int8 && zero_point_diff == 128)) { 45 | for (int i = 0; i < size; ++i) { 46 | output_data[i] = input_data[i] ^ 0x80; 47 | } 48 | } 49 | } 50 | static constexpr int32_t kMinOutput = std::numeric_limits::min(); 51 | static constexpr int32_t kMaxOutput = std::numeric_limits::max(); 52 | for (int i = 0; i < size; ++i) { 53 | const int32_t input = input_data[i] - input_zeropoint; 54 | const int32_t output = 55 | MultiplyByQuantizedMultiplier(input, effective_scale_multiplier, 56 | effective_scale_shift) + 57 | output_zeropoint; 58 | const int32_t clamped_output = 59 | std::max(std::min(output, kMaxOutput), kMinOutput); 60 | output_data[i] = static_cast(clamped_output); 61 | } 62 | } 63 | 64 | } // namespace reference_ops 65 | } // namespace tflite 66 | 67 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ 68 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 17 | 18 | #include "tensorflow/lite/kernels/internal/common.h" 19 | 20 | namespace tflite { 21 | namespace reference_integer_ops { 22 | 23 | inline void L2Normalization(int32_t input_zero_point, int32_t outer_size, 24 | int32_t depth, const int8_t* input_data, 25 | int8_t* output_data) { 26 | static constexpr int8_t kMinInt8 = std::numeric_limits::min(); 27 | static constexpr int8_t kMaxInt8 = std::numeric_limits::max(); 28 | // The output scale must be in sync with Prepare(). 29 | // Output is in 1/128 scale so the actual output range is nudged from [-1, 1] 30 | // to [-1, 127/128]. 31 | static constexpr int32_t kOutputScale = 7; 32 | for (int outer_index = 0; outer_index < outer_size; ++outer_index) { 33 | // int32_t = (int8_t - int8_t) ^ 2. 34 | // ([-128, 127] - [-128, 127]) ^ 2 = [0, (2^8 - 1)^2] so the accumulator is 35 | // safe from overflowing in at least 2^16 steps. 36 | int32_t acc = 0; 37 | for (int inner_index = 0; inner_index < depth; ++inner_index) { 38 | int32_t input = 39 | input_data[depth * outer_index + inner_index] - input_zero_point; 40 | acc += input * input; 41 | } 42 | int32_t inv_l2norm_multiplier; 43 | int inv_l2norm_shift; 44 | GetInvSqrtQuantizedMultiplierExp(acc, kReverseShift, &inv_l2norm_multiplier, 45 | &inv_l2norm_shift); 46 | 47 | for (int inner_index = 0; inner_index < depth; ++inner_index) { 48 | int32_t input = 49 | input_data[depth * outer_index + inner_index] - input_zero_point; 50 | 51 | // Rescale and downcast. Rescale is folded into the division. 
52 | int32_t output_in_q24 = MultiplyByQuantizedMultiplier( 53 | input, inv_l2norm_multiplier, inv_l2norm_shift + kOutputScale); 54 | output_in_q24 = 55 | std::min(static_cast(kMaxInt8), 56 | std::max(static_cast(kMinInt8), output_in_q24)); 57 | output_data[depth * outer_index + inner_index] = 58 | static_cast(output_in_q24); 59 | } 60 | } 61 | } 62 | } // namespace reference_integer_ops 63 | } // namespace tflite 64 | 65 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ 66 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/kernel_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ 17 | #define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/lite/c/common.h" 22 | #include "tensorflow/lite/kernels/internal/compatibility.h" 23 | #include "tensorflow/lite/kernels/internal/types.h" 24 | 25 | namespace tflite { 26 | namespace micro { 27 | 28 | // Returns a mutable tensor for a given input index. is_variable must be checked 29 | // during prepare when the full TfLiteTensor is available. 
30 | inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, 31 | const TfLiteNode* node, 32 | int index) { 33 | TFLITE_DCHECK(context != nullptr); 34 | TFLITE_DCHECK(node != nullptr); 35 | return context->GetEvalTensor(context, node->inputs->data[index]); 36 | } 37 | 38 | // Returns the TfLiteEvalTensor struct for a given input index in a node. 39 | inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, 40 | const TfLiteNode* node, int index) { 41 | return GetMutableEvalInput(context, node, index); 42 | } 43 | 44 | // Returns the TfLiteEvalTensor struct for a given output index in a node. 45 | inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, 46 | const TfLiteNode* node, int index) { 47 | TFLITE_DCHECK(context != nullptr); 48 | TFLITE_DCHECK(node != nullptr); 49 | return context->GetEvalTensor(context, node->outputs->data[index]); 50 | } 51 | 52 | // Returns data for a TfLiteEvalTensor struct. 53 | template 54 | T* GetTensorData(TfLiteEvalTensor* tensor) { 55 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; 56 | } 57 | 58 | // Returns const data for a TfLiteEvalTensor struct. 59 | template 60 | const T* GetTensorData(const TfLiteEvalTensor* tensor) { 61 | TFLITE_DCHECK(tensor != nullptr); 62 | return reinterpret_cast(tensor->data.raw); 63 | } 64 | 65 | // Returns the shape of a TfLiteEvalTensor struct. 66 | const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); 67 | 68 | // Return true if the given tensors have the same shape. 
69 | bool HaveSameShapes(const TfLiteEvalTensor* input1, 70 | const TfLiteEvalTensor* input2); 71 | 72 | } // namespace micro 73 | } // namespace tflite 74 | 75 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ 76 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/portable_type_to_tflitetype.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ 16 | #define TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ 17 | 18 | // Most of the definitions have been moved to this subheader so that Micro 19 | // can include it without relying on , which isn't available on all 20 | // platforms. 21 | 22 | // Arduino build defines abs as a macro here. That is invalid C++, and breaks 23 | // libc++'s header, undefine it. 24 | #ifdef abs 25 | #undef abs 26 | #endif 27 | 28 | #include 29 | 30 | #include "tensorflow/lite/c/common.h" 31 | 32 | namespace tflite { 33 | 34 | // Map statically from a C++ type to a TfLiteType. Used in interpreter for 35 | // safe casts. 
36 | // Example: 37 | // typeToTfLiteType() -> kTfLiteBool 38 | template 39 | constexpr TfLiteType typeToTfLiteType() { 40 | return kTfLiteNoType; 41 | } 42 | // Map from TfLiteType to the corresponding C++ type. 43 | // Example: 44 | // TfLiteTypeToType::Type -> bool 45 | template 46 | struct TfLiteTypeToType {}; // Specializations below 47 | 48 | // Template specialization for both typeToTfLiteType and TfLiteTypeToType. 49 | #define MATCH_TYPE_AND_TFLITE_TYPE(CPP_TYPE, TFLITE_TYPE_ENUM) \ 50 | template <> \ 51 | constexpr TfLiteType typeToTfLiteType() { \ 52 | return TFLITE_TYPE_ENUM; \ 53 | } \ 54 | template <> \ 55 | struct TfLiteTypeToType { \ 56 | using Type = CPP_TYPE; \ 57 | } 58 | 59 | // No string mapping is included here, since the TF Lite packed representation 60 | // doesn't correspond to a C++ type well. 61 | MATCH_TYPE_AND_TFLITE_TYPE(int, kTfLiteInt32); 62 | MATCH_TYPE_AND_TFLITE_TYPE(int16_t, kTfLiteInt16); 63 | MATCH_TYPE_AND_TFLITE_TYPE(int64_t, kTfLiteInt64); 64 | MATCH_TYPE_AND_TFLITE_TYPE(float, kTfLiteFloat32); 65 | MATCH_TYPE_AND_TFLITE_TYPE(unsigned char, kTfLiteUInt8); 66 | MATCH_TYPE_AND_TFLITE_TYPE(int8_t, kTfLiteInt8); 67 | MATCH_TYPE_AND_TFLITE_TYPE(bool, kTfLiteBool); 68 | MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex64); 69 | MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex128); 70 | MATCH_TYPE_AND_TFLITE_TYPE(TfLiteFloat16, kTfLiteFloat16); 71 | MATCH_TYPE_AND_TFLITE_TYPE(double, kTfLiteFloat64); 72 | 73 | } // namespace tflite 74 | #endif // TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ 75 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/dequantize.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 17 | 18 | #include 19 | 20 | #include 21 | 22 | #include "tensorflow/lite/kernels/internal/common.h" 23 | #include "tensorflow/lite/kernels/internal/types.h" 24 | 25 | namespace tflite { 26 | 27 | namespace reference_ops { 28 | 29 | // Dequantizes into a float without rounding. 30 | template 31 | inline void Dequantize(const tflite::DequantizationParams& op_params, 32 | const RuntimeShape& input_shape, 33 | const InputT* input_data, 34 | const RuntimeShape& output_shape, OutputT* output_data) { 35 | int32_t zero_point = op_params.zero_point; 36 | const double scale = op_params.scale; 37 | const int flat_size = MatchingFlatSize(input_shape, output_shape); 38 | 39 | for (int i = 0; i < flat_size; i++) { 40 | const int32_t val = input_data[i]; 41 | const OutputT result = static_cast(scale * (val - zero_point)); 42 | output_data[i] = result; 43 | } 44 | } 45 | 46 | // Dequantizes per-channel quantized tensor to float. 47 | template 48 | inline void PerChannelDequantize( 49 | const tflite::PerChannelDequantizationParams& op_params, 50 | const RuntimeShape& input_shape, const T* input_data, 51 | const RuntimeShape& output_shape, float* output_data) { 52 | // Ensure flat size is same. 
53 | MatchingFlatSize(input_shape, output_shape); 54 | 55 | const int32_t* zero_point = op_params.zero_point; 56 | const float* scale = op_params.scale; 57 | const int32_t quantized_dimension = op_params.quantized_dimension; 58 | const int32_t num_dims = input_shape.DimensionsCount(); 59 | const int32_t* dims_data = input_shape.DimsData(); 60 | std::vector current_dim(num_dims, 0); 61 | 62 | do { 63 | size_t offset = 64 | ReducedOutputOffset(num_dims, reinterpret_cast(dims_data), 65 | current_dim.data(), 0, nullptr); 66 | const int channel = current_dim[quantized_dimension]; 67 | const int32_t val = input_data[offset]; 68 | const float result = 69 | static_cast(scale[channel] * (val - zero_point[channel])); 70 | output_data[offset] = result; 71 | } while (NextIndex(num_dims, reinterpret_cast(dims_data), 72 | current_dim.data())); 73 | } 74 | 75 | } // namespace reference_ops 76 | 77 | } // namespace tflite 78 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ 79 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/memory_planner/memory_planner.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 18 | 19 | #include "tensorflow/lite/c/common.h" 20 | #include "tensorflow/lite/core/api/error_reporter.h" 21 | 22 | namespace tflite { 23 | 24 | // Interface class for planning the layout of memory buffers during the 25 | // execution of a graph. 26 | // It's designed to be used by a client that iterates in any order through the 27 | // buffers it wants to lay out, and then calls the getter functions for 28 | // information about the calculated layout. For example: 29 | // 30 | // SomeMemoryPlanner planner; 31 | // planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 32 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 33 | // planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 34 | // 35 | // int offset0; 36 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); 37 | // int offset1; 38 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); 39 | // int offset2; 40 | // TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); 41 | // const int arena_size_needed = planner.GetMaximumMemorySize(); 42 | // 43 | // The goal is for applications to be able to experiment with different layout 44 | // strategies without changing their client code, by swapping out classes that 45 | // implement this interface.= 46 | class MemoryPlanner { 47 | public: 48 | MemoryPlanner() {} 49 | virtual ~MemoryPlanner() {} 50 | 51 | // Pass information about a buffer's size and lifetime to the layout 52 | // algorithm. The order this is called implicitly assigns an index to the 53 | // result, so the buffer information that's passed into the N-th call of 54 | // this method will be used as the buffer_index argument to 55 | // GetOffsetForBuffer(). 
56 | virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, 57 | int size, int first_time_used, 58 | int last_time_used) = 0; 59 | 60 | // The largest contiguous block of memory that's needed to hold the layout. 61 | virtual size_t GetMaximumMemorySize() = 0; 62 | // How many buffers have been added to the planner. 63 | virtual int GetBufferCount() = 0; 64 | // Calculated layout offset for the N-th buffer added to the planner. 65 | virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, 66 | int buffer_index, int* offset) = 0; 67 | }; 68 | 69 | } // namespace tflite 70 | 71 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ 72 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/micro_op_resolver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ 16 | #define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | #include "tensorflow/lite/core/api/error_reporter.h" 20 | #include "tensorflow/lite/core/api/flatbuffer_conversions.h" 21 | #include "tensorflow/lite/core/api/op_resolver.h" 22 | #include "tensorflow/lite/schema/schema_generated.h" 23 | 24 | namespace tflite { 25 | 26 | // This is an interface for the OpResolver for TFLiteMicro. The differences from 27 | // the TFLite OpResolver base class are to: 28 | // * explicitly remove support for Op versions 29 | // * allow for finer grained registration of the Builtin Ops to reduce code 30 | // size for TFLiteMicro. 31 | // 32 | // We need an interface class instead of directly using MicroMutableOpResolver 33 | // because MicroMutableOpResolver is a class template with the number of 34 | // registered Ops as the template parameter. 35 | class MicroOpResolver : public OpResolver { 36 | public: 37 | typedef TfLiteStatus (*BuiltinParseFunction)(const Operator* op, 38 | ErrorReporter* error_reporter, 39 | BuiltinDataAllocator* allocator, 40 | void** builtin_data); 41 | 42 | // Returns the Op registration struct corresponding to the enum code from the 43 | // flatbuffer schema. Returns nullptr if the op is not found or if op == 44 | // BuiltinOperator_CUSTOM. 45 | virtual const TfLiteRegistration* FindOp(BuiltinOperator op) const = 0; 46 | 47 | // Returns the Op registration struct corresponding to the custom operator by 48 | // name. 49 | virtual const TfLiteRegistration* FindOp(const char* op) const = 0; 50 | 51 | // This implementation exists for compatibility with the OpResolver base class 52 | // and disregards the version parameter. 
53 | const TfLiteRegistration* FindOp(BuiltinOperator op, 54 | int version) const final { 55 | return FindOp(op); 56 | } 57 | 58 | // This implementation exists for compatibility with the OpResolver base class 59 | // and disregards the version parameter. 60 | const TfLiteRegistration* FindOp(const char* op, int version) const final { 61 | return FindOp(op); 62 | } 63 | 64 | // Returns the operator specific parsing function for the OpData for a 65 | // BuiltinOperator (if registered), else nullptr. 66 | virtual BuiltinParseFunction GetOpDataParser(BuiltinOperator op) const = 0; 67 | 68 | ~MicroOpResolver() override {} 69 | }; 70 | 71 | } // namespace tflite 72 | 73 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ 74 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/recording_simple_memory_allocator.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow/lite/micro/recording_simple_memory_allocator.h" 17 | 18 | #include 19 | 20 | #include "tensorflow/lite/kernels/internal/compatibility.h" 21 | 22 | namespace tflite { 23 | 24 | RecordingSimpleMemoryAllocator::RecordingSimpleMemoryAllocator( 25 | ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) 26 | : SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size), 27 | requested_head_bytes_(0), 28 | requested_tail_bytes_(0), 29 | used_bytes_(0), 30 | alloc_count_(0) {} 31 | 32 | RecordingSimpleMemoryAllocator::~RecordingSimpleMemoryAllocator() {} 33 | 34 | RecordingSimpleMemoryAllocator* RecordingSimpleMemoryAllocator::Create( 35 | ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { 36 | TFLITE_DCHECK(error_reporter != nullptr); 37 | TFLITE_DCHECK(buffer_head != nullptr); 38 | RecordingSimpleMemoryAllocator tmp = 39 | RecordingSimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); 40 | 41 | uint8_t* allocator_buffer = 42 | tmp.AllocateFromTail(sizeof(RecordingSimpleMemoryAllocator), 43 | alignof(RecordingSimpleMemoryAllocator)); 44 | // Use the default copy constructor to populate internal states. 
45 | return new (allocator_buffer) RecordingSimpleMemoryAllocator(tmp); 46 | } 47 | 48 | size_t RecordingSimpleMemoryAllocator::GetRequestedBytes() const { 49 | return requested_head_bytes_ + requested_tail_bytes_; 50 | } 51 | 52 | size_t RecordingSimpleMemoryAllocator::GetUsedBytes() const { 53 | return used_bytes_; 54 | } 55 | 56 | size_t RecordingSimpleMemoryAllocator::GetAllocatedCount() const { 57 | return alloc_count_; 58 | } 59 | 60 | TfLiteStatus RecordingSimpleMemoryAllocator::EnsureHeadSize(size_t size, 61 | size_t alignment) { 62 | const uint8_t* previous_head = GetHead(); 63 | TfLiteStatus status = SimpleMemoryAllocator::EnsureHeadSize(size, alignment); 64 | if (status == kTfLiteOk) { 65 | used_bytes_ += GetHead() - previous_head; 66 | requested_head_bytes_ = size; 67 | } 68 | return status; 69 | } 70 | 71 | uint8_t* RecordingSimpleMemoryAllocator::AllocateFromTail(size_t size, 72 | size_t alignment) { 73 | const uint8_t* previous_tail = GetTail(); 74 | uint8_t* result = SimpleMemoryAllocator::AllocateFromTail(size, alignment); 75 | if (result != nullptr) { 76 | used_bytes_ += previous_tail - GetTail(); 77 | requested_tail_bytes_ += size; 78 | alloc_count_++; 79 | } 80 | return result; 81 | } 82 | 83 | } // namespace tflite 84 | -------------------------------------------------------------------------------- /model/Convert Trained Model To TFLite.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Converting a trained model to tflite\n", 8 | "https://www.tensorflow.org/lite/microcontrollers/build_convert#model_conversion" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Convert model to tflite" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "import tensorflow as tf\n", 25 | "import 
numpy as np" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 2, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "training_spectrogram = np.load('training_spectrogram.npz')\n", 35 | "validation_spectrogram = np.load('validation_spectrogram.npz')\n", 36 | "test_spectrogram = np.load('test_spectrogram.npz')\n", 37 | "\n", 38 | "X_train = training_spectrogram['X']\n", 39 | "X_validate = validation_spectrogram['X']\n", 40 | "X_test = test_spectrogram['X']\n", 41 | "\n", 42 | "complete_train_X = np.concatenate((X_train, X_validate, X_test))\n", 43 | "# complete_train_X = X_validate" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "converter2 = tf.lite.TFLiteConverter.from_saved_model(\"fully_trained.model\")\n", 53 | "converter2.optimizations = [tf.lite.Optimize.DEFAULT]\n", 54 | "def representative_dataset_gen():\n", 55 | " for i in range(0, len(complete_train_X), 100):\n", 56 | " # Get sample input data as a numpy array in a method of your choosing.\n", 57 | " yield [complete_train_X[i:i+100]]\n", 58 | "converter2.representative_dataset = representative_dataset_gen\n", 59 | "# converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n", 60 | "converter2.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n", 61 | "tflite_quant_model = converter2.convert()\n", 62 | "open(\"converted_model.tflite\", \"wb\").write(tflite_quant_model)" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "# To convert to C++\n", 70 | "This will run a command line too to convert out tflite model into C code." 
71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "!xxd -i converted_model.tflite > model_data.cc" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [] 88 | } 89 | ], 90 | "metadata": { 91 | "kernelspec": { 92 | "display_name": "Python 3", 93 | "language": "python", 94 | "name": "python3" 95 | }, 96 | "language_info": { 97 | "codemirror_mode": { 98 | "name": "ipython", 99 | "version": 3 100 | }, 101 | "file_extension": ".py", 102 | "mimetype": "text/x-python", 103 | "name": "python", 104 | "nbconvert_exporter": "python", 105 | "pygments_lexer": "ipython3", 106 | "version": "3.6.9" 107 | } 108 | }, 109 | "nbformat": 4, 110 | "nbformat_minor": 4 111 | } 112 | -------------------------------------------------------------------------------- /firmware/src/CommandProcessor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "CommandProcessor.h" 3 | 4 | const char *words[] = { 5 | "forward", 6 | "backward", 7 | "left", 8 | "right", 9 | "_nonsense", 10 | }; 11 | 12 | void commandQueueProcessorTask(void *param) 13 | { 14 | CommandProcessor *commandProcessor = (CommandProcessor *)param; 15 | while (true) 16 | { 17 | uint16_t commandIndex = 0; 18 | if (xQueueReceive(commandProcessor->m_command_queue_handle, &commandIndex, portMAX_DELAY) == pdTRUE) 19 | { 20 | commandProcessor->processCommand(commandIndex); 21 | } 22 | } 23 | } 24 | 25 | int calcDuty(int ms) 26 | { 27 | // 50Hz = 20ms period 28 | return (65536 * ms) / 20000; 29 | } 30 | 31 | const int leftForward = 1600; 32 | const int leftBackward = 1400; 33 | const int leftStop = 1500; 34 | const int rightBackward = 1600; 35 | const int rightForward = 1445; 36 | const int rightStop = 1500; 37 | 38 | void CommandProcessor::processCommand(uint16_t commandIndex) 39 | { 40 | 
digitalWrite(GPIO_NUM_2, HIGH); 41 | switch (commandIndex) 42 | { 43 | case 0: // forward 44 | ledcWrite(0, calcDuty(leftForward)); 45 | ledcWrite(1, calcDuty(rightForward)); 46 | vTaskDelay(1000 / portTICK_PERIOD_MS); 47 | break; 48 | case 1: // backward 49 | ledcWrite(0, calcDuty(leftBackward)); 50 | ledcWrite(1, calcDuty(rightBackward)); 51 | vTaskDelay(1000 / portTICK_PERIOD_MS); 52 | break; 53 | case 2: // left 54 | ledcWrite(0, calcDuty(leftBackward)); 55 | ledcWrite(1, calcDuty(rightForward)); 56 | vTaskDelay(500 / portTICK_PERIOD_MS); 57 | break; 58 | case 3: // right 59 | ledcWrite(0, calcDuty(leftForward)); 60 | ledcWrite(1, calcDuty(rightBackward)); 61 | vTaskDelay(500 / portTICK_PERIOD_MS); 62 | break; 63 | } 64 | digitalWrite(GPIO_NUM_2, LOW); 65 | ledcWrite(0, calcDuty(leftStop)); // stop 66 | ledcWrite(1, calcDuty(rightStop)); // stop 67 | } 68 | 69 | CommandProcessor::CommandProcessor() 70 | { 71 | pinMode(GPIO_NUM_2, OUTPUT); 72 | // setup the motors 73 | ledcSetup(0, 50, 16); 74 | ledcAttachPin(GPIO_NUM_13, 0); 75 | ledcSetup(1, 50, 16); 76 | ledcAttachPin(GPIO_NUM_12, 1); 77 | ledcWrite(0, calcDuty(1500)); // left 78 | ledcWrite(1, calcDuty(1500)); // right 79 | 80 | // allow up to 5 commands to be in flight at once 81 | m_command_queue_handle = xQueueCreate(5, sizeof(uint16_t)); 82 | if (!m_command_queue_handle) 83 | { 84 | Serial.println("Failed to create command queue"); 85 | } 86 | // kick off the command processor task 87 | TaskHandle_t command_queue_task_handle; 88 | xTaskCreate(commandQueueProcessorTask, "Command Queue Processor", 1024, this, 1, &command_queue_task_handle); 89 | } 90 | 91 | void CommandProcessor::queueCommand(uint16_t commandIndex, float best_score) 92 | { 93 | // unsigned long now = millis(); 94 | if (commandIndex != 5 && commandIndex != -1) 95 | { 96 | Serial.printf("***** %ld Detected command %s(%f)\n", millis(), words[commandIndex], best_score); 97 | if (xQueueSendToBack(m_command_queue_handle, &commandIndex, 0) != 
pdTRUE) 98 | { 99 | Serial.println("No more space for command"); 100 | } 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /firmware/lib/neural_network/src/NeuralNetwork.cpp: -------------------------------------------------------------------------------- 1 | #include "NeuralNetwork.h" 2 | #include "model.h" 3 | #include "tensorflow/lite/micro/all_ops_resolver.h" 4 | #include "tensorflow/lite/micro/micro_error_reporter.h" 5 | #include "tensorflow/lite/micro/micro_interpreter.h" 6 | #include "tensorflow/lite/schema/schema_generated.h" 7 | #include "tensorflow/lite/version.h" 8 | 9 | // approximate working size of our model 10 | const int kArenaSize = 50000; 11 | 12 | NeuralNetwork::NeuralNetwork() 13 | { 14 | m_error_reporter = new tflite::MicroErrorReporter(); 15 | 16 | m_tensor_arena = (uint8_t *)malloc(kArenaSize); 17 | if (!m_tensor_arena) 18 | { 19 | TF_LITE_REPORT_ERROR(m_error_reporter, "Could not allocate arena"); 20 | return; 21 | } 22 | TF_LITE_REPORT_ERROR(m_error_reporter, "Loading model"); 23 | 24 | m_model = tflite::GetModel(converted_model_tflite); 25 | if (m_model->version() != TFLITE_SCHEMA_VERSION) 26 | { 27 | TF_LITE_REPORT_ERROR(m_error_reporter, "Model provided is schema version %d not equal to supported version %d.", 28 | m_model->version(), TFLITE_SCHEMA_VERSION); 29 | return; 30 | } 31 | // This pulls in the operators implementations we need 32 | m_resolver = new tflite::MicroMutableOpResolver<10>(); 33 | m_resolver->AddConv2D(); 34 | m_resolver->AddMaxPool2D(); 35 | m_resolver->AddFullyConnected(); 36 | m_resolver->AddMul(); 37 | m_resolver->AddAdd(); 38 | m_resolver->AddLogistic(); 39 | m_resolver->AddReshape(); 40 | m_resolver->AddQuantize(); 41 | m_resolver->AddDequantize(); 42 | m_resolver->AddSoftmax(); 43 | 44 | // Build an interpreter to run the model with. 
// Tear down in reverse order of construction.  The tensor arena was
// malloc'd in the constructor, so it is free'd (not delete'd) here.
NeuralNetwork::~NeuralNetwork()
{
    delete m_interpreter;
    delete m_resolver;
    free(m_tensor_arena);
    delete m_error_reporter;
}

// Returns a pointer to the float input tensor's backing storage; callers
// write the audio features directly into this buffer before calling
// predict().
float *NeuralNetwork::getInputBuffer()
{
    return input->data.f;
}

// Returns a pointer to the float output tensor's backing storage
// (the per-class scores read by predict()).
float *NeuralNetwork::getOutputBuffer()
{
    return output->data.f;
}

// Runs inference on whatever is currently in the input buffer and returns
// the best-scoring of the 5 output classes.
// NOTE: index stays -1 (and score 0) if no output score is strictly
// greater than 0 - callers must handle that sentinel.
NNResult NeuralNetwork::predict()
{
    m_interpreter->Invoke();
    // work out the "best output": linear scan for the maximum score
    float best_score = 0;
    int best_index = -1;
    for (int i = 0; i < 5; i++)
    {
        float score = output->data.f[i];
        if (score > best_score)
        {
            best_score = score;
            best_index = i;
        }
    }
    return {
        .score = best_score,
        .index = best_index};
}
// i2s config for using the internal ADC
i2s_config_t adcI2SConfig = {
    .mode = (i2s_mode_t)(I2S_MODE_MASTER | I2S_MODE_RX | I2S_MODE_ADC_BUILT_IN),
    .sample_rate = 16000, // 16 kHz sampling
    .bits_per_sample = I2S_BITS_PER_SAMPLE_16BIT,
    .channel_format = I2S_CHANNEL_FMT_ONLY_LEFT,
    .communication_format = I2S_COMM_FORMAT_I2S_LSB,
    .intr_alloc_flags = ESP_INTR_FLAG_LEVEL1,
    .dma_buf_count = 4,
    .dma_buf_len = 64,
    .use_apll = false,
    .tx_desc_auto_clear = false,
    .fixed_mclk = 0};

// i2s config for reading from both channels of I2S
i2s_config_t i2sMemsConfigBothChannels = {
    .mode = (i2s_mode_t)(I2S_MODE_MASTER | I2S_MODE_RX),
    .sample_rate = 16000, // 16 kHz sampling
    .bits_per_sample = I2S_BITS_PER_SAMPLE_32BIT, // MEMS mics deliver 32-bit frames
    .channel_format = I2S_MIC_CHANNEL,
    .communication_format = i2s_comm_format_t(I2S_COMM_FORMAT_I2S),
    .intr_alloc_flags = ESP_INTR_FLAG_LEVEL1,
    .dma_buf_count = 4,
    .dma_buf_len = 64,
    .use_apll = false,
    .tx_desc_auto_clear = false,
    .fixed_mclk = 0};

// i2s microphone pins (actual GPIO numbers come from config.h)
i2s_pin_config_t i2s_mic_pins = {
    .bck_io_num = I2S_MIC_SERIAL_CLOCK,
    .ws_io_num = I2S_MIC_LEFT_RIGHT_CLOCK,
    .data_out_num = I2S_PIN_NO_CHANGE, // input only, no TX pin
    .data_in_num = I2S_MIC_SERIAL_DATA};

// This task does all the heavy lifting for our application:
// it blocks until the sampler task notifies it that audio is ready,
// then runs the command detector over the new samples.
void applicationTask(void *param)
{
    CommandDetector *commandDetector = static_cast<CommandDetector *>(param);

    const TickType_t xMaxBlockTime = pdMS_TO_TICKS(100);
    while (true)
    {
        // wait for some audio samples to arrive (notification from the
        // i2s sampler); times out every 100 ms so the task never hangs
        uint32_t ulNotificationValue = ulTaskNotifyTake(pdTRUE, xMaxBlockTime);
        if (ulNotificationValue > 0)
        {
            commandDetector->run();
        }
    }
}

// Wire everything together: audio input, neural-network command detector,
// command processor, and the background detection task.
void setup()
{
    Serial.begin(115200);
    delay(1000);
    Serial.println("Starting up");

    // make sure we don't get killed for our long running tasks
    esp_task_wdt_init(10, false);

    // start up the I2S input (from either an I2S microphone or Analogue microphone via the ADC)
#ifdef USE_I2S_MIC_INPUT
    // Direct i2s input from INMP441 or the SPH0645
    I2SSampler *i2s_sampler = new I2SMicSampler(i2s_mic_pins, false);
#else
    // Use the internal ADC
    I2SSampler *i2s_sampler = new ADCSampler(ADC_UNIT_1, ADC_MIC_CHANNEL);
#endif
    // the command processor (owns the motor control queue/task)
    CommandProcessor *command_processor = new CommandProcessor();

    // create our application
    CommandDetector *commandDetector = new CommandDetector(i2s_sampler, command_processor);

    // set up the i2s sample writer task (pinned to core 0)
    TaskHandle_t applicationTaskHandle;
    xTaskCreatePinnedToCore(applicationTask, "Command Detect", 8192, commandDetector, 1, &applicationTaskHandle, 0);

    // start sampling from i2s device - use I2S_NUM_0 as that's the one that supports the internal ADC
#ifdef USE_I2S_MIC_INPUT
    i2s_sampler->start(I2S_NUM_0, i2sMemsConfigBothChannels, applicationTaskHandle);
#else
    i2s_sampler->start(I2S_NUM_0, adcI2SConfig, applicationTaskHandle);
#endif
}

// All work happens in FreeRTOS tasks; the Arduino loop just idles.
void loop()
{
    vTaskDelay(pdMS_TO_TICKS(1000));
}
// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover.
// Returns the symmetric (per-side) padding needed so that a convolution with
// the given stride/dilation produces out_size outputs, clamped at zero.
inline int ComputePadding(int stride, int dilation_rate, int in_size,
                          int filter_size, int out_size) {
  const int dilated_filter = (filter_size - 1) * dilation_rate + 1;
  const int half = ((out_size - 1) * stride + dilated_filter - in_size) / 2;
  return half > 0 ? half : 0;
}

// It's not guaranteed that padding is symmetric. It's important to keep
// offset for algorithms need all paddings.
// Returns the smaller (leading) padding; *offset receives the extra unit
// (0 or 1) that goes on the trailing side when the total is odd.
inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size,
                                    int filter_size, int out_size,
                                    int* offset) {
  const int dilated_filter = (filter_size - 1) * dilation_rate + 1;
  int total = (out_size - 1) * stride + dilated_filter - in_size;
  if (total < 0) {
    total = 0;
  }
  *offset = total % 2;
  return total / 2;
}
44 | inline int ComputeOutSize(TfLitePadding padding, int image_size, 45 | int filter_size, int stride, int dilation_rate = 1) { 46 | int effective_filter_size = (filter_size - 1) * dilation_rate + 1; 47 | switch (padding) { 48 | case kTfLitePaddingSame: 49 | return (image_size + stride - 1) / stride; 50 | case kTfLitePaddingValid: 51 | return (image_size + stride - effective_filter_size) / stride; 52 | default: 53 | return 0; 54 | } 55 | } 56 | 57 | inline TfLitePaddingValues ComputePaddingHeightWidth( 58 | int stride_height, int stride_width, int dilation_rate_height, 59 | int dilation_rate_width, int in_height, int in_width, int filter_height, 60 | int filter_width, TfLitePadding padding, int* out_height, int* out_width) { 61 | *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, 62 | dilation_rate_width); 63 | *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, 64 | dilation_rate_height); 65 | 66 | TfLitePaddingValues padding_values; 67 | int offset = 0; 68 | padding_values.height = 69 | ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, 70 | filter_height, *out_height, &offset); 71 | padding_values.height_offset = offset; 72 | padding_values.width = 73 | ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, 74 | filter_width, *out_width, &offset); 75 | padding_values.width_offset = offset; 76 | return padding_values; 77 | } 78 | } // namespace tflite 79 | 80 | #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ 81 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
// Quantized mean over the height/width axes of a rank-4 (NHWC) tensor,
// producing one value per (batch, channel).  The multiplier/shift pair
// rescales from input to output quantization scale.
template <typename integer_type>
inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
                 int32_t shift, const RuntimeShape& unextended_input_shape,
                 const integer_type* input_data, int32_t input_zero_point,
                 const RuntimeShape& unextended_output_shape,
                 integer_type* output_data, int32_t output_zero_point) {
  // Current implementation only supports dimension equals 4 and simultaneous
  // reduction over width and height.
  TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int output_batch = output_shape.Dims(0);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_depth = output_shape.Dims(3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  // Every spatial position contributes to the mean.
  const int num_elements_in_axis = input_width * input_height;

  // Only the {1,2} (H,W) axis pair is supported, in either order, and the
  // reduced output dims must already be 1.
  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_height, 1);
  TFLITE_CHECK_EQ(output_width, 1);

  // Saturation bounds of the (narrow) integer element type, widened to
  // int32 for the clamp below.
  static constexpr int32_t kMinInt = std::numeric_limits<integer_type>::min();
  static constexpr int32_t kMaxInt = std::numeric_limits<integer_type>::max();

  for (int out_b = 0; out_b < output_batch; ++out_b) {
    for (int out_d = 0; out_d < output_depth; ++out_d) {
      // Accumulate the H x W window in the zero-point-free domain.
      int32_t acc = 0;
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] -
                 input_zero_point;
        }
      }
      // Rescale from input scale to output scale.
      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      // Divide by the element count with round-half-away-from-zero.
      acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis
                    : (acc - num_elements_in_axis / 2) / num_elements_in_axis;
      acc += output_zero_point;
      // Saturate before narrowing back to the element type.
      acc = std::min(std::max(acc, kMinInt), kMaxInt);
      output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
          static_cast<integer_type>(acc);
    }
  }
}
// Helper class to perform a simulated kernel (i.e. TfLiteRegistration)
// lifecycle (init, prepare, invoke). All internal allocations are handled by
// this class. Simply pass in the registration, list of required tensors,
// inputs array, outputs array, and any pre-builtin data. Calling Invoke()
// will automatically walk the kernel and outputs will be ready on the
// TfLiteTensor output provided during construction.
class KernelRunner {
 public:
  KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors,
               int tensors_size, TfLiteIntArray* inputs,
               TfLiteIntArray* outputs, void* builtin_data,
               ErrorReporter* error_reporter);

  // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct.
  // Any exceptions will be reported through the error_reporter and returned
  // as a status code here.
  TfLiteStatus InitAndPrepare(const char* init_data = nullptr);

  // Calls init, prepare, and invoke on a given TfLiteRegistration pointer.
  // After successful invoke, results will be available in the output tensor
  // as passed into the constructor of this class.
  TfLiteStatus Invoke();

 protected:
  // Static TfLiteContext callbacks handed to the kernel under test.
  static TfLiteTensor* GetTensor(const struct TfLiteContext* context,
                                 int tensor_index);
  static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context,
                                         int tensor_index);
  static void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes);
  static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context,
                                                  size_t bytes,
                                                  int* buffer_index);
  static void* GetScratchBuffer(TfLiteContext* context, int buffer_index);
  static void ReportOpError(struct TfLiteContext* context, const char* format,
                            ...);

 private:
  // Maximum number of scratch buffers a kernel may request during a test.
  static constexpr int kNumScratchBuffers_ = 5;

  // Static backing store used for all allocations made during the run.
  static constexpr int kKernelRunnerBufferSize_ = 10000;
  static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_];

  SimpleMemoryAllocator* allocator_ = nullptr;
  const TfLiteRegistration& registration_;
  TfLiteTensor* tensors_ = nullptr;
  ErrorReporter* error_reporter_ = nullptr;

  TfLiteContext context_ = {};
  TfLiteNode node_ = {};

  int scratch_buffer_count_ = 0;
  uint8_t* scratch_buffers_[kNumScratchBuffers_];
};
// TODO(ycling): Refactoring. Remove BroadcastLogical and use the more
// generalized and efficient BroadcastBinaryFunction.
//
// Also appears to duplicate MinimumMaximum.
//
// R: Result type. T1: Input 1 type. T2: Input 2 type.
// Applies `func` elementwise with numpy-style broadcasting over up-to-rank-4
// inputs; "Slow" because it walks every output coordinate individually.
template <typename R, typename T1, typename T2>
inline void BroadcastBinaryFunction4DSlow(
    const RuntimeShape& unextended_input1_shape, const T1* input1_data,
    const RuntimeShape& unextended_input2_shape, const T2* input2_data,
    const RuntimeShape& unextended_output_shape, R* output_data,
    R (*func)(T1, T2)) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  // Left-extend the output shape to rank 4 with size-1 dimensions.
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  // Per-input descriptors that map output coordinates back to (possibly
  // broadcast) input indices.
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);

  for (int b = 0; b < output_shape.Dims(0); ++b) {
    for (int y = 0; y < output_shape.Dims(1); ++y) {
      for (int x = 0; x < output_shape.Dims(2); ++x) {
        for (int c = 0; c < output_shape.Dims(3); ++c) {
          auto out_idx = Offset(output_shape, b, y, x, c);
          auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
          auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
          auto in1_val = input1_data[in1_idx];
          auto in2_val = input2_data[in2_idx];
          output_data[out_idx] = func(in1_val, in2_val);
        }
      }
    }
  }
}
67 | template 68 | inline void BinaryFunction(const RuntimeShape& input1_shape, 69 | const T1* input1_data, 70 | const RuntimeShape& input2_shape, 71 | const T2* input2_data, 72 | const RuntimeShape& output_shape, R* output_data, 73 | R (*func)(T1, T2)) { 74 | const int flat_size = 75 | MatchingFlatSize(input1_shape, input2_shape, output_shape); 76 | for (int i = 0; i < flat_size; ++i) { 77 | output_data[i] = func(input1_data[i], input2_data[i]); 78 | } 79 | } 80 | 81 | } // namespace reference_ops 82 | } // namespace tflite 83 | 84 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ 85 | -------------------------------------------------------------------------------- /firmware/lib/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 16 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ 17 | 18 | #include "tensorflow/lite/c/common.h" 19 | 20 | namespace tflite { 21 | namespace ops { 22 | namespace micro { 23 | 24 | // Forward declaration of all micro op kernel registration methods. These 25 | // registrations are included with the standard `BuiltinOpResolver`. 
//
// This header is particularly useful in cases where only a subset of ops are
// needed. In such cases, the client can selectively add only the registrations
// their model requires, using a custom `(Micro)MutableOpResolver`. Selective
// registration in turn allows the linker to strip unused kernels.

TfLiteRegistration Register_ABS();
TfLiteRegistration Register_ADD();
TfLiteRegistration Register_ARG_MAX();
TfLiteRegistration Register_ARG_MIN();
TfLiteRegistration Register_AVERAGE_POOL_2D();
TfLiteRegistration Register_CEIL();
// TODO(b/160234179): Change custom OPs to also return by value.
TfLiteRegistration* Register_CIRCULAR_BUFFER();
TfLiteRegistration Register_CONV_2D();
TfLiteRegistration Register_CONCATENATION();
TfLiteRegistration Register_COS();
TfLiteRegistration Register_DEPTHWISE_CONV_2D();
TfLiteRegistration Register_DEQUANTIZE();
TfLiteRegistration Register_EQUAL();
TfLiteRegistration Register_FLOOR();
TfLiteRegistration Register_FULLY_CONNECTED();
TfLiteRegistration Register_GREATER();
TfLiteRegistration Register_GREATER_EQUAL();
TfLiteRegistration Register_HARD_SWISH();
TfLiteRegistration Register_LESS();
TfLiteRegistration Register_LESS_EQUAL();
TfLiteRegistration Register_LOG();
TfLiteRegistration Register_LOGICAL_AND();
TfLiteRegistration Register_LOGICAL_NOT();
TfLiteRegistration Register_LOGICAL_OR();
TfLiteRegistration Register_LOGISTIC();
TfLiteRegistration Register_MAXIMUM();
TfLiteRegistration Register_MAX_POOL_2D();
TfLiteRegistration Register_MEAN();
TfLiteRegistration Register_MINIMUM();
TfLiteRegistration Register_MUL();
TfLiteRegistration Register_NEG();
TfLiteRegistration Register_NOT_EQUAL();
TfLiteRegistration Register_PACK();
TfLiteRegistration Register_PAD();
TfLiteRegistration Register_PADV2();
TfLiteRegistration Register_PRELU();
TfLiteRegistration Register_QUANTIZE();
TfLiteRegistration Register_REDUCE_MAX();
TfLiteRegistration Register_RELU();
TfLiteRegistration Register_RELU6();
TfLiteRegistration Register_RESHAPE();
TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR();
TfLiteRegistration Register_ROUND();
TfLiteRegistration Register_RSQRT();
TfLiteRegistration Register_SIN();
TfLiteRegistration Register_SOFTMAX();
TfLiteRegistration Register_SPLIT();
TfLiteRegistration Register_SPLIT_V();
TfLiteRegistration Register_SQRT();
TfLiteRegistration Register_SQUARE();
TfLiteRegistration Register_STRIDED_SLICE();
TfLiteRegistration Register_SUB();
TfLiteRegistration Register_SVDF();
TfLiteRegistration Register_UNPACK();
TfLiteRegistration Register_L2_NORMALIZATION();
TfLiteRegistration Register_TANH();

}  // namespace micro
}  // namespace ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
// L2-normalizes each vector along the trailing (depth) dimension:
// output = input / max(sqrt(sum(input^2)), epsilon).
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const float* input_data,
                            const RuntimeShape& output_shape,
                            float* output_data, float epsilon = 1e-6) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int i = 0; i < outer_size; ++i) {
    float squared_l2_norm = 0;
    for (int c = 0; c < depth; ++c) {
      const float val = input_data[depth * i + c];
      squared_l2_norm += val * val;
    }
    float l2_norm = std::sqrt(squared_l2_norm);
    // Guard against division by zero for all-zero vectors.
    l2_norm = std::max(l2_norm, epsilon);
    for (int c = 0; c < depth; ++c) {
      output_data[depth * i + c] = input_data[depth * i + c] / l2_norm;
    }
  }
}

// Quantized (uint8) variant: normalizes in the zero-point-free domain using
// a fixed-point reciprocal square root, then re-centers on the fixed output
// zero point of 128.
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const uint8_t* input_data,
                            const RuntimeShape& output_shape,
                            uint8_t* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int32_t input_zero_point = op_params.input_zero_point;

  for (int i = 0; i < outer_size; ++i) {
    // Accumulate the squared L2 norm after removing the input zero point.
    int32_t square_l2_norm = 0;
    for (int c = 0; c < depth; c++) {
      int32_t diff = input_data[depth * i + c] - input_zero_point;
      square_l2_norm += diff * diff;
    }
    // Fixed-point multiplier/shift approximating 1/sqrt(square_l2_norm).
    int32_t inv_l2norm_multiplier;
    int inv_l2norm_shift;
    GetInvSqrtQuantizedMultiplierExp(square_l2_norm, kReverseShift,
                                     &inv_l2norm_multiplier, &inv_l2norm_shift);
    for (int c = 0; c < depth; c++) {
      int32_t diff = input_data[depth * i + c] - input_zero_point;
      int32_t rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          128 * diff, inv_l2norm_multiplier, inv_l2norm_shift);
      // Re-center on the fixed output zero point (128) and clamp to uint8.
      int32_t unclamped_output_val = 128 + rescaled_diff;
      int32_t output_val =
          std::min(static_cast<int32_t>(255),
                   std::max(static_cast<int32_t>(0), unclamped_output_val));
      output_data[depth * i + c] = static_cast<uint8_t>(output_val);
    }
  }
}

}  // namespace reference_ops
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_