├── .DS_Store
├── dl
│   ├── .DS_Store
│   ├── pickle_initial_model_weights.p
│   ├── pickle_masked_processed_color_1209.p
│   ├── pickle_unmasked_processed_color_1209.p
│   └── ActivationLayer.h
├── source
│   ├── .DS_Store
│   ├── simulation
│   │   ├── .DS_Store
│   │   ├── simulation-batch
│   │   ├── simulation-devices
│   │   ├── simulation-comparison
│   │   ├── simulation-local-episodes
│   │   ├── simulation-results
│   │   │   ├── .DS_Store
│   │   │   ├── results_batch.p
│   │   │   ├── results_device.p
│   │   │   ├── figures
│   │   │   │   ├── batch-val.pdf
│   │   │   │   ├── local-val.pdf
│   │   │   │   ├── batch-train.pdf
│   │   │   │   ├── device-train.pdf
│   │   │   │   ├── devices-val.pdf
│   │   │   │   ├── local-train.pdf
│   │   │   │   ├── comparison-random-val.pdf
│   │   │   │   ├── comparison-pretrain-val.pdf
│   │   │   │   ├── comparison-random-train.pdf
│   │   │   │   └── comparison-pretrain-train.pdf
│   │   │   └── results_local_episodes.p
│   │   ├── simulation-comparison-pretrained
│   │   ├── simulation.dSYM
│   │   │   └── Contents
│   │   │   │   ├── Resources
│   │   │   │   │   └── DWARF
│   │   │   │   │   │   └── simulation
│   │   │   │   └── Info.plist
│   │   ├── simulation-batch.dSYM
│   │   │   └── Contents
│   │   │   │   ├── Resources
│   │   │   │   │   └── DWARF
│   │   │   │   │   │   └── simulation-batch
│   │   │   │   └── Info.plist
│   │   ├── simulation-devices.dSYM
│   │   │   └── Contents
│   │   │   │   ├── Resources
│   │   │   │   │   └── DWARF
│   │   │   │   │   │   └── simulation-devices
│   │   │   │   └── Info.plist
│   │   ├── simulation-comparison.dSYM
│   │   │   └── Contents
│   │   │   │   ├── Resources
│   │   │   │   │   └── DWARF
│   │   │   │   │   │   └── simulation-comparison
│   │   │   │   └── Info.plist
│   │   ├── simulation-comparison-pretrained.dSYM
│   │   │   └── Contents
│   │   │   │   ├── Resources
│   │   │   │   │   └── DWARF
│   │   │   │   │   │   └── simulation-comparison-pretrained
│   │   │   │   └── Info.plist
│   │   └── FCLayer.h
│   └── arduino_training_final_v3
│   │   ├── model_settings.cpp
│   │   ├── arduino_main.cpp
│   │   ├── main_functions.h
│   │   ├── FCLayer.h
│   │   ├── person_detect_model_data.h
│   │   ├── model_settings.h
│   │   ├── detection_responder.h
│   │   ├── arduino_detection_responder.cpp
│   │   └── image_provider.h
├── previous_iterations
│   ├── .DS_Store
│   ├── person_detection_arducam_5mp_plus
│   │   ├── .DS_Store
│   │   ├── model_settings.cpp
│   │   ├── test_read_write.py
│   │   ├── arduino_main.cpp
│   │   ├── FCLayer.h
│   │   ├── main_functions.h
│   │   ├── person_detect_model_data.h
│   │   ├── model_settings.h
│   │   ├── detection_responder.h
│   │   ├── arduino_detection_responder.cpp
│   │   └── image_provider.h
│   ├── arduino_training_final
│   │   ├── model_settings.cpp
│   │   ├── arduino_main.cpp
│   │   ├── FCLayer.h
│   │   ├── main_functions.h
│   │   ├── person_detect_model_data.h
│   │   ├── model_settings.h
│   │   ├── detection_responder.h
│   │   ├── arduino_detection_responder.cpp
│   │   └── image_provider.h
│   ├── arduino_training_final_v2
│   │   ├── model_settings.cpp
│   │   ├── arduino_main.cpp
│   │   ├── main_functions.h
│   │   ├── FCLayer.h
│   │   ├── person_detect_model_data.h
│   │   ├── model_settings.h
│   │   ├── detection_responder.h
│   │   ├── arduino_detection_responder.cpp
│   │   └── image_provider.h
│   ├── test_arduino_read_write.ino
│   └── model_settings-original.h
├── .gitignore
├── tensorflow
│   └── lite
│   │   ├── string_type.h
│   │   ├── micro
│   │   │   ├── tools
│   │   │   │   └── make
│   │   │   │   │   └── downloads
│   │   │   │   │   │   ├── kissfft
│   │   │   │   │   │   │   ├── tools
│   │   │   │   │   │   │   │   └── kiss_fftr.h
│   │   │   │   │   │   │   └── COPYING
│   │   │   │   │   │   └── cmsis
│   │   │   │   │   │   │   └── CMSIS
│   │   │   │   │   │   │   │   └── NN
│   │   │   │   │   │   │   │   │   ├── Source
│   │   │   │   │   │   │   │   │   │   ├── ReshapeFunctions
│   │   │   │   │   │   │   │   │   │   │   └── arm_reshape_s8.c
│   │   │   │   │   │   │   │   │   │   ├── ActivationFunctions
│   │   │   │   │   │   │   │   │   │   │   └── arm_relu6_s8.c
│   │   │   │   │   │   │   │   │   │   ├── ConcatenationFunctions
│   │   │   │   │   │   │   │   │   │   │   ├── arm_concatenation_s8_w.c
│   │   │   │   │   │   │   │   │   │   │   ├── arm_concatenation_s8_x.c
│   │   │   │   │   │   │   │   │   │   │   ├── arm_concatenation_s8_z.c
│   │   │   │   │   │   │   │   │   │   │   └── arm_concatenation_s8_y.c
│   │   │   │   │   │   │   │   │   │   ├── NNSupportFunctions
│   │   │   │   │   │   │   │   │   │   │   ├── arm_nn_accumulate_q7_to_q15.c
│   │   │   │   │   │   │   │   │   │   │   └── arm_nn_add_q7.c
│   │   │   │   │   │   │   │   │   │   └── SoftmaxFunctions
│   │   │   │   │   │   │   │   │   │   │   └── arm_softmax_with_batch_q7.c
│   │   │   │   │   │   │   │   │   └── Include
│   │   │   │   │   │   │   │   │   │   └── arm_nn_tables.h
│   │   │   ├── testing
│   │   │   │   └── test_conv_model.h
│   │   │   ├── debug_log.h
│   │   │   ├── benchmarks
│   │   │   │   └── keyword_scrambled_model_data.h
│   │   │   ├── kernels
│   │   │   │   ├── ethosu.cpp
│   │   │   │   ├── micro_utils.h
│   │   │   │   ├── activation_utils.h
│   │   │   │   ├── floor.cpp
│   │   │   │   └── neg.cpp
│   │   │   ├── micro_time.h
│   │   │   ├── micro_error_reporter.h
│   │   │   ├── micro_optional_debug_tools.h
│   │   │   ├── arduino
│   │   │   │   └── debug_log.cpp
│   │   │   ├── compatibility.h
│   │   │   ├── micro_error_reporter.cpp
│   │   │   ├── micro_string.h
│   │   │   ├── all_ops_resolver.h
│   │   │   ├── micro_profiler.cpp
│   │   │   ├── memory_helpers.h
│   │   │   ├── memory_planner
│   │   │   │   ├── linear_memory_planner.h
│   │   │   │   └── linear_memory_planner.cpp
│   │   │   └── micro_time.cpp
│   │   ├── core
│   │   │   └── api
│   │   │   │   ├── tensor_utils.h
│   │   │   │   ├── error_reporter.cpp
│   │   │   │   ├── tensor_utils.cpp
│   │   │   │   ├── op_resolver.h
│   │   │   │   └── error_reporter.h
│   │   ├── experimental
│   │   │   └── microfrontend
│   │   │   │   └── lib
│   │   │   │   │   ├── log_scale_util.c
│   │   │   │   │   ├── fft_util.h
│   │   │   │   │   ├── log_lut.h
│   │   │   │   │   ├── log_scale.h
│   │   │   │   │   ├── fft.h
│   │   │   │   │   ├── noise_reduction.h
│   │   │   │   │   ├── pcan_gain_control.h
│   │   │   │   │   ├── window.h
│   │   │   │   │   ├── log_scale_util.h
│   │   │   │   │   ├── window_util.h
│   │   │   │   │   ├── log_lut.c
│   │   │   │   │   ├── filterbank_util.h
│   │   │   │   │   ├── noise_reduction_util.c
│   │   │   │   │   ├── noise_reduction_util.h
│   │   │   │   │   ├── fft.cpp
│   │   │   │   │   ├── pcan_gain_control.c
│   │   │   │   │   ├── noise_reduction.c
│   │   │   │   │   ├── frontend_util.h
│   │   │   │   │   ├── pcan_gain_control_util.h
│   │   │   │   │   ├── filterbank.h
│   │   │   │   │   └── window_util.c
│   │   ├── kernels
│   │   │   ├── internal
│   │   │   │   ├── max.h
│   │   │   │   ├── min.h
│   │   │   │   ├── reference
│   │   │   │   │   ├── neg.h
│   │   │   │   │   ├── ceil.h
│   │   │   │   │   ├── floor.h
│   │   │   │   │   ├── round.h
│   │   │   │   │   └── quantize.h
│   │   │   │   ├── cppmath.h
│   │   │   │   ├── optimized
│   │   │   │   │   └── neon_check.h
│   │   │   │   └── tensor_ctypes.h
│   │   │   └── op_macros.h
│   │   ├── version.h
│   │   └── type_to_tflitetype.h
├── third_party
│   └── kissfft
│   │   ├── tools
│   │   │   └── kiss_fftr.h
│   │   └── COPYING
├── TensorFlowLite.h
└── README.md
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/.DS_Store
--------------------------------------------------------------------------------
/dl/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/dl/.DS_Store
--------------------------------------------------------------------------------
/source/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/.DS_Store
--------------------------------------------------------------------------------
/source/simulation/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/.DS_Store
--------------------------------------------------------------------------------
/previous_iterations/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/previous_iterations/.DS_Store
--------------------------------------------------------------------------------
/dl/pickle_initial_model_weights.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/dl/pickle_initial_model_weights.p
--------------------------------------------------------------------------------
/source/simulation/simulation-batch:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-batch
--------------------------------------------------------------------------------
/source/simulation/simulation-devices:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-devices
--------------------------------------------------------------------------------
/dl/pickle_masked_processed_color_1209.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/dl/pickle_masked_processed_color_1209.p
--------------------------------------------------------------------------------
/dl/pickle_unmasked_processed_color_1209.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/dl/pickle_unmasked_processed_color_1209.p
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-comparison
--------------------------------------------------------------------------------
/source/simulation/simulation-local-episodes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-local-episodes
--------------------------------------------------------------------------------
/source/simulation/simulation-results/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/.DS_Store
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison-pretrained:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-comparison-pretrained
--------------------------------------------------------------------------------
/source/simulation/simulation-results/results_batch.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/results_batch.p
--------------------------------------------------------------------------------
/source/simulation/simulation-results/results_device.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/results_device.p
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/batch-val.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/batch-val.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/local-val.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/local-val.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/batch-train.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/batch-train.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/device-train.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/device-train.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/devices-val.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/devices-val.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/local-train.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/local-train.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/results_local_episodes.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/results_local_episodes.p
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/previous_iterations/person_detection_arducam_5mp_plus/.DS_Store
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/comparison-random-val.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/comparison-random-val.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation.dSYM/Contents/Resources/DWARF/simulation:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation.dSYM/Contents/Resources/DWARF/simulation
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/comparison-pretrain-val.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/comparison-pretrain-val.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/comparison-random-train.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/comparison-random-train.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-results/figures/comparison-pretrain-train.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-results/figures/comparison-pretrain-train.pdf
--------------------------------------------------------------------------------
/source/simulation/simulation-batch.dSYM/Contents/Resources/DWARF/simulation-batch:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-batch.dSYM/Contents/Resources/DWARF/simulation-batch
--------------------------------------------------------------------------------
/source/simulation/simulation-devices.dSYM/Contents/Resources/DWARF/simulation-devices:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-devices.dSYM/Contents/Resources/DWARF/simulation-devices
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison.dSYM/Contents/Resources/DWARF/simulation-comparison:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-comparison.dSYM/Contents/Resources/DWARF/simulation-comparison
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison-pretrained.dSYM/Contents/Resources/DWARF/simulation-comparison-pretrained:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kavyakvk/TinyFederatedLearning/HEAD/source/simulation/simulation-comparison-pretrained.dSYM/Contents/Resources/DWARF/simulation-comparison-pretrained
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | source/person_detection_arducam_5mp_plus/valgrind-out.txt
3 | source/person_detection_arducam_5mp_plus/valgrind-out.txt
4 | source/simulation/embeddings-cat.txt
5 | source/simulation/ground_truth-cat.txt
6 | *.txt
7 | source/simulation/.DS_Store
8 | source/simulation/.DS_Store
9 |
--------------------------------------------------------------------------------
/dl/ActivationLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_ACT_H
2 | #define NN_ACT_H
3 |
4 | class ActivationLayer {
5 |     int input_size, output_size, batch_size, quant_zero_point;
6 |     double quant_scale;
7 |
8 |   public:
9 |     ActivationLayer ();
10 |     double forward (double **input_float, double **output);
11 |     double backward (double output_error, double learning_rate);
12 |     ~ActivationLayer();
13 | };
14 |
15 | #endif
--------------------------------------------------------------------------------
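
Note: ActivationLayer.cpp is not part of this listing, so the following is only a minimal usage sketch based on the declarations above. The batch size, input width, and the exact meaning of the two buffers are assumptions, not values taken from the project.

#include "ActivationLayer.h"

// Hypothetical sizes, for illustration only.
const int kBatch = 2, kWidth = 4;

int main() {
  ActivationLayer act;

  // Allocate a small batch of inputs and a buffer for the activated outputs.
  double** input = new double*[kBatch];
  double** output = new double*[kBatch];
  for (int b = 0; b < kBatch; ++b) {
    input[b] = new double[kWidth]{-1.0, 0.5, 2.0, -0.25};
    output[b] = new double[kWidth]();
  }

  act.forward(input, output);  // fills `output` with the activated values

  for (int b = 0; b < kBatch; ++b) { delete[] input[b]; delete[] output[b]; }
  delete[] input;
  delete[] output;
  return 0;
}
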
/source/simulation/simulation.dSYM/Contents/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>English</string>
7 |     <key>CFBundleIdentifier</key>
8 |     <string>com.apple.xcode.dsym.simulation</string>
9 |     <key>CFBundleInfoDictionaryVersion</key>
10 |     <string>6.0</string>
11 |     <key>CFBundlePackageType</key>
12 |     <string>dSYM</string>
13 |     <key>CFBundleSignature</key>
14 |     <string>????</string>
15 |     <key>CFBundleShortVersionString</key>
16 |     <string>1.0</string>
17 |     <key>CFBundleVersion</key>
18 |     <string>1</string>
19 |   </dict>
20 | </plist>
21 |
--------------------------------------------------------------------------------
/source/simulation/simulation-batch.dSYM/Contents/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>English</string>
7 |     <key>CFBundleIdentifier</key>
8 |     <string>com.apple.xcode.dsym.simulation-batch</string>
9 |     <key>CFBundleInfoDictionaryVersion</key>
10 |     <string>6.0</string>
11 |     <key>CFBundlePackageType</key>
12 |     <string>dSYM</string>
13 |     <key>CFBundleSignature</key>
14 |     <string>????</string>
15 |     <key>CFBundleShortVersionString</key>
16 |     <string>1.0</string>
17 |     <key>CFBundleVersion</key>
18 |     <string>1</string>
19 |   </dict>
20 | </plist>
21 |
--------------------------------------------------------------------------------
/source/simulation/simulation-devices.dSYM/Contents/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>English</string>
7 |     <key>CFBundleIdentifier</key>
8 |     <string>com.apple.xcode.dsym.simulation-devices</string>
9 |     <key>CFBundleInfoDictionaryVersion</key>
10 |     <string>6.0</string>
11 |     <key>CFBundlePackageType</key>
12 |     <string>dSYM</string>
13 |     <key>CFBundleSignature</key>
14 |     <string>????</string>
15 |     <key>CFBundleShortVersionString</key>
16 |     <string>1.0</string>
17 |     <key>CFBundleVersion</key>
18 |     <string>1</string>
19 |   </dict>
20 | </plist>
21 |
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison.dSYM/Contents/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>English</string>
7 |     <key>CFBundleIdentifier</key>
8 |     <string>com.apple.xcode.dsym.simulation-comparison</string>
9 |     <key>CFBundleInfoDictionaryVersion</key>
10 |     <string>6.0</string>
11 |     <key>CFBundlePackageType</key>
12 |     <string>dSYM</string>
13 |     <key>CFBundleSignature</key>
14 |     <string>????</string>
15 |     <key>CFBundleShortVersionString</key>
16 |     <string>1.0</string>
17 |     <key>CFBundleVersion</key>
18 |     <string>1</string>
19 |   </dict>
20 | </plist>
21 |
--------------------------------------------------------------------------------
/source/simulation/simulation-comparison-pretrained.dSYM/Contents/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>English</string>
7 |     <key>CFBundleIdentifier</key>
8 |     <string>com.apple.xcode.dsym.simulation-comparison-pretrained</string>
9 |     <key>CFBundleInfoDictionaryVersion</key>
10 |     <string>6.0</string>
11 |     <key>CFBundlePackageType</key>
12 |     <string>dSYM</string>
13 |     <key>CFBundleSignature</key>
14 |     <string>????</string>
15 |     <key>CFBundleShortVersionString</key>
16 |     <string>1.0</string>
17 |     <key>CFBundleVersion</key>
18 |     <string>1</string>
19 |   </dict>
20 | </plist>
21 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "notperson",
20 | "person",
21 | };
22 |
--------------------------------------------------------------------------------
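
Note: the score layout and kCategoryCount come from model_settings.h, which is listed in the tree but not reproduced here, so the snippet below is only an illustrative sketch of how these labels are typically consumed in the person-detection example.

#include <stdint.h>

#include "model_settings.h"

// Return the label whose score is highest; assumes `scores` holds
// kCategoryCount int8 values in the same order as kCategoryLabels.
const char* LabelFromScores(const int8_t* scores) {
  int best = 0;
  for (int i = 1; i < kCategoryCount; ++i) {
    if (scores[i] > scores[best]) best = i;
  }
  return kCategoryLabels[best];
}
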
/previous_iterations/arduino_training_final/model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "notperson",
20 | "person",
21 | };
22 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "notperson",
20 | "person",
21 | };
22 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/model_settings.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "model_settings.h"
17 |
18 | const char* kCategoryLabels[kCategoryCount] = {
19 | "notperson",
20 | "person",
21 | };
22 |
--------------------------------------------------------------------------------
/previous_iterations/test_arduino_read_write.ino:
--------------------------------------------------------------------------------
1 | // Serial test script
2 |
3 | int setPoint = 55;
4 | String readString;
5 |
6 | void setup()
7 | {
8 |
9 |   Serial.begin(9600); // initialize serial communications at 9600 bps
10 |
11 | }
12 |
13 | void loop()
14 | {
15 |   while(!Serial.available()) {} // wait for data to arrive
16 |   // serial read section
17 |   while (Serial.available())
18 |   {
19 |     if (Serial.available() > 0)
20 |     {
21 |       char c = Serial.read(); // gets one byte from serial buffer
22 |       readString += c;        // builds the string readString
23 |     }
24 |   }
25 |
26 |   if (readString.length() > 0)
27 |   {
28 |     Serial.print("Arduino received: ");
29 |     Serial.println(readString); // see what was received
30 |   }
31 |
32 |   delay(500);
33 |
34 |   // serial write section
35 |   char ard_sends = '1';
36 |   Serial.print("Arduino sends: ");
37 |   Serial.println(ard_sends);
38 |   Serial.print("\n");
39 |   Serial.flush();
40 | }
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/test_read_write.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import syslog
3 | import time
4 |
5 | #The following line is for serial over GPIO
6 | port = '/dev/cu.usbmodem142101' # change this to what the Arduino Port is
7 |
8 |
9 | ard = serial.Serial(port,9600,timeout=5) # default is baud 9600
10 | time.sleep(5) # wait for Arduino to load and connect to port
11 |
12 | i = 0
13 |
14 | while (i < 4):
15 |     # Serial write section
16 |
17 |     setTempCar1 = 63
18 |     setTempCar2 = 37
19 |     ard.flush()
20 |     setTemp1 = str(setTempCar1)
21 |     setTemp2 = str(setTempCar2)
22 |     print ("Python value sent: ")
23 |     print (setTemp1)
24 |     ard.write(setTemp1.encode())  # pyserial expects bytes on Python 3
25 |     time.sleep(1) # Match the Arduino code
26 |
27 |     # Serial read section
28 |     msg = ard.read(ard.inWaiting()) # read all characters in buffer
29 |     print ("Message from arduino: ")
30 |     print (msg)
31 |     i = i + 1
32 | else:
33 |     print("Exiting")
34 |     exit()
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch, so
19 | // where other systems need their own main routine in this file, it can be left
20 | // empty.
21 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch, so
19 | // where other systems need their own main routine in this file, it can be left
20 | // empty.
21 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch, so
19 | // where other systems need their own main routine in this file, it can be left
20 | // empty.
21 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/arduino_main.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "main_functions.h"
17 |
18 | // Arduino automatically calls the setup() and loop() functions in a sketch, so
19 | // where other systems need their own main routine in this file, it can be left
20 | // empty.
21 |
--------------------------------------------------------------------------------
/tensorflow/lite/string_type.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Abstract string. We don't want even absl at this level.
16 | #ifndef TENSORFLOW_LITE_STRING_TYPE_H_
17 | #define TENSORFLOW_LITE_STRING_TYPE_H_
18 |
19 | #include <string>
20 |
21 | namespace tflite {
22 |
23 | using std::string;
24 |
25 | } // namespace tflite
26 |
27 | #endif // TENSORFLOW_LITE_STRING_TYPE_H_
28 |
--------------------------------------------------------------------------------
/third_party/kissfft/tools/kiss_fftr.h:
--------------------------------------------------------------------------------
1 | #ifndef KISS_FTR_H
2 | #define KISS_FTR_H
3 |
4 | #include "third_party/kissfft/kiss_fft.h"
5 | #ifdef __cplusplus
6 | extern "C" {
7 | #endif
8 |
9 |
10 | /*
11 |
12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
13 |
14 |
15 |
16 | */
17 |
18 | typedef struct kiss_fftr_state *kiss_fftr_cfg;
19 |
20 |
21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
22 | /*
23 | nfft must be even
24 |
25 | If you don't care to allocate space, use mem = lenmem = NULL
26 | */
27 |
28 |
29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
30 | /*
31 | input timedata has nfft scalar points
32 | output freqdata has nfft/2+1 complex points
33 | */
34 |
35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
36 | /*
37 | input freqdata has nfft/2+1 complex points
38 | output timedata has nfft scalar points
39 | */
40 |
41 | #define kiss_fftr_free free
42 |
43 | #ifdef __cplusplus
44 | }
45 | #endif
46 | #endif
47 |
--------------------------------------------------------------------------------
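
Note: a minimal usage sketch for the kiss_fftr API declared above; the kiss_fft_scalar and kiss_fft_cpx types come from kiss_fft.h, and the include paths follow this repository's third_party layout.

#include "third_party/kissfft/kiss_fft.h"
#include "third_party/kissfft/tools/kiss_fftr.h"

// nfft must be even for the real-optimized transform.
constexpr int kNfft = 512;

void RunRealFft(const kiss_fft_scalar* timedata /* kNfft samples */,
                kiss_fft_cpx* freqdata /* kNfft/2 + 1 bins */) {
  // Let kissfft allocate its own working memory (mem = lenmem = NULL).
  kiss_fftr_cfg cfg = kiss_fftr_alloc(kNfft, /*inverse_fft=*/0, nullptr, nullptr);
  kiss_fftr(cfg, timedata, freqdata);
  kiss_fftr_free(cfg);
}
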
/TensorFlowLite.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_
16 | #define TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_
17 |
18 | // This header is deliberately empty, and is only present because including it
19 | // in a .ino sketch forces the Arduino toolchain to build the rest of the
20 | // library.
21 |
22 | #endif // TENSORFLOW_LITE_MICRO_TOOLS_MAKE_TEMPLATES_TENSORFLOWLITE_H_
23 |
--------------------------------------------------------------------------------
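
Note: a sketch that wants the library typically just includes this header once at the top of its .ino file, for example (illustrative sketch name):

// my_sketch.ino
#include <TensorFlowLite.h>  // forces the Arduino toolchain to build the TF Lite Micro library

void setup() {}
void loop() {}
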
/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h:
--------------------------------------------------------------------------------
1 | #ifndef KISS_FTR_H
2 | #define KISS_FTR_H
3 |
4 | #include "third_party/kissfft/kiss_fft.h"
5 | #ifdef __cplusplus
6 | extern "C" {
7 | #endif
8 |
9 |
10 | /*
11 |
12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
13 |
14 |
15 |
16 | */
17 |
18 | typedef struct kiss_fftr_state *kiss_fftr_cfg;
19 |
20 |
21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
22 | /*
23 | nfft must be even
24 |
25 | If you don't care to allocate space, use mem = lenmem = NULL
26 | */
27 |
28 |
29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
30 | /*
31 | input timedata has nfft scalar points
32 | output freqdata has nfft/2+1 complex points
33 | */
34 |
35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
36 | /*
37 | input freqdata has nfft/2+1 complex points
38 | output timedata has nfft scalar points
39 | */
40 |
41 | #define kiss_fftr_free free
42 |
43 | #ifdef __cplusplus
44 | }
45 | #endif
46 | #endif
47 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/testing/test_conv_model.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
18 |
19 | // See generate_test_models.py for updating the contents of this model:
20 | extern const unsigned char kTestConvModelData[];
21 | extern const unsigned int kTestConvModelDataSize;
22 |
23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
24 |
--------------------------------------------------------------------------------
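
Note: a sketch of how these test-model byte arrays are typically consumed; it assumes the upstream schema and version headers are available alongside this snapshot.

#include "tensorflow/lite/micro/testing/test_conv_model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

// Map the raw byte array onto the flatbuffer schema and sanity-check its version.
bool TestConvModelIsUsable() {
  const tflite::Model* model = ::tflite::GetModel(kTestConvModelData);
  return model->version() == TFLITE_SCHEMA_VERSION;
}
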
/tensorflow/lite/core/api/tensor_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 |
21 | namespace tflite {
22 |
23 | // Resets a variable tensor to the default value.
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
25 |
26 | } // namespace tflite
27 |
28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
29 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/debug_log.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
17 |
18 | // This function should be implemented by each target platform, and provide a
19 | // way for strings to be output to some text stream. For more information, see
20 | // tensorflow/lite/micro/debug_log.cc.
21 | extern "C" void DebugLog(const char* s);
22 |
23 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
24 |
--------------------------------------------------------------------------------
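
Note: each port supplies its own definition of DebugLog; the Arduino port in this tree (tensorflow/lite/micro/arduino/debug_log.cpp) routes the string to the Serial port, and a hosted build can do something as simple as the sketch below.

#include <cstdio>

// Reference-style implementation for a platform with a C library available.
extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); }
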
/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
18 |
19 | extern const unsigned char g_keyword_scrambled_model_data[];
20 | extern const unsigned int g_keyword_scrambled_model_data_length;
21 |
22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
23 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/kernels/ethosu.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | //
17 | // This is a stub file for non-Ethos platforms
18 | //
19 | #include "tensorflow/lite/c/common.h"
20 |
21 | namespace tflite {
22 | namespace ops {
23 | namespace micro {
24 | namespace custom {
25 | TfLiteRegistration* Register_ETHOSU() { return nullptr; }
26 |
27 | const char* GetString_ETHOSU() { return ""; }
28 |
29 | } // namespace custom
30 | } // namespace micro
31 | } // namespace ops
32 | } // namespace tflite
33 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
16 |
17 | void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
18 |   config->enable_log = 1;
19 |   config->scale_shift = 6;
20 | }
21 |
22 | int LogScalePopulateState(const struct LogScaleConfig* config,
23 |                           struct LogScaleState* state) {
24 |   state->enable_log = config->enable_log;
25 |   state->scale_shift = config->scale_shift;
26 |   return 1;
27 | }
28 |
--------------------------------------------------------------------------------
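
Note: a short usage sketch of the two functions above; the LogScaleConfig and LogScaleState structs come from log_scale_util.h / log_scale.h in this same directory.

#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"

// Build a LogScaleState with the library defaults (log enabled, scale_shift 6).
int SetUpLogScale(struct LogScaleState* state) {
  struct LogScaleConfig config;
  LogScaleFillConfigWithDefaults(&config);
  return LogScalePopulateState(&config, state);  // returns 1 on success
}
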
/source/simulation/FCLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_FC_H
2 | #define NN_FC_H
3 |
4 | class FCLayer {
5 |   public:
6 |     int input_size, output_size, batch_size, quant_zero_point;
7 |     double quant_scale;
8 |     double *bias;
9 |     double **weights;
10 |     double **output_error_softmax;
11 |
12 |     FCLayer();
13 |     FCLayer (int input_sz, int output_sz, double scale,
14 |              int zero_point, int batch, bool default_weight);
15 |     void forward (double **input_float, double **output);
16 |     void backward (double **output, int **ground_truth,
17 |                    double **input_error, double **input_float,
18 |                    double learning_rate, double lambda);
19 |     void dequantize(int *input_data, double *input_float);
20 |     void cleanup();
21 |     ~FCLayer();
22 |     void set_weights_bias(double **new_weights, double *new_bias);
23 | };
24 |
25 | // class KMeans{
26 | // public:
27 |
28 | // }
29 |
30 | void FL_round_simulation(double **input_f, int **input_i, int **ground_truth, int local_episodes,
31 |                          double learning_rate, FCLayer *model, double lambda,
32 |                          bool verbose, bool local, bool unquantize);
33 |
34 |
35 | #endif
36 |
--------------------------------------------------------------------------------
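
Note: FCLayer.cpp is not part of this listing, so the snippet below is only a sketch of how the interface above appears to be driven in the simulation. The sizes, the one-hot label layout, and the quantization parameters are assumptions for illustration, not values from the project.

#include "FCLayer.h"

int main() {
  const int kInputSize = 256, kOutputSize = 2, kBatch = 4;  // assumed sizes

  // default_weight = true presumably lets the layer initialize its own weights.
  FCLayer model(kInputSize, kOutputSize, /*scale=*/0.02, /*zero_point=*/-128,
                kBatch, /*default_weight=*/true);

  // One (zero-filled) batch of float inputs, quantized inputs, and labels.
  double** input_f = new double*[kBatch];
  int** input_i = new int*[kBatch];
  int** ground_truth = new int*[kBatch];
  for (int b = 0; b < kBatch; ++b) {
    input_f[b] = new double[kInputSize]();
    input_i[b] = new int[kInputSize]();
    ground_truth[b] = new int[kOutputSize]();
    ground_truth[b][0] = 1;  // mark every sample as class 0 for the sketch
  }

  // One simulated federated-learning round of local training on this "device".
  FL_round_simulation(input_f, input_i, ground_truth, /*local_episodes=*/5,
                      /*learning_rate=*/0.01, &model, /*lambda=*/0.0,
                      /*verbose=*/false, /*local=*/true, /*unquantize=*/false);

  // (Cleanup of the batch arrays omitted for brevity.)
  return 0;
}
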
/tensorflow/lite/kernels/internal/max.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
17 |
18 | #include <cmath>
19 |
20 | namespace tflite {
21 |
22 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
23 | inline float TfLiteMax(const float& x, const float& y) {
24 |   return std::max(x, y);
25 | }
26 | #else
27 | template <class T>
28 | inline T TfLiteMax(const T& x, const T& y) {
29 |   return std::fmax(x, y);
30 | }
31 | #endif
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
36 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/min.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
17 |
18 | #include <cmath>
19 |
20 | namespace tflite {
21 |
22 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
23 | inline float TfLiteMin(const float& x, const float& y) {
24 |   return std::min(x, y);
25 | }
26 | #else
27 | template <class T>
28 | inline T TfLiteMin(const T& x, const T& y) {
29 |   return std::fmin(x, y);
30 | }
31 | #endif
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
36 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_time.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
17 |
18 | #include <stdint.h>
19 |
20 | namespace tflite {
21 |
22 | // These functions should be implemented by each target platform, and provide an
23 | // accurate tick count along with how many ticks there are per second.
24 | int32_t ticks_per_second();
25 |
26 | // Return time in ticks. The meaning of a tick varies per platform.
27 | int32_t GetCurrentTimeTicks();
28 |
29 | } // namespace tflite
30 |
31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
32 |
--------------------------------------------------------------------------------
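
Note: a hosted-build sketch of the two hooks declared above, assuming std::chrono is available; a real port would read a hardware cycle counter or timer register instead.

#include <chrono>
#include <cstdint>

#include "tensorflow/lite/micro/micro_time.h"

namespace tflite {

int32_t ticks_per_second() { return 1000000; }  // one tick = one microsecond

int32_t GetCurrentTimeTicks() {
  using namespace std::chrono;
  return static_cast<int32_t>(
      duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count());
}

}  // namespace tflite
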
/previous_iterations/arduino_training_final/FCLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_FC_H
2 | #define NN_FC_H
3 |
4 | class FCLayer {
5 |   public:
6 |     int input_size, output_size, batch_size, quant_zero_point;
7 |     double quant_scale;
8 |     double *bias;
9 |     double **weights;
10 |     double **output_error_softmax;
11 |
12 |     FCLayer();
13 |     FCLayer (int input_sz, int output_sz, double scale,
14 |              int zero_point, int batch, bool default_weight);
15 |     void forward (double **input_float, double **output);
16 |     void backward (double **output, int **ground_truth,
17 |                    double **input_error, double **input_float,
18 |                    double learning_rate, double lambda);
19 |     void dequantize(int *input_data, double *input_float);
20 |     void cleanup();
21 |     ~FCLayer();
22 |     void set_weights_bias(double **new_weights, double *new_bias);
23 | };
24 |
25 | // class KMeans{
26 | // public:
27 |
28 | // }
29 |
30 | void FL_round_simulation(double **input_f, int **input_i, int **ground_truth, int local_episodes,
31 |                          double learning_rate, FCLayer *model, double lambda,
32 |                          bool verbose, bool local, bool unquantize);
33 |
34 |
35 | #endif
36 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/FCLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_FC_H
2 | #define NN_FC_H
3 |
4 | class FCLayer {
5 |   public:
6 |     int input_size, output_size, batch_size, quant_zero_point;
7 |     double quant_scale;
8 |     double *bias;
9 |     double **weights;
10 |     double **output_error_softmax;
11 |
12 |     FCLayer();
13 |     FCLayer (int input_sz, int output_sz, double scale,
14 |              int zero_point, int batch, bool default_weight);
15 |     void forward (double **input_float, double **output);
16 |     void backward (double **output, int **ground_truth,
17 |                    double **input_error, double **input_float,
18 |                    double learning_rate, double lambda);
19 |     void dequantize(int *input_data, double *input_float);
20 |     void cleanup();
21 |     ~FCLayer();
22 |     void set_weights_bias(double **new_weights, double *new_bias);
23 | };
24 |
25 | void FL_round_simulation(double **input_float, int **ground_truth, int local_episodes,
26 |                          double learning_rate, FCLayer *model, bool verbose, bool local, bool unquantize);
27 |
28 | void FL_round_quantize(int **input_data, int **ground_truth, int local_epochs,
29 |                        double learning_rate, FCLayer *model);
30 |
31 |
32 | #endif
33 |
--------------------------------------------------------------------------------
/tensorflow/lite/version.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_VERSION_H_
16 | #define TENSORFLOW_LITE_VERSION_H_
17 |
18 | #include "tensorflow/core/public/version.h"
19 |
20 | // The version number of the Schema. Ideally all changes will be backward
21 | // compatible. If that ever changes, we must ensure that version is the first
22 | // entry in the new tflite root so that we can see that version is not 1.
23 | #define TFLITE_SCHEMA_VERSION (3)
24 |
25 | // TensorFlow Lite Runtime version.
26 | // This value is currently shared with that of TensorFlow.
27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING
28 |
29 | #endif // TENSORFLOW_LITE_VERSION_H_
30 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
17 |
18 | #include <cstdarg>
19 |
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/micro/compatibility.h"
22 |
23 | namespace tflite {
24 |
25 | class MicroErrorReporter : public ErrorReporter {
26 |  public:
27 |   ~MicroErrorReporter() override {}
28 |   int Report(const char* format, va_list args) override;
29 |
30 |  private:
31 |   TF_LITE_REMOVE_VIRTUAL_DELETE
32 | };
33 |
34 | } // namespace tflite
35 |
36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
37 |
--------------------------------------------------------------------------------
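
Note: the usual TF Lite Micro pattern for the class above is to keep one statically allocated reporter and pass it around through the ErrorReporter* interface, using the TF_LITE_REPORT_ERROR macro from error_reporter.h.

#include "tensorflow/lite/micro/micro_error_reporter.h"

static tflite::MicroErrorReporter micro_error_reporter;
static tflite::ErrorReporter* error_reporter = &micro_error_reporter;

void ReportSetupFailure(int code) {
  TF_LITE_REPORT_ERROR(error_reporter, "Setup failed with code %d", code);
}
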
/tensorflow/lite/experimental/microfrontend/lib/fft_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | // Prepares an FFT for the given input size.
25 | int FftPopulateState(struct FftState* state, size_t input_size);
26 |
27 | // Frees any allocated buffers.
28 | void FftFreeStateContents(struct FftState* state);
29 |
30 | #ifdef __cplusplus
31 | } // extern "C"
32 | #endif
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
35 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/main_functions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
18 |
19 | // Initializes all data needed for the example. The name is important, and needs
20 | // to be setup() for Arduino compatibility.
21 | void setup();
22 |
23 | // Runs one iteration of data gathering and inference. This should be called
24 | // repeatedly from the application code. The name needs to be loop() for Arduino
25 | // compatibility.
26 | void loop();
27 |
28 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
29 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/FCLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_FC_H
2 | #define NN_FC_H
3 |
4 | class FCLayer {
5 | public:
6 | int input_size, output_size, batch_size, quant_zero_point;
7 | double quant_scale;
8 | double *bias;
9 | double **weights;
10 | double **output_error_softmax;
11 |
12 | FCLayer();
13 | FCLayer (int input_sz, int output_sz, double scale,
14 | int zero_point, int batch, bool default_weight);
15 | void forward (double **input_float, double **output);
16 | void backward (double **output, int **ground_truth,
17 | double **input_error, double **input_float,
18 | double learning_rate, double lambda);
19 | void dequantize(int *input_data, double *input_float);
20 | void cleanup();
21 | ~FCLayer();
22 | void set_weights_bias(double **new_weights, double *new_bias);
23 | };
24 |
25 | // void FL_round_simulation(double **input_float, int **ground_truth, int local_episodes,
26 | // double learning_rate, FCLayer *model, bool verbose, bool local);
27 | void FL_round_simulation(double **input_f, int **input_i, int **ground_truth, int local_episodes,
28 | double learning_rate, FCLayer *model, double lambda,
29 | bool verbose, bool local, bool unquantize);
30 |
31 |
32 | #endif
33 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/FCLayer.h:
--------------------------------------------------------------------------------
1 | #ifndef NN_FC_H
2 | #define NN_FC_H
3 |
4 | class FCLayer {
5 | public:
6 | int input_size, output_size, batch_size, quant_zero_point;
7 | double quant_scale;
8 | double *bias;
9 | double **weights;
10 | double **output_error_softmax;
11 |
12 | FCLayer();
13 | FCLayer (int input_sz, int output_sz, double scale,
14 | int zero_point, int batch, bool default_weight);
15 | void forward (double **input_float, double **output);
16 | void backward (double **output, int **ground_truth,
17 | double **input_error, double **input_float,
18 | double learning_rate, double lambda);
19 | void dequantize(int *input_data, double *input_float);
20 | void cleanup();
21 | ~FCLayer();
22 | void set_weights_bias(double **new_weights, double *new_bias);
23 | };
24 |
25 | // void FL_round_simulation(double **input_float, int **ground_truth, int local_episodes,
26 | // double learning_rate, FCLayer *model, bool verbose, bool local);
27 | void FL_round_simulation(double **input_f, int **input_i, int **ground_truth, int local_episodes,
28 | double learning_rate, FCLayer *model, double lambda,
29 | bool verbose, bool local, bool unquantize);
30 |
31 |
32 | #endif
33 |
--------------------------------------------------------------------------------
/tensorflow/lite/core/api/error_reporter.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/core/api/error_reporter.h"
16 | #include <cstdarg>
17 |
18 | namespace tflite {
19 |
20 | int ErrorReporter::Report(const char* format, ...) {
21 | va_list args;
22 | va_start(args, format);
23 | int code = Report(format, args);
24 | va_end(args);
25 | return code;
26 | }
27 |
28 | // TODO(aselle): Make the name of ReportError on context the same, so
29 | // we can use the ensure functions w/o a context and w/ a reporter.
30 | int ErrorReporter::ReportError(void*, const char* format, ...) {
31 | va_list args;
32 | va_start(args, format);
33 | int code = Report(format, args);
34 | va_end(args);
35 | return code;
36 | }
37 |
38 | } // namespace tflite
39 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/kernels/micro_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
14 | namespace tflite {
15 | namespace ops {
16 | namespace micro {
17 |
18 | // Same as gtl::Greater but defined here to reduce dependencies and
19 | // binary size for micro environment.
20 | struct Greater {
21 |   template <typename T>
22 | bool operator()(const T& x, const T& y) const {
23 | return x > y;
24 | }
25 | };
26 |
27 | struct Less {
28 |   template <typename T>
29 | bool operator()(const T& x, const T& y) const {
30 | return x < y;
31 | }
32 | };
33 |
34 | } // namespace micro
35 | } // namespace ops
36 | } // namespace tflite
37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
38 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/log_lut.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
17 |
18 | #include <stdint.h>
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | // Number of segments in the log lookup table. The table will be kLogSegments+1
25 | // in length (with some padding).
26 | #define kLogSegments 128
27 | #define kLogSegmentsLog2 7
28 |
29 | // Scale used by lookup table.
30 | #define kLogScale 65536
31 | #define kLogScaleLog2 16
32 | #define kLogCoeff 45426
33 |
34 | extern const uint16_t kLogLut[];
35 |
36 | #ifdef __cplusplus
37 | } // extern "C"
38 | #endif
39 |
40 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
41 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/person_detect_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i person_detect.tflite > person_detect_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_person_detect_model_data[];
25 | extern const int g_person_detect_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
28 |
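29 | // Editor's note (not part of the original header): the generated
30 | // person_detect_model_data.cc defines the two symbols declared above. Its shape
31 | // is roughly the following, with placeholder bytes standing in for the real
32 | // model contents:
33 | //
34 | //   const unsigned char g_person_detect_model_data[] = {
35 | //       0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, /* ...model bytes... */
36 | //   };
37 | //   const int g_person_detect_model_data_len = /* number of bytes above */;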
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/reference/neg.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
17 |
18 | #include "tensorflow/lite/kernels/internal/types.h"
19 |
20 | namespace tflite {
21 |
22 | namespace reference_ops {
23 |
24 | template <typename T>
25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data,
26 | const RuntimeShape& output_shape, T* output_data) {
27 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
28 |
29 | for (int i = 0; i < flat_size; ++i) {
30 | output_data[i] = -input_data[i];
31 | }
32 | }
33 |
34 | } // namespace reference_ops
35 | } // namespace tflite
36 |
37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
38 |
--------------------------------------------------------------------------------
/third_party/kissfft/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2003-2010 Mark Borgerding
2 |
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/person_detect_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i person_detect.tflite > person_detect_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_person_detect_model_data[];
25 | extern const int g_person_detect_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
28 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/person_detect_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i person_detect.tflite > person_detect_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_person_detect_model_data[];
25 | extern const int g_person_detect_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
28 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/reference/ceil.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
17 |
18 | #include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
27 | const RuntimeShape& output_shape, float* output_data) {
28 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 |
30 | for (int i = 0; i < flat_size; ++i) {
31 | output_data[i] = std::ceil(input_data[i]);
32 | }
33 | }
34 |
35 | } // namespace reference_ops
36 | } // namespace tflite
37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
38 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/person_detect_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i person_detect.tflite > person_detect_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_person_detect_model_data[];
25 | extern const int g_person_detect_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
28 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/log_scale.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #ifdef __cplusplus
22 | extern "C" {
23 | #endif
24 |
25 | struct LogScaleState {
26 | int enable_log;
27 | int scale_shift;
28 | };
29 |
30 | // Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
31 | // that the signal array will be modified.
32 | uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
33 | int signal_size, int correction_bits);
34 |
35 | #ifdef __cplusplus
36 | } // extern "C"
37 | #endif
38 |
39 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
40 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_optional_debug_tools.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Optional debugging functionality. For small sized binaries, these are not
16 | // needed.
17 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
18 | #define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
19 |
20 | #include "tensorflow/lite/micro/micro_interpreter.h"
21 |
22 | namespace tflite {
23 | // Helper function to print model flatbuffer data. This function is not called
24 | // by default, hence it's not linked into the final binary.
25 | void PrintModelData(const Model* model, ErrorReporter* error_reporter);
26 | // Prints a dump of what tensors and what nodes are in the interpreter.
27 | void PrintInterpreterState(MicroInterpreter* interpreter);
28 | } // namespace tflite
29 |
30 | #endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
31 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2003-2010 Mark Borgerding
2 |
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/reference/floor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
17 |
18 | #include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data,
27 | const RuntimeShape& output_shape, float* output_data) {
28 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 |
30 | for (int i = 0; i < flat_size; i++) {
31 | int offset = i;
32 | output_data[offset] = std::floor(input_data[offset]);
33 | }
34 | }
35 |
36 | } // namespace reference_ops
37 | } // namespace tflite
38 |
39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
40 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/arduino/debug_log.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/debug_log.h"
17 |
18 | #include "Arduino.h"
19 |
20 | // The Arduino DUE uses a different object for the default serial port shown in
21 | // the monitor than most other models, so make sure we pick the right one. See
22 | // https://github.com/arduino/Arduino/issues/3088#issuecomment-406655244
23 | #if defined(__SAM3X8E__)
24 | #define DEBUG_SERIAL_OBJECT (SerialUSB)
25 | #else
26 | #define DEBUG_SERIAL_OBJECT (Serial)
27 | #endif
28 |
29 | // On Arduino platforms, we set up a serial port and write to it for debug
30 | // logging.
31 | extern "C" void DebugLog(const char* s) {
32 | static bool is_initialized = false;
33 | if (!is_initialized) {
34 | DEBUG_SERIAL_OBJECT.begin(9600);
35 | is_initialized = true;
36 | }
37 | DEBUG_SERIAL_OBJECT.print(s);
38 | }
39 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/compatibility.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
17 |
18 | // C++ will automatically create class-specific delete operators for virtual
19 | // objects, which by default call the global delete function. For embedded
20 | // applications we want to avoid this, and won't be calling new/delete on these
21 | // objects, so we need to override the default implementation with one that does
22 | // nothing to avoid linking in ::delete().
23 | // This macro needs to be included in all subclasses of a virtual base class in
24 | // the private section.
25 | #ifdef TF_LITE_STATIC_MEMORY
26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \
27 | void operator delete(void* p) {}
28 | #else
29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE
30 | #endif
31 |
32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
33 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_error_reporter.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/micro_error_reporter.h"
17 |
18 | #include <cstdarg>
19 |
20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
21 | #include "tensorflow/lite/micro/debug_log.h"
22 | #include "tensorflow/lite/micro/micro_string.h"
23 | #endif
24 |
25 | namespace tflite {
26 |
27 | int MicroErrorReporter::Report(const char* format, va_list args) {
28 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
29 | // Only pulling in the implementation of this function for builds where we
30 | // expect to make use of it to be extra cautious about not increasing the code
31 | // size.
32 | static constexpr int kMaxLogLen = 256;
33 | char log_buffer[kMaxLogLen];
34 | MicroVsnprintf(log_buffer, kMaxLogLen, format, args);
35 | DebugLog(log_buffer);
36 | DebugLog("\r\n");
37 | #endif
38 | return 0;
39 | }
40 |
41 | } // namespace tflite
42 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_string.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
17 |
18 | #include <cstdarg>
19 |
20 | // Implements simple string formatting for numeric types. Returns the number of
21 | // bytes written to output.
22 | extern "C" {
23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro.
24 | // MicroSnprintf() is implemented using MicroVsnprintf().
25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args);
26 | // Functionally equivalent to snprintf, trimmed down for TFLite Micro.
27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string
28 | // "int 10" in the buffer.
29 | // Floating point values are logged in exponent notation (1.XXX*2^N).
30 | int MicroSnprintf(char* output, int len, const char* format, ...);
31 | }
32 |
33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
34 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/cppmath.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
17 |
18 | #include <cmath>
19 |
20 | namespace tflite {
21 |
22 | #if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
23 | (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
24 | defined(__ZEPHYR__)
25 | #define TF_LITE_GLOBAL_STD_PREFIX
26 | #else
27 | #define TF_LITE_GLOBAL_STD_PREFIX std
28 | #endif
29 |
30 | #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
31 |   template <class T> \
32 | inline T tf_name(const T x) { \
33 | return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \
34 | }
35 |
36 | DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
37 |
38 | } // namespace tflite
39 |
40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
41 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/fft.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #ifdef __cplusplus
22 | extern "C" {
23 | #endif
24 |
25 | struct complex_int16_t {
26 | int16_t real;
27 | int16_t imag;
28 | };
29 |
30 | struct FftState {
31 | int16_t* input;
32 | struct complex_int16_t* output;
33 | size_t fft_size;
34 | size_t input_size;
35 | void* scratch;
36 | size_t scratch_size;
37 | };
38 |
39 | void FftCompute(struct FftState* state, const int16_t* input,
40 | int input_scale_shift);
41 |
42 | void FftInit(struct FftState* state);
43 |
44 | void FftReset(struct FftState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
51 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/all_ops_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
14 |
15 | #include "tensorflow/lite/micro/compatibility.h"
16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
17 |
18 | namespace tflite {
19 |
20 | // The magic number in the template parameter is the maximum number of ops that
21 | // can be added to AllOpsResolver. It can be increased if needed. And most
22 | // applications that care about the memory footprint will want to directly use
23 | // MicroMutableOpResolver and have an application specific template parameter.
24 | // The examples directory has sample code for this.
25 | class AllOpsResolver : public MicroMutableOpResolver<128> {
26 | public:
27 | AllOpsResolver();
28 |
29 | private:
30 | TF_LITE_REMOVE_VIRTUAL_DELETE
31 | };
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
36 |
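37 | // Editor's sketch (not part of the upstream header): the memory-conscious
38 | // alternative described above registers only the ops a model actually needs.
39 | // Which Add* helpers exist depends on the TFLite Micro version vendored here,
40 | // so the ops below are illustrative assumptions, shown as they would appear in
41 | // application setup code:
42 | //
43 | //   #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
44 | //
45 | //   static tflite::MicroMutableOpResolver<3> micro_op_resolver;  // room for 3 ops
46 | //   micro_op_resolver.AddConv2D();
47 | //   micro_op_resolver.AddFullyConnected();
48 | //   micro_op_resolver.AddSoftmax();
49 | //   // Pass micro_op_resolver to the MicroInterpreter instead of AllOpsResolver.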
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TinyFederatedLearning
2 | Repo for the PerCom 2022 demo paper titled 'TinyFedTL: Federated Transfer Learning on Ubiquitous Tiny IoT Devices'
3 | 
4 | TinyML has risen to popularity in an era where data is everywhere. However, the data that is in most demand is subject to strict privacy and security guarantees. In addition, the deployment of TinyML hardware in the real world has significant memory and communication constraints that traditional ML fails to address. In light of these challenges, we present TinyFedTL, the first implementation of federated transfer learning on a resource-constrained microcontroller.
5 |
6 | * A C++ implementation to accompany the Arduino code is provided (without the standard library, so it will run on-device!); see the illustrative usage sketch below
7 | * Please see our demo video presentation here: https://www.youtube.com/watch?v=KSaidr3ZN9M
8 |
9 | ## File Structure and Important Files
10 | * dl
11 | * source > arduino_training_final_v3: the .ino file contains the implementation of our FL code for the Arduino IDE to compile
12 |   * python_final_script.py acts as the "central server" for the Arduino
13 | * source > simulation:
14 |   * NeuralNetwork.cpp has our FC implementation and the FL implementation; simulation.cc has the code needed to run our simulations
15 |   * simulation-xxx are the executables (run with ./) for each of our experiments, and the .txt files are the terminal output from running those experiments
16 |   * fl_simulation_analysis generates the .csv files from the .txt output
17 |   * graphing.ipynb generates the figures in the paper from the .csv files
18 | * tensorflow (no changes)
19 | * third_party (no changes)
20 |
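21 | ## Usage Sketch (Illustrative)
22 | 
23 | The sketch below is an illustrative, untested example of how the FC layer and FL round API declared in `source/arduino_training_final_v3/FCLayer.h` could be driven from a host-side program. The feature size, batch size, quantization parameters, hyperparameters, and the decision to pass a null quantized-input buffer are placeholder assumptions; see `NeuralNetwork.cpp` and `simulation.cc` for the actual experiment code.
24 | 
25 | ```cpp
26 | #include "FCLayer.h"
27 | 
28 | int main() {
29 |   const int kFeatureSize = 256;  // assumed length of the frozen backbone's feature vector
30 |   const int kNumClasses = 2;     // person / not-a-person
31 |   const int kBatchSize = 16;     // assumed local batch size
32 | 
33 |   // One trainable FC layer; scale/zero_point are placeholder quantization
34 |   // parameters for the feature tensor feeding this layer.
35 |   FCLayer model(kFeatureSize, kNumClasses, /*scale=*/0.02, /*zero_point=*/-128,
36 |                 kBatchSize, /*default_weight=*/true);
37 | 
38 |   // Caller-owned buffers: already-dequantized features and one-hot labels.
39 |   static double feature_storage[kBatchSize][kFeatureSize];
40 |   static int label_storage[kBatchSize][kNumClasses];
41 |   double* features[kBatchSize];
42 |   int* labels[kBatchSize];
43 |   for (int i = 0; i < kBatchSize; ++i) {
44 |     features[i] = feature_storage[i];  // fill from the image provider / dataset
45 |     labels[i] = label_storage[i];      // one-hot ground truth
46 |   }
47 | 
48 |   // One purely local round: the float features are used directly, so no
49 |   // quantized input buffer is passed and unquantize stays false (assumption).
50 |   FL_round_simulation(features, /*input_i=*/nullptr, labels,
51 |                       /*local_episodes=*/10, /*learning_rate=*/0.01, &model,
52 |                       /*lambda=*/0.0, /*verbose=*/true, /*local=*/true,
53 |                       /*unquantize=*/false);
54 |   return 0;
55 | }
56 | ```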
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/optimized/neon_check.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
17 |
18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON)
19 | #define USE_NEON
20 | #include <arm_neon.h>
21 | #endif
22 |
23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
24 | #define USE_NEON
25 | #include "NEON_2_SSE.h"
26 | #endif
27 |
28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
29 | // defined, PortableSomeFunc(args) otherwise.
30 | #ifdef USE_NEON
31 | // Always use Neon code
32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
33 |
34 | #else
35 | // No NEON available: Use Portable code
36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
37 |
38 | #endif // defined(USE_NEON)
39 |
40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
41 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_profiler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/micro_profiler.h"
17 |
18 | #include "tensorflow/lite/kernels/internal/compatibility.h"
19 | #include "tensorflow/lite/micro/micro_time.h"
20 |
21 | namespace tflite {
22 |
23 | MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter)
24 | : reporter_(reporter) {}
25 |
26 | uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type,
27 | int64_t event_metadata1,
28 | int64_t event_metadata2) {
29 | start_time_ = GetCurrentTimeTicks();
30 | TFLITE_DCHECK(tag != nullptr);
31 | event_tag_ = tag;
32 | return 0;
33 | }
34 |
35 | void MicroProfiler::EndEvent(uint32_t event_handle) {
36 | int32_t end_time = GetCurrentTimeTicks();
37 | TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_,
38 | end_time - start_time_);
39 | }
40 |
41 | } // namespace tflite
42 |
--------------------------------------------------------------------------------
/previous_iterations/model_settings-original.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 | 
22 | // All of these values are derived from the values used during model training;
23 | // if you change your model you'll need to update these constants.
24 | constexpr int kNumCols = 96;
25 | constexpr int kNumRows = 96;
26 | constexpr int kNumChannels = 1;
27 |
28 | constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
29 |
30 | constexpr int kCategoryCount = 256;
31 | constexpr int kPersonIndex = 1;
32 | constexpr int kNotAPersonIndex = 0;
33 | extern const char* kCategoryLabels[kCategoryCount];
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
36 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 | 
22 | // All of these values are derived from the values used during model training;
23 | // if you change your model you'll need to update these constants.
24 | constexpr int kNumCols = 96;
25 | constexpr int kNumRows = 96;
26 | constexpr int kNumChannels = 3;
27 |
28 | constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
29 |
30 | constexpr int kCategoryCount = 2;
31 | constexpr int kPersonIndex = 1;
32 | constexpr int kNotAPersonIndex = 0;
33 | extern const char* kCategoryLabels[kCategoryCount];
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
36 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 | 
22 | // All of these values are derived from the values used during model training;
23 | // if you change your model you'll need to update these constants.
24 | constexpr int kNumCols = 96;
25 | constexpr int kNumRows = 96;
26 | constexpr int kNumChannels = 3;
27 |
28 | constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
29 |
30 | constexpr int kCategoryCount = 2;
31 | constexpr int kPersonIndex = 1;
32 | constexpr int kNotAPersonIndex = 0;
33 | extern const char* kCategoryLabels[kCategoryCount];
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
36 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized
20 | // arrays on the stack for our working memory.
21 |
22 | // All of these values are derived from the values used during model training;
23 | // if you change your model you'll need to update these constants.
24 | constexpr int kNumCols = 96;
25 | constexpr int kNumRows = 96;
26 | constexpr int kNumChannels = 3;
27 |
28 | constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
29 |
30 | constexpr int kCategoryCount = 2;
31 | constexpr int kPersonIndex = 1;
32 | constexpr int kNotAPersonIndex = 0;
33 | extern const char* kCategoryLabels[kCategoryCount];
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
36 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allows us to allocate fixed-sized
20 | // arrays on the stack for our working memory.
21 |
22 | // All of these values are derived from the values used during model training;
23 | // if you change your model you'll need to update these constants.
24 | constexpr int kNumCols = 96;
25 | constexpr int kNumRows = 96;
26 | constexpr int kNumChannels = 3;
27 |
28 | constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
29 |
30 | constexpr int kCategoryCount = 2;
31 | constexpr int kPersonIndex = 1;
32 | constexpr int kNotAPersonIndex = 0;
33 | extern const char* kCategoryLabels[kCategoryCount];
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
36 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
17 |
18 | #define kNoiseReductionBits 14
19 |
20 | #include <stdint.h>
21 | #include <stdlib.h>
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct NoiseReductionState {
28 | int smoothing_bits;
29 | uint16_t even_smoothing;
30 | uint16_t odd_smoothing;
31 | uint16_t min_signal_remaining;
32 | int num_channels;
33 | uint32_t* estimate;
34 | };
35 |
36 | // Removes stationary noise from each channel of the signal using a low pass
37 | // filter.
38 | void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);
39 |
40 | void NoiseReductionReset(struct NoiseReductionState* state);
41 |
42 | #ifdef __cplusplus
43 | } // extern "C"
44 | #endif
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
47 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #define kPcanSnrBits 12
22 | #define kPcanOutputBits 6
23 |
24 | #ifdef __cplusplus
25 | extern "C" {
26 | #endif
27 |
28 | // Details at https://research.google/pubs/pub45911.pdf
29 | struct PcanGainControlState {
30 | int enable_pcan;
31 | uint32_t* noise_estimate;
32 | int num_channels;
33 | int16_t* gain_lut;
34 | int32_t snr_shift;
35 | };
36 |
37 | int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
38 |
39 | uint32_t PcanShrink(const uint32_t x);
40 |
41 | void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
42 |
43 | #ifdef __cplusplus
44 | } // extern "C"
45 | #endif
46 |
47 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
48 |
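
The two bit-width constants determine the shifts used by PcanShrink (its definition appears in pcan_gain_control.c later in this listing). A small standalone sketch of that arithmetic, with hypothetical sample inputs:

    // Illustration only: with kPcanSnrBits = 12 and kPcanOutputBits = 6, the
    // quadratic branch shifts right by 2 + 2*12 - 6 = 20 and the linear branch
    // shifts right by 12 - 6 = 6 before subtracting 1 << 6.
    #include <cstdint>
    #include <iostream>

    static uint32_t ShrinkSketch(uint32_t x) {
      const int kSnrBits = 12, kOutputBits = 6;
      if (x < (2u << kSnrBits)) {
        return (x * x) >> (2 + 2 * kSnrBits - kOutputBits);           // >> 20
      }
      return (x >> (kSnrBits - kOutputBits)) - (1u << kOutputBits);   // >> 6, then - 64
    }

    int main() {
      std::cout << ShrinkSketch(1u << 10) << " " << ShrinkSketch(1u << 20) << "\n";  // 1 16320
      return 0;
    }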
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/window.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #define kFrontendWindowBits 12
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct WindowState {
28 | size_t size;
29 | int16_t* coefficients;
30 | size_t step;
31 |
32 | int16_t* input;
33 | size_t input_used;
34 | int16_t* output;
35 | int16_t max_abs_output_value;
36 | };
37 |
38 | // Applies a window to the samples coming in, stepping forward at the given
39 | // rate.
40 | int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
41 | size_t num_samples, size_t* num_samples_read);
42 |
43 | void WindowReset(struct WindowState* state);
44 |
45 | #ifdef __cplusplus
46 | } // extern "C"
47 | #endif
48 |
49 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
50 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct LogScaleConfig {
28 | // set to false (0) to disable this module
29 | int enable_log;
30 | // scale results by 2^(scale_shift)
31 | int scale_shift;
32 | };
33 |
34 | // Populates the LogScaleConfig with "sane" default values.
35 | void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);
36 |
37 | // Allocates any buffers.
38 | int LogScalePopulateState(const struct LogScaleConfig* config,
39 | struct LogScaleState* state);
40 |
41 | #ifdef __cplusplus
42 | } // extern "C"
43 | #endif
44 |
45 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
46 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/window_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct WindowConfig {
25 | // length of window frame in milliseconds
26 | size_t size_ms;
27 | // length of step for next frame in milliseconds
28 | size_t step_size_ms;
29 | };
30 |
31 | // Populates the WindowConfig with "sane" default values.
32 | void WindowFillConfigWithDefaults(struct WindowConfig* config);
33 |
34 | // Allocates any buffers.
35 | int WindowPopulateState(const struct WindowConfig* config,
36 | struct WindowState* state, int sample_rate);
37 |
38 | // Frees any allocated buffers.
39 | void WindowFreeStateContents(struct WindowState* state);
40 |
41 | #ifdef __cplusplus
42 | } // extern "C"
43 | #endif
44 |
45 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
46 |
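
A short sketch of the sample counts a window configuration implies; it assumes the usual size_ms * sample_rate / 1000 conversion applied when populating the state, and a hypothetical 16 kHz sample rate.

    // Sketch only: converting the millisecond-based WindowConfig fields into
    // sample counts for a 16 kHz input stream.
    #include <cstddef>
    #include <iostream>

    int main() {
      const std::size_t sample_rate = 16000;  // 16 kHz audio (assumed)
      const std::size_t size_ms = 25;         // analysis window length
      const std::size_t step_size_ms = 10;    // hop between windows
      const std::size_t window_samples = size_ms * sample_rate / 1000;     // 400
      const std::size_t step_samples = step_size_ms * sample_rate / 1000;  // 160
      std::cout << window_samples << " samples per window, "
                << step_samples << " samples per step\n";
      return 0;
    }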
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_reshape_s8.c
22 | * Description: Reshape a s8 vector
23 | *
24 | * $Date: September 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Reshape
39 | * @{
40 | */
41 |
42 | /**
43 | * Basic s8 reshape function.
44 | *
45 | * Refer header file for details.
46 | *
47 | */
48 |
49 | void arm_reshape_s8(const int8_t *input,
50 | int8_t *output,
51 | const uint32_t total_size)
52 | {
53 | memcpy(output, input, total_size);
54 | }
55 |
56 | /**
57 | * @} end of Reshape group
58 | */
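
Since a reshape never reorders elements, the function above is just a byte copy; a hypothetical call site might look like the following (the prototype is redeclared here only to keep the sketch self-contained).

    // Usage sketch: a 2x3 buffer is reinterpreted as 3x2 without changing
    // any element values.
    #include <cstdint>

    extern "C" void arm_reshape_s8(const int8_t *input, int8_t *output,
                                   const uint32_t total_size);

    int main() {
      int8_t in[2 * 3] = {1, 2, 3, 4, 5, 6};  // logically 2 x 3
      int8_t out[3 * 2];                      // reinterpreted as 3 x 2
      arm_reshape_s8(in, out, sizeof(in));    // plain copy of 6 bytes
      return 0;
    }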
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/log_lut.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
16 | const uint16_t kLogLut[]
17 | #ifndef _MSC_VER
18 | __attribute__((aligned(4)))
19 | #endif // _MSC_VER
20 | = {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163,
21 | 2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
22 | 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
23 | 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
24 | 5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
25 | 5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
26 | 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
27 | 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
28 | 3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
29 | 2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
30 | 1094, 963, 830, 695, 559, 421, 282, 142, 0, 0};
31 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/detection_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on the output from the person
17 | // detection model.
18 |
19 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
20 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
21 |
22 | #include "tensorflow/lite/c/common.h"
23 | #include "tensorflow/lite/micro/micro_error_reporter.h"
24 |
25 | // Called every time the results of a person detection run are available. The
26 | // `person_score` has the numerical confidence that the captured image contains
27 | // a person, and `no_person_score` has the numerical confidence that the image
28 | // does not contain a person. Typically if `person_score` > `no_person_score`, the
29 | // image is considered to contain a person. This threshold may be adjusted for
30 | // particular applications.
31 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
32 | int8_t person_score, int8_t no_person_score);
33 |
34 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
35 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/detection_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on the output from the person
17 | // detection model.
18 |
19 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
20 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
21 |
22 | #include "tensorflow/lite/c/common.h"
23 | #include "tensorflow/lite/micro/micro_error_reporter.h"
24 |
25 | // Called every time the results of a person detection run are available. The
26 | // `person_score` has the numerical confidence that the captured image contains
27 | // a person, and `no_person_score` has the numerical confidence that the image
28 | // does not contain a person. Typically if `person_score` > `no_person_score`, the
29 | // image is considered to contain a person. This threshold may be adjusted for
30 | // particular applications.
31 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
32 | int8_t person_score, int8_t no_person_score);
33 |
34 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
35 |
--------------------------------------------------------------------------------
/tensorflow/lite/core/api/tensor_utils.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/core/api/tensor_utils.h"
17 |
18 | #include <cstring>
19 |
20 | #include "tensorflow/lite/c/common.h"
21 |
22 | namespace tflite {
23 |
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
25 | if (!tensor->is_variable) {
26 | return kTfLiteOk;
27 | }
28 | // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
29 | // to the value of the buffer.
30 | int value = 0;
31 | if (tensor->type == kTfLiteInt8) {
32 | value = tensor->params.zero_point;
33 | }
34 | // TODO(b/139446230): Provide a platform header to better handle these
35 | // specific scenarios.
36 | #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \
37 | defined(__i386) || defined(__x86__) || defined(__X86__) || \
38 | defined(_X86_) || defined(_M_IX86) || defined(_M_X64)
39 | memset(tensor->data.raw, value, tensor->bytes);
40 | #else
41 | char* raw_ptr = tensor->data.raw;
42 | for (size_t i = 0; i < tensor->bytes; ++i) {
43 | *raw_ptr = value;
44 | raw_ptr++;
45 | }
46 | #endif
47 | return kTfLiteOk;
48 | }
49 |
50 | } // namespace tflite
51 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/tensor_ctypes.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/types.h"
20 |
21 | namespace tflite {
22 |
23 | template <typename T>
24 | inline T* GetTensorData(TfLiteTensor* tensor) {
25 |   return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
26 | }
27 |
28 | template <typename T>
29 | inline const T* GetTensorData(const TfLiteTensor* tensor) {
30 |   return tensor != nullptr ? reinterpret_cast<const T*>(tensor->data.raw)
31 |                            : nullptr;
32 | }
33 |
34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) {
35 | if (tensor == nullptr) {
36 | return RuntimeShape();
37 | }
38 |
39 | TfLiteIntArray* dims = tensor->dims;
40 | const int dims_size = dims->size;
41 |   const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims->data);
42 | return RuntimeShape(dims_size, dims_data);
43 | }
44 |
45 | } // namespace tflite
46 |
47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
48 |
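
A sketch of how these helpers are typically used, assuming the full TfLite C API (including TfLiteIntArrayCreate/TfLiteIntArrayFree) is available; the hand-built tensor below is purely illustrative.

    // Sketch only: build a float tensor by hand and read it back through the
    // typed accessors defined above.
    #include <iostream>
    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"

    int main() {
      float buffer[6] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};

      TfLiteTensor tensor = {};
      tensor.type = kTfLiteFloat32;
      tensor.data.raw = reinterpret_cast<char*>(buffer);
      tensor.bytes = sizeof(buffer);
      tensor.dims = TfLiteIntArrayCreate(2);  // shape {2, 3}
      tensor.dims->data[0] = 2;
      tensor.dims->data[1] = 3;

      const float* data = tflite::GetTensorData<float>(&tensor);
      tflite::RuntimeShape shape = tflite::GetTensorShape(&tensor);
      std::cout << shape.FlatSize() << " elements, first = " << data[0] << "\n";

      TfLiteIntArrayFree(tensor.dims);
      return 0;
    }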
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/detection_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on the output from the person
17 | // detection model.
18 |
19 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
20 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
21 |
22 | #include "tensorflow/lite/c/common.h"
23 | #include "tensorflow/lite/micro/micro_error_reporter.h"
24 |
25 | // Called every time the results of a person detection run are available. The
26 | // `person_score` has the numerical confidence that the captured image contains
27 | // a person, and `no_person_score` has the numerical confidence that the image
28 | // does not contain a person. Typically if `person_score` > `no_person_score`, the
29 | // image is considered to contain a person. This threshold may be adjusted for
30 | // particular applications.
31 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
32 | int8_t person_score, int8_t no_person_score);
33 |
34 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
35 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/detection_responder.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Provides an interface to take an action based on the output from the person
17 | // detection model.
18 |
19 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
20 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
21 |
22 | #include "tensorflow/lite/c/common.h"
23 | #include "tensorflow/lite/micro/micro_error_reporter.h"
24 |
25 | // Called every time the results of a person detection run are available. The
26 | // `person_score` has the numerical confidence that the captured image contains
27 | // a person, and `no_person_score` has the numerical confidence that the image
28 | // does not contain a person. Typically if `person_score` > `no_person_score`, the
29 | // image is considered to contain a person. This threshold may be adjusted for
30 | // particular applications.
31 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
32 | int8_t person_score, int8_t no_person_score);
33 |
34 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
35 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_relu6_s8.c
22 | * Description: Basic s8 version of ReLU6
23 | *
24 | * $Date: September 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/arm_math.h"
32 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Acti
40 | * @{
41 | */
42 |
43 | /*
44 | * Basic ReLU6 function
45 | *
46 | * Refer to header file for details.
47 | *
48 | */
49 |
50 | void arm_relu6_s8(q7_t *data, uint16_t size)
51 | {
52 | int32_t i;
53 |
54 | for (i = 0; i < size; i++)
55 | {
56 | int32_t ip = data[i];
57 |
58 | ip = MAX(ip, 0);
59 | data[i] = MIN(ip, 6);
60 | }
61 | }
62 |
63 | /**
64 | * @} end of Acti group
65 | */
66 |
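
A hypothetical call site for the function above; q7_t is an int8_t in CMSIS terms, so the prototype is redeclared with int8_t here to keep the sketch self-contained.

    // Usage sketch: ReLU6 clamps each element to the range [0, 6] in place.
    #include <cstdint>

    extern "C" void arm_relu6_s8(int8_t *data, uint16_t size);

    int main() {
      int8_t activations[5] = {-3, 0, 2, 6, 9};
      arm_relu6_s8(activations, 5);  // becomes {0, 0, 2, 6, 6}
      return 0;
    }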
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct FilterbankConfig {
25 | // number of frequency channel buckets for filterbank
26 | int num_channels;
27 | // maximum frequency to include
28 | float upper_band_limit;
29 | // minimum frequency to include
30 | float lower_band_limit;
31 | // unused
32 | int output_scale_shift;
33 | };
34 |
35 | // Fills the FilterbankConfig with "sane" defaults.
36 | void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
37 |
38 | // Allocates any buffers.
39 | int FilterbankPopulateState(const struct FilterbankConfig* config,
40 | struct FilterbankState* state, int sample_rate,
41 | int spectrum_size);
42 |
43 | // Frees any allocated buffers.
44 | void FilterbankFreeStateContents(struct FilterbankState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
51 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
16 |
17 | #include <stdio.h>
18 |
19 | void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) {
20 | config->smoothing_bits = 10;
21 | config->even_smoothing = 0.025;
22 | config->odd_smoothing = 0.06;
23 | config->min_signal_remaining = 0.05;
24 | }
25 |
26 | int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
27 | struct NoiseReductionState* state,
28 | int num_channels) {
29 | state->smoothing_bits = config->smoothing_bits;
30 | state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits);
31 | state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits);
32 | state->min_signal_remaining =
33 | config->min_signal_remaining * (1 << kNoiseReductionBits);
34 | state->num_channels = num_channels;
35 | state->estimate = calloc(state->num_channels, sizeof(*state->estimate));
36 | if (state->estimate == NULL) {
37 | fprintf(stderr, "Failed to alloc estimate buffer\n");
38 | return 0;
39 | }
40 | return 1;
41 | }
42 |
43 | void NoiseReductionFreeStateContents(struct NoiseReductionState* state) {
44 | free(state->estimate);
45 | }
46 |
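
The float coefficients above are converted to fixed point by multiplying by 2^kNoiseReductionBits = 16384 (so even_smoothing 0.025 maps to roughly 409). A sketch of the populate/apply/free lifecycle, assuming a hypothetical 40-channel filterbank:

    // Sketch only: default config -> per-channel state -> apply -> free.
    #include <stdint.h>
    #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
    #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"

    int main() {
      struct NoiseReductionConfig config;
      NoiseReductionFillConfigWithDefaults(&config);

      struct NoiseReductionState state;
      if (!NoiseReductionPopulateState(&config, &state, /*num_channels=*/40)) {
        return 1;  // the per-channel estimate buffer could not be allocated
      }

      uint32_t signal[40] = {0};            // one value per filterbank channel
      NoiseReductionApply(&state, signal);  // removes the running noise estimate

      NoiseReductionFreeStateContents(&state);
      return 0;
    }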
--------------------------------------------------------------------------------
/tensorflow/lite/micro/memory_helpers.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
17 |
18 | #include <cstddef>
19 | #include <cstdint>
20 |
21 | #include "tensorflow/lite/c/common.h"
22 | #include "tensorflow/lite/core/api/error_reporter.h"
23 | #include "tensorflow/lite/schema/schema_generated.h"
24 |
25 | namespace tflite {
26 |
27 | // Returns the next pointer address aligned to the given alignment.
28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment);
29 |
30 | // Returns the previous pointer address aligned to the given alignment.
31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment);
32 |
33 | // Returns an increased size that's a multiple of alignment.
34 | size_t AlignSizeUp(size_t size, size_t alignment);
35 |
36 | // Returns size in bytes for a given TfLiteType.
37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size,
38 | ErrorReporter* reporter);
39 |
40 | // How many bytes are needed to hold a tensor's contents.
41 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor,
42 | size_t* bytes, size_t* type_size,
43 | ErrorReporter* error_reporter);
44 |
45 | } // namespace tflite
46 |
47 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
48 |
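
A standalone illustration (not the repository implementation) of the conventional round-up arithmetic these alignment helpers expose; the function name below is hypothetical.

    // Sketch only: rounding a size up to the next multiple of an alignment.
    #include <cstddef>
    #include <iostream>

    static std::size_t AlignSizeUpSketch(std::size_t size, std::size_t alignment) {
      return ((size + alignment - 1) / alignment) * alignment;
    }

    int main() {
      std::cout << AlignSizeUpSketch(10, 4) << "\n";  // 12
      std::cout << AlignSizeUpSketch(16, 4) << "\n";  // 16
      return 0;
    }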
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/reference/round.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
17 |
18 | #include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
26 | inline float RoundToNearest(float value) {
27 | auto floor_val = std::floor(value);
28 | auto diff = value - floor_val;
29 |   if ((diff < 0.5f) ||
30 |       ((diff == 0.5f) && (static_cast<int>(floor_val) % 2 == 0))) {
31 |     return floor_val;
32 |   } else {
33 |     return floor_val + 1.0f;
34 | }
35 | }
36 |
37 | inline void Round(const RuntimeShape& input_shape, const float* input_data,
38 | const RuntimeShape& output_shape, float* output_data) {
39 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
40 | for (int i = 0; i < flat_size; ++i) {
41 |     // Note that this implementation matches that of TensorFlow's tf.round
42 |     // and corresponds to the banker's rounding method.
43 | // cfenv (for fesetround) is not yet supported universally on Android, so
44 | // using a work around.
45 | output_data[i] = RoundToNearest(input_data[i]);
46 | }
47 | }
48 |
49 | } // namespace reference_ops
50 | } // namespace tflite
51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
52 |
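
A few worked examples of the round-half-to-even ("banker's") behaviour; the sketch simply mirrors the RoundToNearest logic above so the expected outputs can be checked by hand.

    // Ties (fraction exactly 0.5) round towards the even integer; everything
    // else rounds to the nearest integer as usual.
    #include <cmath>
    #include <iostream>

    static float RoundToNearestSketch(float value) {  // mirrors RoundToNearest
      const float floor_val = std::floor(value);
      const float diff = value - floor_val;
      if (diff < 0.5f || (diff == 0.5f && static_cast<int>(floor_val) % 2 == 0)) {
        return floor_val;
      }
      return floor_val + 1.0f;
    }

    int main() {
      std::cout << RoundToNearestSketch(2.5f) << "\n";   // 2  (tie -> even)
      std::cout << RoundToNearestSketch(3.5f) << "\n";   // 4  (tie -> even)
      std::cout << RoundToNearestSketch(2.3f) << "\n";   // 2
      std::cout << RoundToNearestSketch(-0.5f) << "\n";  // 0
      return 0;
    }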
--------------------------------------------------------------------------------
/tensorflow/lite/micro/memory_planner/linear_memory_planner.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
18 |
19 | #include "tensorflow/lite/micro/compatibility.h"
20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h"
21 |
22 | namespace tflite {
23 |
24 | // The simplest possible memory planner that just lays out all buffers at
25 | // increasing offsets without trying to reuse memory.
26 | class LinearMemoryPlanner : public MemoryPlanner {
27 | public:
28 | LinearMemoryPlanner();
29 | ~LinearMemoryPlanner() override;
30 |
31 | TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size,
32 | int first_time_used, int last_time_used) override;
33 |
34 | size_t GetMaximumMemorySize() override;
35 | int GetBufferCount() override;
36 | TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter,
37 | int buffer_index, int* offset) override;
38 |
39 | private:
40 | static constexpr int kMaxBufferCount = 1024;
41 | size_t buffer_offsets_[kMaxBufferCount];
42 | int current_buffer_count_;
43 | size_t next_free_offset_;
44 |
45 | TF_LITE_REMOVE_VIRTUAL_DELETE
46 | };
47 |
48 | } // namespace tflite
49 |
50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
51 |
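
A usage sketch, assuming (as the class comment states) that offsets simply accumulate the requested sizes with no reuse; MicroErrorReporter is taken from the micro framework and the buffer sizes are hypothetical.

    // Sketch only: two buffers of 256 and 128 bytes laid out back to back.
    #include <iostream>
    #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
    #include "tensorflow/lite/micro/micro_error_reporter.h"

    int main() {
      tflite::MicroErrorReporter error_reporter;
      tflite::LinearMemoryPlanner planner;

      planner.AddBuffer(&error_reporter, /*size=*/256, /*first_time_used=*/0,
                        /*last_time_used=*/1);
      planner.AddBuffer(&error_reporter, /*size=*/128, /*first_time_used=*/1,
                        /*last_time_used=*/2);

      int offset = 0;
      planner.GetOffsetForBuffer(&error_reporter, /*buffer_index=*/1, &offset);
      std::cout << "second buffer offset: " << offset << "\n";                 // 256
      std::cout << "total arena needed: " << planner.GetMaximumMemorySize()
                << "\n";                                                       // 384
      return 0;
    }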
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nn_tables.h:
--------------------------------------------------------------------------------
1 | /* ----------------------------------------------------------------------
2 | * Project: CMSIS NN Library
3 | * Title: arm_nn_tables.h
4 | * Description: Extern declaration for NN tables
5 | *
6 | * $Date: 17. January 2018
7 | * $Revision: V.1.0.0
8 | *
9 | * Target Processor: Cortex-M cores
10 | * -------------------------------------------------------------------- */
11 | /*
12 | * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
13 | *
14 | * SPDX-License-Identifier: Apache-2.0
15 | *
16 | * Licensed under the Apache License, Version 2.0 (the License); you may
17 | * not use this file except in compliance with the License.
18 | * You may obtain a copy of the License at
19 | *
20 | * www.apache.org/licenses/LICENSE-2.0
21 | *
22 | * Unless required by applicable law or agreed to in writing, software
23 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25 | * See the License for the specific language governing permissions and
26 | * limitations under the License.
27 | */
28 |
29 | #ifndef _ARM_NN_TABLES_H
30 | #define _ARM_NN_TABLES_H
31 |
32 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/arm_math.h"
33 |
34 | /**
35 | * @brief tables for various activation functions
36 | *
37 | */
38 |
39 | extern const q15_t sigmoidTable_q15[256];
40 | extern const q7_t sigmoidTable_q7[256];
41 |
42 | extern const q7_t tanhTable_q7[256];
43 | extern const q15_t tanhTable_q15[256];
44 |
45 | /**
46 | * @brief 2-way tables for various activation functions
47 | *
48 | * 2-way table, H table for value larger than 1/4
49 | * L table for value smaller than 1/4, H table for remaining
50 | * We have this only for the q15_t version. It does not make
51 | * sense to have it for q7_t type
52 | */
53 | extern const q15_t sigmoidHTable_q15[192];
54 | extern const q15_t sigmoidLTable_q15[128];
55 |
56 | #endif /* ARM_NN_TABLES_H */
57 |
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/arduino_detection_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 |
13 | #include "detection_responder.h"
14 |
15 | #include "Arduino.h"
16 |
17 | // Flash the blue LED after each inference
18 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
19 | int8_t person_score, int8_t no_person_score) {
20 | static bool is_initialized = false;
21 | if (!is_initialized) {
22 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
23 | pinMode(LEDR, OUTPUT);
24 | pinMode(LEDG, OUTPUT);
25 | pinMode(LEDB, OUTPUT);
26 | is_initialized = true;
27 | }
28 |
29 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
30 | // Sense are on when the pin is LOW, off when HIGH.
31 |
32 | // Switch the person/not person LEDs off
33 | digitalWrite(LEDG, HIGH);
34 | digitalWrite(LEDR, HIGH);
35 |
36 | // Flash the blue LED after every inference.
37 | digitalWrite(LEDB, LOW);
38 | delay(100);
39 | digitalWrite(LEDB, HIGH);
40 |
41 | // Switch on the green LED when a person is detected,
42 | // the red when no person is detected
43 | if (person_score > no_person_score) {
44 | digitalWrite(LEDG, LOW);
45 | digitalWrite(LEDR, HIGH);
46 | } else {
47 | digitalWrite(LEDG, HIGH);
48 | digitalWrite(LEDR, LOW);
49 | }
50 |
51 | TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
52 | person_score, no_person_score);
53 | }
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/arduino_detection_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 |
13 | #include "detection_responder.h"
14 |
15 | #include "Arduino.h"
16 |
17 | // Flash the blue LED after each inference
18 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
19 | int8_t person_score, int8_t no_person_score) {
20 | static bool is_initialized = false;
21 | if (!is_initialized) {
22 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
23 | pinMode(LEDR, OUTPUT);
24 | pinMode(LEDG, OUTPUT);
25 | pinMode(LEDB, OUTPUT);
26 | is_initialized = true;
27 | }
28 |
29 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
30 | // Sense are on when the pin is LOW, off when HIGH.
31 |
32 | // Switch the person/not person LEDs off
33 | digitalWrite(LEDG, HIGH);
34 | digitalWrite(LEDR, HIGH);
35 |
36 | // Flash the blue LED after every inference.
37 | digitalWrite(LEDB, LOW);
38 | delay(100);
39 | digitalWrite(LEDB, HIGH);
40 |
41 | // Switch on the green LED when a person is detected,
42 | // the red when no person is detected
43 | if (person_score > no_person_score) {
44 | digitalWrite(LEDG, LOW);
45 | digitalWrite(LEDR, HIGH);
46 | } else {
47 | digitalWrite(LEDG, HIGH);
48 | digitalWrite(LEDR, LOW);
49 | }
50 |
51 | TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
52 | person_score, no_person_score);
53 | }
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/arduino_detection_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 |
13 | #include "detection_responder.h"
14 |
15 | #include "Arduino.h"
16 |
17 | // Flash the blue LED after each inference
18 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
19 | int8_t person_score, int8_t no_person_score) {
20 | static bool is_initialized = false;
21 | if (!is_initialized) {
22 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
23 | pinMode(LEDR, OUTPUT);
24 | pinMode(LEDG, OUTPUT);
25 | pinMode(LEDB, OUTPUT);
26 | is_initialized = true;
27 | }
28 |
29 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
30 | // Sense are on when the pin is LOW, off when HIGH.
31 |
32 | // Switch the person/not person LEDs off
33 | digitalWrite(LEDG, HIGH);
34 | digitalWrite(LEDR, HIGH);
35 |
36 | // Flash the blue LED after every inference.
37 | digitalWrite(LEDB, LOW);
38 | delay(100);
39 | digitalWrite(LEDB, HIGH);
40 |
41 | // Switch on the green LED when a person is detected,
42 | // the red when no person is detected
43 | if (person_score > no_person_score) {
44 | digitalWrite(LEDG, LOW);
45 | digitalWrite(LEDR, HIGH);
46 | } else {
47 | digitalWrite(LEDG, HIGH);
48 | digitalWrite(LEDR, LOW);
49 | }
50 |
51 | TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
52 | person_score, no_person_score);
53 | }
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/arduino_detection_responder.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 |
13 | #include "detection_responder.h"
14 |
15 | #include "Arduino.h"
16 |
17 | // Flash the blue LED after each inference
18 | void RespondToDetection(tflite::ErrorReporter* error_reporter,
19 | int8_t person_score, int8_t no_person_score) {
20 | static bool is_initialized = false;
21 | if (!is_initialized) {
22 | // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
23 | pinMode(LEDR, OUTPUT);
24 | pinMode(LEDG, OUTPUT);
25 | pinMode(LEDB, OUTPUT);
26 | is_initialized = true;
27 | }
28 |
29 | // Note: The RGB LEDs on the Arduino Nano 33 BLE
30 | // Sense are on when the pin is LOW, off when HIGH.
31 |
32 | // Switch the person/not person LEDs off
33 | digitalWrite(LEDG, HIGH);
34 | digitalWrite(LEDR, HIGH);
35 |
36 | // Flash the blue LED after every inference.
37 | digitalWrite(LEDB, LOW);
38 | delay(100);
39 | digitalWrite(LEDB, HIGH);
40 |
41 | // Switch on the green LED when a person is detected,
42 | // the red when no person is detected
43 | if (person_score > no_person_score) {
44 | digitalWrite(LEDG, LOW);
45 | digitalWrite(LEDR, HIGH);
46 | } else {
47 | digitalWrite(LEDG, HIGH);
48 | digitalWrite(LEDR, LOW);
49 | }
50 |
51 | TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
52 | person_score, no_person_score);
53 | }
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct NoiseReductionConfig {
25 | // scale the signal up by 2^(smoothing_bits) before reduction
26 | int smoothing_bits;
27 | // smoothing coefficient for even-numbered channels
28 | float even_smoothing;
29 | // smoothing coefficient for odd-numbered channels
30 | float odd_smoothing;
31 | // fraction of signal to preserve (1.0 disables this module)
32 | float min_signal_remaining;
33 | };
34 |
35 | // Populates the NoiseReductionConfig with "sane" default values.
36 | void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
37 |
38 | // Allocates any buffers.
39 | int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
40 | struct NoiseReductionState* state,
41 | int num_channels);
42 |
43 | // Frees any allocated buffers.
44 | void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
51 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/fft.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
16 |
17 | #include <string.h>
18 |
19 | #define FIXED_POINT 16
20 | #include "third_party/kissfft/kiss_fft.h"
21 | #include "third_party/kissfft/tools/kiss_fftr.h"
22 |
23 | void FftCompute(struct FftState* state, const int16_t* input,
24 | int input_scale_shift) {
25 | const size_t input_size = state->input_size;
26 | const size_t fft_size = state->fft_size;
27 |
28 | int16_t* fft_input = state->input;
29 | // First, scale the input by the given shift.
30 | size_t i;
31 | for (i = 0; i < input_size; ++i) {
32 |     fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i])
33 |                                         << input_scale_shift);
34 | }
35 | // Zero out whatever else remains in the top part of the input.
36 | for (; i < fft_size; ++i) {
37 | fft_input[i] = 0;
38 | }
39 |
40 | // Apply the FFT.
41 | kiss_fftr(
42 |       reinterpret_cast<kiss_fftr_cfg>(state->scratch),
43 |       state->input,
44 |       reinterpret_cast<kiss_fft_cpx*>(state->output));
45 | }
46 |
47 | void FftInit(struct FftState* state) {
48 | // All the initialization is done in FftPopulateState()
49 | }
50 |
51 | void FftReset(struct FftState* state) {
52 | memset(state->input, 0, state->fft_size * sizeof(*state->input));
53 | memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output));
54 | }
55 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
16 |
17 | #include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
18 |
19 | int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
20 | if (x <= 2) {
21 | return lut[x];
22 | }
23 |
24 | const int16_t interval = MostSignificantBit32(x);
25 | lut += 4 * interval - 6;
26 |
27 | const int16_t frac =
28 | ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
29 | 0x3FF;
30 |
31 | int32_t result = ((int32_t)lut[2] * frac) >> 5;
32 | result += (int32_t)((uint32_t)lut[1] << 5);
33 | result *= frac;
34 | result = (result + (1 << 14)) >> 15;
35 | result += lut[0];
36 | return (int16_t)result;
37 | }
38 |
39 | uint32_t PcanShrink(const uint32_t x) {
40 | if (x < (2 << kPcanSnrBits)) {
41 | return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
42 | } else {
43 | return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
44 | }
45 | }
46 |
47 | void PcanGainControlApply(struct PcanGainControlState* state,
48 | uint32_t* signal) {
49 | int i;
50 | for (i = 0; i < state->num_channels; ++i) {
51 | const uint32_t gain =
52 | WideDynamicFunction(state->noise_estimate[i], state->gain_lut);
53 | const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift;
54 | signal[i] = PcanShrink(snr);
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/kernels/activation_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
18 |
19 | #include <cmath>
20 | #include <cstdint>
21 |
22 | #include "tensorflow/lite/c/builtin_op_data.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/max.h"
25 | #include "tensorflow/lite/kernels/internal/min.h"
26 |
27 | namespace tflite {
28 | namespace ops {
29 | namespace micro {
30 |
31 | // Returns the floating point value for a fused activation:
32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
33 | switch (act) {
34 | case kTfLiteActNone:
35 | return a;
36 | case kTfLiteActRelu:
37 | return TfLiteMax(0.0f, a);
38 | case kTfLiteActReluN1To1:
39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f));
40 | case kTfLiteActRelu6:
41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f));
42 | case kTfLiteActTanh:
43 | return std::tanh(a);
44 | case kTfLiteActSignBit:
45 | return std::signbit(a);
46 | case kTfLiteActSigmoid:
47 | return 1.0f / (1.0f + std::exp(-a));
48 | }
49 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused
50 | // activation is added to the enum and not handled here).
51 | }
52 |
53 | } // namespace micro
54 | } // namespace ops
55 | } // namespace tflite
56 |
57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
58 |
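For reference, a tiny usage sketch (not from the repository) showing what the helper returns for a couple of fused activations:

#include "tensorflow/lite/micro/kernels/activation_utils.h"

float ActivationExamples() {
  using tflite::ops::micro::ActivationValFloat;
  const float relu = ActivationValFloat(kTfLiteActRelu, -3.0f);   // clamped to 0.0f
  const float relu6 = ActivationValFloat(kTfLiteActRelu6, 7.5f);  // clamped to 6.0f
  return relu + relu6;
}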
--------------------------------------------------------------------------------
/tensorflow/lite/micro/micro_time.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Reference implementation of timer functions. Platforms are not required to
17 | // implement these timer methods, but they are required to enable profiling.
18 |
19 | // On platforms that have a POSIX stack or C library, it can be written using
20 | // methods from <sys/time.h> or clock() from <time.h>.
21 |
22 | // To add an equivalent function for your own platform, create your own
23 | // implementation file, and place it in a subfolder named after the OS
24 | // you're targeting. For example, see the Cortex M bare metal version in
25 | // tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one in
26 | // tensorflow/lite/micro/mbed/micro_time.cc.
27 |
28 | #include "tensorflow/lite/micro/micro_time.h"
29 |
30 | namespace tflite {
31 |
32 | // Reference implementation of the ticks_per_second() function that's required
33 | // for a platform to support Tensorflow Lite for Microcontrollers profiling.
34 | // This returns 0 by default because timing is an optional feature that builds
35 | // without errors on platforms that do not need it.
36 | int32_t ticks_per_second() { return 0; }
37 |
38 | // Reference implementation of the GetCurrentTimeTicks() function that's
39 | // required for a platform to support Tensorflow Lite for Microcontrollers
40 | // profiling. This returns 0 by default because timing is an optional feature
41 | // that builds without errors on platforms that do not need it.
42 | int32_t GetCurrentTimeTicks() { return 0; }
43 |
44 | } // namespace tflite
45 |
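As the comments above describe, a port overrides these two functions in its own subfolder. A minimal sketch (not part of the repository) for a platform whose C library provides clock(); a bare-metal target would read a hardware timer instead:

#include <ctime>

#include "tensorflow/lite/micro/micro_time.h"

namespace tflite {

// Ticks are reported in clock() units; divide durations by ticks_per_second()
// to convert profiling results into seconds.
int32_t ticks_per_second() { return static_cast<int32_t>(CLOCKS_PER_SEC); }

int32_t GetCurrentTimeTicks() { return static_cast<int32_t>(std::clock()); }

}  // namespace tflite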
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
16 |
17 | #include <string.h>
18 |
19 | void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) {
20 | int i;
21 | for (i = 0; i < state->num_channels; ++i) {
22 | const uint32_t smoothing =
23 | ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing;
24 | const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing;
25 |
26 | // Update the estimate of the noise.
27 | const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits;
28 | uint32_t estimate =
29 | (((uint64_t)signal_scaled_up * smoothing) +
30 | ((uint64_t)state->estimate[i] * one_minus_smoothing)) >>
31 | kNoiseReductionBits;
32 | state->estimate[i] = estimate;
33 |
34 | // Make sure that we can't get a negative value for the signal - estimate.
35 | if (estimate > signal_scaled_up) {
36 | estimate = signal_scaled_up;
37 | }
38 |
39 | const uint32_t floor =
40 | ((uint64_t)signal[i] * state->min_signal_remaining) >>
41 | kNoiseReductionBits;
42 | const uint32_t subtracted =
43 | (signal_scaled_up - estimate) >> state->smoothing_bits;
44 | const uint32_t output = subtracted > floor ? subtracted : floor;
45 | signal[i] = output;
46 | }
47 | }
48 |
49 | void NoiseReductionReset(struct NoiseReductionState* state) {
50 | memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels);
51 | }
52 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/kernels/floor.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/kernels/internal/reference/floor.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
20 | #include "tensorflow/lite/kernels/kernel_util.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace micro {
25 | namespace floor {
26 |
27 | constexpr int kInputTensor = 0;
28 | constexpr int kOutputTensor = 0;
29 |
30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor);
32 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
33 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
34 |   reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
35 |                        GetTensorShape(output), GetTensorData<float>(output));
36 | return kTfLiteOk;
37 | }
38 | } // namespace floor
39 |
40 | TfLiteRegistration* Register_FLOOR() {
41 | static TfLiteRegistration r = {/*init=*/nullptr,
42 | /*free=*/nullptr,
43 | /*prepare=*/nullptr,
44 | /*invoke=*/floor::Eval,
45 | /*profiling_string=*/nullptr,
46 | /*builtin_code=*/0,
47 | /*custom_name=*/nullptr,
48 | /*version=*/0};
49 | return &r;
50 | }
51 |
52 | } // namespace micro
53 | } // namespace ops
54 | } // namespace tflite
55 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_w.c
22 | * Description: s8 version of concatenation along the W axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the W axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
48 | void arm_concatenation_s8_w(const int8_t *input,
49 | const uint16_t input_x,
50 | const uint16_t input_y,
51 | const uint16_t input_z,
52 | const uint16_t input_w,
53 | int8_t *output,
54 | const uint32_t offset_w)
55 | {
56 | const uint32_t input_copy_size = input_x * input_y * input_z * input_w;
57 |
58 | output += offset_w * (input_x * input_y * input_z);
59 |
60 | memcpy(output, input, input_copy_size);
61 | }
62 |
63 | /**
64 | * @} end of Concatenation group
65 | */
66 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
17 |
18 | namespace tflite {
19 |
20 | LinearMemoryPlanner::LinearMemoryPlanner()
21 | : current_buffer_count_(0), next_free_offset_(0) {}
22 | LinearMemoryPlanner::~LinearMemoryPlanner() {}
23 |
24 | TfLiteStatus LinearMemoryPlanner::AddBuffer(
25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used,
26 | int last_time_used) {
27 | if (current_buffer_count_ >= kMaxBufferCount) {
28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)",
29 | kMaxBufferCount);
30 | return kTfLiteError;
31 | }
32 | buffer_offsets_[current_buffer_count_] = next_free_offset_;
33 | next_free_offset_ += size;
34 | ++current_buffer_count_;
35 | return kTfLiteOk;
36 | }
37 |
38 | size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; }
39 |
40 | int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; }
41 |
42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer(
43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) {
44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) {
45 | TF_LITE_REPORT_ERROR(error_reporter,
46 | "buffer index %d is outside range 0 to %d",
47 | buffer_index, current_buffer_count_);
48 | return kTfLiteError;
49 | }
50 | *offset = buffer_offsets_[buffer_index];
51 | return kTfLiteOk;
52 | }
53 |
54 | } // namespace tflite
55 |
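A short usage sketch (not part of the repository) of the planner above; MicroErrorReporter comes from tensorflow/lite/micro/micro_error_reporter.h, and the buffer sizes are arbitrary:

#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

int PlanTwoBuffers() {
  tflite::MicroErrorReporter error_reporter;
  tflite::LinearMemoryPlanner planner;

  // Each buffer is simply appended, so offsets grow monotonically.
  planner.AddBuffer(&error_reporter, /*size=*/256, /*first_time_used=*/0,
                    /*last_time_used=*/1);
  planner.AddBuffer(&error_reporter, /*size=*/128, /*first_time_used=*/1,
                    /*last_time_used=*/2);

  int offset = 0;
  planner.GetOffsetForBuffer(&error_reporter, /*buffer_index=*/1, &offset);
  // offset == 256, and GetMaximumMemorySize() == 384 for this plan.
  return offset;
}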
--------------------------------------------------------------------------------
/source/arduino_training_final_v3/image_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an image source like a camera, and is
23 | // expected to return 8-bit sample data. The assumption is that this will be
24 | // called in a low duty-cycle fashion in a low-power application. In these
25 | // cases, the imaging sensor need not be run in a streaming mode, but rather can
26 | // be idled in a relatively low-power mode between calls to GetImage(). The
27 | // assumption is that the overhead and time of bringing the low-power sensor out
28 | // of this standby mode is commensurate with the expected duty cycle of the
29 | // application. The underlying sensor may actually be put into a streaming
30 | // configuration, but the image buffer provided to GetImage should not be
31 | // overwritten by the driver code until the next call to GetImage();
32 | //
33 | // The reference implementation can have no platform-specific dependencies, so
34 | // it just returns a static image. For real applications, you should
35 | // ensure there's a specialized implementation that accesses hardware APIs.
36 | TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
37 | int image_height, int channels, int8_t* image_data);
38 |
39 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
40 |
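A minimal fallback implementation of this interface (a sketch only; a camera-backed version would copy real sensor data into image_data instead) just fills the buffer and reports success:

#include "image_provider.h"

TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  // Return a static all-zero frame, matching the "no platform-specific
  // dependencies" reference behaviour described in the comment above.
  for (int i = 0; i < image_width * image_height * channels; ++i) {
    image_data[i] = 0;
  }
  return kTfLiteOk;
}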
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final/image_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an image source like a camera, and is
23 | // expected to return 8-bit sample data. The assumption is that this will be
24 | // called in a low duty-cycle fashion in a low-power application. In these
25 | // cases, the imaging sensor need not be run in a streaming mode, but rather can
26 | // be idled in a relatively low-power mode between calls to GetImage(). The
27 | // assumption is that the overhead and time of bringing the low-power sensor out
28 | // of this standby mode is commensurate with the expected duty cycle of the
29 | // application. The underlying sensor may actually be put into a streaming
30 | // configuration, but the image buffer provided to GetImage should not be
31 | // overwritten by the driver code until the next call to GetImage();
32 | //
33 | // The reference implementation can have no platform-specific dependencies, so
34 | // it just returns a static image. For real applications, you should
35 | // ensure there's a specialized implementation that accesses hardware APIs.
36 | TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
37 | int image_height, int channels, int8_t* image_data);
38 |
39 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
40 |
--------------------------------------------------------------------------------
/previous_iterations/arduino_training_final_v2/image_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an image source like a camera, and is
23 | // expected to return 8-bit sample data. The assumption is that this will be
24 | // called in a low duty-cycle fashion in a low-power application. In these
25 | // cases, the imaging sensor need not be run in a streaming mode, but rather can
26 | // be idled in a relatively low-power mode between calls to GetImage(). The
27 | // assumption is that the overhead and time of bringing the low-power sensor out
28 | // of this standby mode is commensurate with the expected duty cycle of the
29 | // application. The underlying sensor may actually be put into a streaming
30 | // configuration, but the image buffer provided to GetImage should not be
31 | // overwritten by the driver code until the next call to GetImage();
32 | //
33 | // The reference implementation can have no platform-specific dependencies, so
34 | // it just returns a static image. For real applications, you should
35 | // ensure there's a specialized implementation that accesses hardware APIs.
36 | TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
37 | int image_height, int channels, int8_t* image_data);
38 |
39 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
40 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
19 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
20 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
21 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
22 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
23 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
24 | #include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
25 |
26 | #ifdef __cplusplus
27 | extern "C" {
28 | #endif
29 |
30 | struct FrontendConfig {
31 | struct WindowConfig window;
32 | struct FilterbankConfig filterbank;
33 | struct NoiseReductionConfig noise_reduction;
34 | struct PcanGainControlConfig pcan_gain_control;
35 | struct LogScaleConfig log_scale;
36 | };
37 |
38 | // Fills the frontendConfig with "sane" defaults.
39 | void FrontendFillConfigWithDefaults(struct FrontendConfig* config);
40 |
41 | // Allocates any buffers.
42 | int FrontendPopulateState(const struct FrontendConfig* config,
43 | struct FrontendState* state, int sample_rate);
44 |
45 | // Frees any allocated buffers.
46 | void FrontendFreeStateContents(struct FrontendState* state);
47 |
48 | #ifdef __cplusplus
49 | } // extern "C"
50 | #endif
51 |
52 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
53 |
--------------------------------------------------------------------------------
/tensorflow/lite/core/api/op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/schema/schema_generated.h"
21 |
22 | namespace tflite {
23 |
24 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom
25 | /// op names. This is the mechanism by which ops referenced in the flatbuffer
26 | /// model are mapped to executable function pointers (TfLiteRegistrations).
27 | class OpResolver {
28 | public:
29 | /// Finds the op registration for a builtin operator by enum code.
30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
31 | int version) const = 0;
32 | /// Finds the op registration of a custom operator by op name.
33 | virtual const TfLiteRegistration* FindOp(const char* op,
34 | int version) const = 0;
35 | virtual ~OpResolver() {}
36 | };
37 |
38 | // Handles the logic for converting between an OperatorCode structure extracted
39 | // from a flatbuffer and information about a registered operator
40 | // implementation.
41 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
42 | const OpResolver& op_resolver,
43 | ErrorReporter* error_reporter,
44 | const TfLiteRegistration** registration);
45 |
46 | } // namespace tflite
47 |
48 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
49 |
--------------------------------------------------------------------------------
/previous_iterations/person_detection_arducam_5mp_plus/image_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an image source like a camera, and is
23 | // expected to return 8-bit sample data. The assumption is that this will be
24 | // called in a low duty-cycle fashion in a low-power application. In these
25 | // cases, the imaging sensor need not be run in a streaming mode, but rather can
26 | // be idled in a relatively low-power mode between calls to GetImage(). The
27 | // assumption is that the overhead and time of bringing the low-power sensor out
28 | // of this standby mode is commensurate with the expected duty cycle of the
29 | // application. The underlying sensor may actually be put into a streaming
30 | // configuration, but the image buffer provided to GetImage should not be
31 | // overwritten by the driver code until the next call to GetImage();
32 | //
33 | // The reference implementation can have no platform-specific dependencies, so
34 | // it just returns a static image. For real applications, you should
35 | // ensure there's a specialized implementation that accesses hardware APIs.
36 | TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
37 | int image_height, int channels, int8_t* image_data);
38 |
39 | #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
40 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/op_macros.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
17 |
18 | // If we're on a platform without standard IO functions, fall back to a
19 | // non-portable function.
20 | #ifdef TF_LITE_MCU_DEBUG_LOG
21 |
22 | #include "tensorflow/lite/micro/debug_log.h"
23 |
24 | #define DEBUG_LOG(x) \
25 | do { \
26 | DebugLog(x); \
27 | } while (0)
28 |
29 | inline void InfiniteLoop() {
30 | DEBUG_LOG("HALTED\n");
31 | while (1) {
32 | }
33 | }
34 |
35 | #define TFLITE_ABORT InfiniteLoop();
36 |
37 | #else // TF_LITE_MCU_DEBUG_LOG
38 |
39 | #include <cstdio>
40 | #include <cstdlib>
41 |
42 | #define DEBUG_LOG(x) \
43 | do { \
44 | fprintf(stderr, "%s", (x)); \
45 | } while (0)
46 |
47 | #define TFLITE_ABORT abort()
48 |
49 | #endif // TF_LITE_MCU_DEBUG_LOG
50 |
51 | #ifdef NDEBUG
52 | #define TFLITE_ASSERT_FALSE (static_cast<void>(0))
53 | #else
54 | #define TFLITE_ASSERT_FALSE TFLITE_ABORT
55 | #endif
56 |
57 | #define TF_LITE_FATAL(msg) \
58 | do { \
59 | DEBUG_LOG(msg); \
60 | DEBUG_LOG("\nFATAL\n"); \
61 | TFLITE_ABORT; \
62 | } while (0)
63 |
64 | #define TF_LITE_ASSERT(x) \
65 | do { \
66 | if (!(x)) TF_LITE_FATAL(#x); \
67 | } while (0)
68 |
69 | #define TF_LITE_ASSERT_EQ(x, y) \
70 | do { \
71 | if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
72 | } while (0)
73 |
74 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
75 |
--------------------------------------------------------------------------------
/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17 |
18 | #include <algorithm>
19 | #include <limits>
20 |
21 | #include "tensorflow/lite/kernels/internal/common.h"
22 | #include "tensorflow/lite/kernels/internal/compatibility.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/types.h"
25 |
26 | namespace tflite {
27 |
28 | namespace reference_ops {
29 |
30 | template <typename InputT, typename OutputT>
31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params,
32 | const RuntimeShape& input_shape,
33 | const InputT* input_data,
34 | const RuntimeShape& output_shape,
35 | OutputT* output_data) {
36 | const int32 zero_point = op_params.zero_point;
37 | const double scale = op_params.scale;
38 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
39 |   static constexpr int32 min_val = std::numeric_limits<OutputT>::min();
40 |   static constexpr int32 max_val = std::numeric_limits<OutputT>::max();
41 |
42 | for (int i = 0; i < flat_size; i++) {
43 | const InputT val = input_data[i];
44 | int32 unclamped =
45 |         static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) +
46 | zero_point;
47 | int32 clamped = std::min(std::max(unclamped, min_val), max_val);
48 | output_data[i] = clamped;
49 | }
50 | }
51 |
52 | } // namespace reference_ops
53 |
54 | } // namespace tflite
55 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
56 |
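To make the per-element arithmetic concrete, here is a tiny standalone sketch (not part of the library; it uses std::round in place of TfLiteRound) that mirrors the formula above for a single float-to-int8 value, plus one worked number:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Mirrors AffineQuantize for one element:
//   q = clamp(round(x / scale) + zero_point, -128, 127)
int8_t QuantizeOne(float x, float scale, int32_t zero_point) {
  const int32_t unclamped =
      static_cast<int32_t>(std::round(x / scale)) + zero_point;
  const int32_t clamped =
      std::min<int32_t>(std::max<int32_t>(unclamped, -128), 127);
  return static_cast<int8_t>(clamped);
}

// Example: with scale = 0.5 and zero_point = -128,
//   QuantizeOne(1.0f, 0.5f, -128) == round(1.0 / 0.5) - 128 == -126.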
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_x.c
22 | * Description: s8 version of concatenation along the X axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the X axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
48 | void arm_concatenation_s8_x(const int8_t *input,
49 | const uint16_t input_x,
50 | const uint16_t input_y,
51 | const uint16_t input_z,
52 | const uint16_t input_w,
53 | int8_t *output,
54 | const uint16_t output_x,
55 | const uint32_t offset_x)
56 | {
57 | const uint32_t num_iterations = input_y * input_z * input_w;
58 |
59 | output += offset_x;
60 |
61 | uint32_t i;
62 |
63 | // Copy per row
64 | for (i = 0; i < num_iterations; ++i)
65 | {
66 | memcpy(output, input, input_x);
67 | input += input_x;
68 | output += output_x;
69 | }
70 | }
71 |
72 | /**
73 | * @} end of Concatenation group
74 | */
75 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
19 |
20 | #define kWideDynamicFunctionBits 32
21 | #define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct PcanGainControlConfig {
28 | // set to false (0) to disable this module
29 | int enable_pcan;
30 | // gain normalization exponent (0.0 disables, 1.0 full strength)
31 | float strength;
32 | // positive value added in the normalization denominator
33 | float offset;
34 | // number of fractional bits in the gain
35 | int gain_bits;
36 | };
37 |
38 | void PcanGainControlFillConfigWithDefaults(
39 | struct PcanGainControlConfig* config);
40 |
41 | int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
42 | int32_t input_bits, uint32_t x);
43 |
44 | int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
45 | struct PcanGainControlState* state,
46 | uint32_t* noise_estimate,
47 | const int num_channels,
48 | const uint16_t smoothing_bits,
49 | const int32_t input_correction_bits);
50 |
51 | void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
52 |
53 | #ifdef __cplusplus
54 | } // extern "C"
55 | #endif
56 |
57 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
58 |
--------------------------------------------------------------------------------
/tensorflow/lite/core/api/error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
17 |
18 | #include <cstdarg>
19 |
20 | namespace tflite {
21 |
22 | /// A functor that reports errors to the supporting system. Invoked similarly
23 | /// to printf.
24 | ///
25 | /// Usage:
26 | /// ErrorReporter foo;
27 | /// foo.Report("test %d", 5);
28 | /// or
29 | /// va_list args;
30 | /// foo.Report("test %d", args); // where args is va_list
31 | ///
32 | /// Subclass ErrorReporter to provide another reporting destination.
33 | /// For example, if you have a GUI program, you might redirect to a buffer
34 | /// that drives a GUI error log box.
35 | class ErrorReporter {
36 | public:
37 | virtual ~ErrorReporter() {}
38 | virtual int Report(const char* format, va_list args) = 0;
39 | int Report(const char* format, ...);
40 | int ReportError(void*, const char* format, ...);
41 | };
42 |
43 | } // namespace tflite
44 |
45 | // You should not make bare calls to the error reporter, instead use the
46 | // TF_LITE_REPORT_ERROR macro, since this allows message strings to be
47 | // stripped when the binary size has to be optimized. If you are looking to
48 | // reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
49 | // every call will be stubbed out, taking no memory.
50 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
51 | #define TF_LITE_REPORT_ERROR(reporter, ...) \
52 | do { \
53 |     static_cast<tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
54 | } while (false)
55 | #else // TF_LITE_STRIP_ERROR_STRINGS
56 | #define TF_LITE_REPORT_ERROR(reporter, ...)
57 | #endif // TF_LITE_STRIP_ERROR_STRINGS
58 |
59 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
60 |
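For illustration, a minimal custom reporter (a sketch, not part of the library) that forwards everything to stderr and is invoked through the macro above:

#include <cstdarg>
#include <cstdio>

#include "tensorflow/lite/core/api/error_reporter.h"

class StderrReporter : public tflite::ErrorReporter {
 public:
  int Report(const char* format, va_list args) override {
    return vfprintf(stderr, format, args);
  }
};

void Example() {
  StderrReporter reporter;
  // Routed through Report(); stubbed out entirely when
  // TF_LITE_STRIP_ERROR_STRINGS is defined.
  TF_LITE_REPORT_ERROR(&reporter, "Failed after %d attempts", 3);
}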
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/filterbank.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
22 |
23 | #define kFilterbankBits 12
24 |
25 | #ifdef __cplusplus
26 | extern "C" {
27 | #endif
28 |
29 | struct FilterbankState {
30 | int num_channels;
31 | int start_index;
32 | int end_index;
33 | int16_t* channel_frequency_starts;
34 | int16_t* channel_weight_starts;
35 | int16_t* channel_widths;
36 | int16_t* weights;
37 | int16_t* unweights;
38 | uint64_t* work;
39 | };
40 |
41 | // Converts the relevant complex values of an FFT output into energy (the
42 | // square magnitude).
43 | void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
44 | struct complex_int16_t* fft_output,
45 | int32_t* energy);
46 |
47 | // Computes the mel-scale filterbank on the given energy array. Output is cached
48 | // internally - to fetch it, you need to call FilterbankSqrt.
49 | void FilterbankAccumulateChannels(struct FilterbankState* state,
50 | const int32_t* energy);
51 |
52 | // Applies an integer square root to the 64 bit intermediate values of the
53 | // filterbank, and returns a pointer to them. Memory will be invalidated the
54 | // next time FilterbankAccumulateChannels is called.
55 | uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);
56 |
57 | void FilterbankReset(struct FilterbankState* state);
58 |
59 | #ifdef __cplusplus
60 | } // extern "C"
61 | #endif
62 |
63 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
64 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_z.c
22 | * Description: s8 version of concatenation along the Z axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the Z axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
48 | void arm_concatenation_s8_z(const int8_t *input,
49 | const uint16_t input_x,
50 | const uint16_t input_y,
51 | const uint16_t input_z,
52 | const uint16_t input_w,
53 | int8_t *output,
54 | const uint16_t output_z,
55 | const uint32_t offset_z)
56 | {
57 | const uint32_t input_copy_size = input_x * input_y * input_z;
58 | const uint32_t output_stride = input_x * input_y * output_z;
59 |
60 | output += offset_z * (input_x * input_y);
61 |
62 | uint32_t i;
63 |
64 | for (i = 0; i < input_w; ++i)
65 | {
66 | memcpy(output, input, input_copy_size);
67 | input += input_copy_size;
68 | output += output_stride;
69 | }
70 | }
71 |
72 | /**
73 | * @} end of Concatenation group
74 | */
75 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/kernels/neg.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/kernels/internal/reference/neg.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
20 | #include "tensorflow/lite/kernels/kernel_util.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace micro {
25 | namespace neg {
26 |
27 | constexpr int kInputTensor = 0;
28 | constexpr int kOutputTensor = 0;
29 |
30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
31 | const TfLiteTensor* input = GetInput(context, node, kInputTensor);
32 | TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
33 | switch (input->type) {
34 | // TODO(wangtz): handle for kTfLiteInt8
35 | case kTfLiteFloat32:
36 |       reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
37 |                             GetTensorShape(output),
38 |                             GetTensorData<float>(output));
39 | break;
40 | default:
41 | TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
42 | TfLiteTypeGetName(input->type), input->type);
43 | return kTfLiteError;
44 | }
45 | return kTfLiteOk;
46 | }
47 |
48 | } // namespace neg
49 |
50 | TfLiteRegistration* Register_NEG() {
51 | static TfLiteRegistration r = {/*init=*/nullptr,
52 | /*free=*/nullptr,
53 | /*prepare=*/nullptr,
54 | /*invoke=*/neg::Eval,
55 | /*profiling_string=*/nullptr,
56 | /*builtin_code=*/0,
57 | /*custom_name=*/nullptr,
58 | /*version=*/0};
59 | return &r;
60 | }
61 |
62 | } // namespace micro
63 | } // namespace ops
64 | } // namespace tflite
65 |
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_y.c
22 | * Description: s8 version of concatenation along the Y axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the Y axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
48 | void arm_concatenation_s8_y(const int8_t *input,
49 | const uint16_t input_x,
50 | const uint16_t input_y,
51 | const uint16_t input_z,
52 | const uint16_t input_w,
53 | int8_t *output,
54 | const uint16_t output_y,
55 | const uint32_t offset_y)
56 | {
57 | const uint32_t num_iterations = input_z * input_w;
58 | const uint32_t input_copy_size = input_x * input_y;
59 | const uint32_t output_stride = input_x * output_y;
60 |
61 | output += offset_y * input_x;
62 | uint32_t i;
63 |
64 | // Copy per tile
65 | for (i = 0; i < num_iterations; ++i)
66 | {
67 | memcpy(output, input, input_copy_size);
68 | input += input_copy_size;
69 | output += output_stride;
70 | }
71 | }
72 |
73 | /**
74 | * @} end of Concatenation group
75 | */
76 |
--------------------------------------------------------------------------------
/tensorflow/lite/type_to_tflitetype.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
16 | #define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
17 |
18 | // Arduino build defines abs as a macro here. That is invalid C++, and breaks
19 | // libc++'s <complex> header, undefine it.
20 | #ifdef abs
21 | #undef abs
22 | #endif
23 |
24 | #include <complex>
25 | #include <string>
26 |
27 | #include "tensorflow/lite/c/common.h"
28 |
29 | namespace tflite {
30 |
31 | // Map statically from a c++ type to a TfLiteType. Used in interpreter for safe
32 | // casts.
33 | template <typename T>
34 | constexpr TfLiteType typeToTfLiteType() {
35 |   return kTfLiteNoType;
36 | }
37 | template <>
38 | constexpr TfLiteType typeToTfLiteType<int>() {
39 |   return kTfLiteInt32;
40 | }
41 | template <>
42 | constexpr TfLiteType typeToTfLiteType<int16_t>() {
43 |   return kTfLiteInt16;
44 | }
45 | template <>
46 | constexpr TfLiteType typeToTfLiteType<int64_t>() {
47 |   return kTfLiteInt64;
48 | }
49 | template <>
50 | constexpr TfLiteType typeToTfLiteType<float>() {
51 |   return kTfLiteFloat32;
52 | }
53 | template <>
54 | constexpr TfLiteType typeToTfLiteType<unsigned char>() {
55 |   return kTfLiteUInt8;
56 | }
57 | template <>
58 | constexpr TfLiteType typeToTfLiteType<int8_t>() {
59 |   return kTfLiteInt8;
60 | }
61 | template <>
62 | constexpr TfLiteType typeToTfLiteType<bool>() {
63 |   return kTfLiteBool;
64 | }
65 | template <>
66 | constexpr TfLiteType typeToTfLiteType<std::complex<float>>() {
67 |   return kTfLiteComplex64;
68 | }
69 | template <>
70 | constexpr TfLiteType typeToTfLiteType<std::string>() {
71 |   return kTfLiteString;
72 | }
73 | template <>
74 | constexpr TfLiteType typeToTfLiteType<TfLiteFloat16>() {
75 |   return kTfLiteFloat16;
76 | }
77 | template <>
78 | constexpr TfLiteType typeToTfLiteType<double>() {
79 |   return kTfLiteFloat64;
80 | }
81 | } // namespace tflite
82 | #endif // TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
83 |
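[Editor's note] Because every specialization above is constexpr, the type-to-enum mapping can be checked entirely at compile time. A small illustrative sketch (not from the project sources; the NotATensorType struct is invented for the example):

    #include <cstdint>

    #include "tensorflow/lite/type_to_tflitetype.h"

    // A mismatch between a C++ element type and the TfLiteType it should map to
    // becomes a build error rather than a runtime bug.
    static_assert(tflite::typeToTfLiteType<int8_t>() == kTfLiteInt8,
                  "int8_t tensors must be declared as kTfLiteInt8");
    static_assert(tflite::typeToTfLiteType<float>() == kTfLiteFloat32,
                  "float tensors must be declared as kTfLiteFloat32");

    // Unspecialized types fall through to the primary template.
    struct NotATensorType {};
    static_assert(tflite::typeToTfLiteType<NotATensorType>() == kTfLiteNoType,
                  "unknown types map to kTfLiteNoType");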
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_nn_accumulate_q7_to_q15.c
22 | * Description: Accumulate a q7 vector into a q15 vector.
23 | *
24 | * $Date: July 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/arm_math.h"
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupSupport
35 | */
36 |
37 | /**
38 | * @addtogroup NNBasicMath
39 | * @{
40 | */
41 |
42 | void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length)
43 | {
44 | q15_t *pCnt = pDst;
45 | const q7_t *pV = pSrc;
46 | q31_t v1, v2, vo1, vo2;
47 | int32_t cnt = length >> 2;
48 | q31_t in;
49 |
50 | while (cnt > 0l)
51 | {
52 | q31_t value = arm_nn_read_q7x4_ia(&pV);
53 | v1 = __SXTB16(__ROR(value, 8));
54 | v2 = __SXTB16(value);
55 | #ifndef ARM_MATH_BIG_ENDIAN
56 |
57 | vo2 = __PKHTB(v1, v2, 16);
58 | vo1 = __PKHBT(v2, v1, 16);
59 |
60 | #else
61 |
62 | vo1 = __PKHTB(v1, v2, 16);
63 | vo2 = __PKHBT(v2, v1, 16);
64 |
65 | #endif
66 |
67 | in = arm_nn_read_q15x2(pCnt);
68 | write_q15x2_ia(&pCnt, __QADD16(vo1, in));
69 |
70 | in = arm_nn_read_q15x2(pCnt);
71 | write_q15x2_ia(&pCnt, __QADD16(vo2, in));
72 |
73 | cnt--;
74 | }
75 | cnt = length & 0x3;
76 | while (cnt > 0l)
77 | {
78 | *pCnt++ += *pV++;
79 | cnt--;
80 | }
81 | }
82 |
83 | /**
84 | * @} end of NNBasicMath group
85 | */
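[Editor's note] As a host-side reference — an editor's sketch, not part of the CMSIS sources — the function above performs an element-wise in-place add of a q7 vector into a q15 vector; the DSP path simply handles four elements per iteration with saturating 16-bit additions (__QADD16). The helper name below is hypothetical, and plain <stdint.h> types stand in for the q7_t/q15_t aliases from arm_math.h.

    #include <stdint.h>

    // Reference equivalent of arm_nn_accumulate_q7_to_q15().
    static void accumulate_q7_to_q15_ref(int16_t *dst, const int8_t *src,
                                         uint32_t length)
    {
        for (uint32_t i = 0; i < length; ++i) {
            dst[i] = (int16_t)(dst[i] + src[i]);  /* dst[i] += src[i] */
        }
    }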
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_softmax_with_batch_q7.c
22 | * Description: Q7 softmax function
23 | *
24 | * $Date: 05. August 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M and Cortex-A cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/arm_math.h"
32 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Softmax
40 | * @{
41 | */
42 |
43 | /**
44 | * @brief Q7 softmax function with batch parameter
45 | * @param[in] vec_in pointer to input vector
46 | * @param[in] nb_batches number of batches
47 | * @param[in] dim_vec input vector dimension
48 | * @param[out] p_out pointer to output vector
49 | *
50 | * @details
51 | *
52 | * Here, instead of the typical natural-logarithm-based softmax, we use a
53 | * 2-based softmax, i.e.:
54 | *
55 | * y_i = 2^(x_i) / sum(2^x_j)
56 | *
57 | * The output values therefore differ from those of the standard softmax,
58 | * but mathematically the gradient is the same
59 | * up to a log(2) scaling factor.
60 | *
61 | */
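// [Editor's sketch, not part of the CMSIS source] A floating-point reference of
// the 2-based softmax described above can be handy when validating the q7 output
// on a host machine; the helper name is hypothetical:
//
//     #include <math.h>
//     void softmax_base2_ref(const float *x, int n, float *y) {
//         float sum = 0.0f;
//         for (int i = 0; i < n; ++i) sum += exp2f(x[i]);        /* sum(2^x_j)    */
//         for (int i = 0; i < n; ++i) y[i] = exp2f(x[i]) / sum;  /* 2^(x_i) / sum */
//     }
//
// Since 2^x = e^(x * ln 2), this is the ordinary softmax applied to inputs scaled
// by log(2), which is where the gradient's log(2) factor mentioned above comes from.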
62 |
63 | void arm_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out)
64 | {
65 | for (int i = 0; i < nb_batches; i++)
66 | {
67 | arm_softmax_q7(vec_in, dim_vec, p_out);
68 | vec_in += dim_vec;
69 | p_out += dim_vec;
70 | }
71 | }
72 |
73 | /**
74 | * @} end of Softmax group
75 | */
76 |
--------------------------------------------------------------------------------
/tensorflow/lite/experimental/microfrontend/lib/window_util.c:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
16 |
17 | #include <math.h>
18 | #include <stdio.h>
19 | #include <stdlib.h>
20 | #include <string.h>
21 |
22 | // Some platforms don't have M_PI
23 | #ifndef M_PI
24 | #define M_PI 3.14159265358979323846
25 | #endif
26 |
27 | void WindowFillConfigWithDefaults(struct WindowConfig* config) {
28 | config->size_ms = 25;
29 | config->step_size_ms = 10;
30 | }
31 |
32 | int WindowPopulateState(const struct WindowConfig* config,
33 | struct WindowState* state, int sample_rate) {
34 | state->size = config->size_ms * sample_rate / 1000;
35 | state->step = config->step_size_ms * sample_rate / 1000;
36 |
37 | state->coefficients = malloc(state->size * sizeof(*state->coefficients));
38 | if (state->coefficients == NULL) {
39 | fprintf(stderr, "Failed to allocate window coefficients\n");
40 | return 0;
41 | }
42 |
43 | // Populate the window values.
44 | const float arg = M_PI * 2.0 / ((float)state->size);
45 | int i;
46 | for (i = 0; i < state->size; ++i) {
47 | float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
48 | // Scale it to fixed point and round it.
49 | state->coefficients[i] =
50 | floor(float_value * (1 << kFrontendWindowBits) + 0.5);
51 | }
52 |
53 | state->input_used = 0;
54 | state->input = malloc(state->size * sizeof(*state->input));
55 | if (state->input == NULL) {
56 | fprintf(stderr, "Failed to allocate window input\n");
57 | return 0;
58 | }
59 |
60 | state->output = malloc(state->size * sizeof(*state->output));
61 | if (state->output == NULL) {
62 | fprintf(stderr, "Failed to allocate window output\n");
63 | return 0;
64 | }
65 |
66 | return 1;
67 | }
68 |
69 | void WindowFreeStateContents(struct WindowState* state) {
70 | free(state->coefficients);
71 | free(state->input);
72 | free(state->output);
73 | }
74 |
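[Editor's note] The coefficient loop above computes a Hann window scaled into fixed point. A small host-side sketch of that calculation — an illustration, not from the project sources — assuming kFrontendWindowBits is 12 (its value is defined in window.h, not in this file) and using a toy window length:

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void)
    {
        const int kWindowBits = 12;  /* assumed value of kFrontendWindowBits */
        const int size = 8;          /* toy window length for illustration */
        const float arg = (float)(M_PI * 2.0 / size);
        for (int i = 0; i < size; ++i) {
            /* Hann value in [0, 1], then scaled and rounded into Q(kWindowBits). */
            float w = 0.5f - 0.5f * cosf(arg * (i + 0.5f));
            int coeff = (int)floorf(w * (1 << kWindowBits) + 0.5f);
            printf("coefficients[%d] = %d\n", i, coeff);
        }
        return 0;
    }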
--------------------------------------------------------------------------------
/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_nn_add_q7.c
22 | * Description: Non-saturating sum of the elements of a q7 vector.
23 | *
24 | * $Date: July 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/arm_math.h"
31 | #include "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/NN/Include/arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupSupport
35 | */
36 |
37 | /**
38 | * @addtogroup NNBasicMath
39 | * @{
40 | */
41 |
42 | void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size)
43 | {
44 | uint32_t block_count;
45 | q31_t result = 0;
46 | #if defined(ARM_MATH_DSP)
47 | /* Loop unrolling: Compute 4 outputs at a time */
48 | block_count = block_size >> 2U;
49 |
50 | while (block_count > 0U)
51 | {
52 | const int32_t mult_q15x2 = (1UL << 16) | 1UL;
53 | q31_t in_q7x4 = arm_nn_read_q7x4_ia(&input);
54 | q31_t temp_q15x2 = __SXTAB16(__SXTB16(in_q7x4), __ROR(in_q7x4, 8));
55 |
56 | result = __SMLAD(temp_q15x2, mult_q15x2, result);
57 |
58 | /* Decrement loop counter */
59 | block_count--;
60 | }
61 |
62 | /* Loop unrolling: Compute remaining outputs */
63 | block_count = block_size & 0x3;
64 | #else
65 | block_count = block_size;
66 | #endif
67 | while (block_count > 0U)
68 | {
69 | /* Accumulate the input value into the running sum. */
70 | result += *input++;
71 |
72 | /* Decrement loop counter */
73 | block_count--;
74 | }
75 |
76 | *output = result;
77 | }
78 |
79 | /**
80 | * @} end of NNBasicMath group
81 | */
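[Editor's note] Despite its name, the function above is a sum-reduction over the whole q7 vector, not an element-wise addition; the __SMLAD path merely folds four sign-extended lanes into the accumulator per iteration. A host-side reference — an editor's sketch with a hypothetical helper name, not part of the CMSIS sources:

    #include <stdint.h>

    // Reference equivalent of arm_nn_add_q7(): sum every int8 element into a
    // 32-bit accumulator, exactly what the non-DSP branch above computes.
    static int32_t nn_add_q7_ref(const int8_t *input, uint32_t block_size)
    {
        int32_t result = 0;
        for (uint32_t i = 0; i < block_size; ++i) {
            result += input[i];
        }
        return result;
    }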
--------------------------------------------------------------------------------