├── src
├── gui_layout
│ └── images
│ │ ├── MCU.png
│ │ ├── SBC.png
│ │ ├── Data.png
│ │ ├── FPGA.png
│ │ ├── Sinus.xlsx
│ │ ├── Database.png
│ │ ├── SBC_icon.png
│ │ ├── network.png
│ │ ├── Loadmodel.png
│ │ ├── Trainmodel.png
│ │ ├── back_arrow.png
│ │ ├── load_arrow.png
│ │ ├── next_arrow.png
│ │ ├── Button_icons.pptx
│ │ ├── Data_regression.png
│ │ ├── Image_regression.png
│ │ ├── Pruning_Button.png
│ │ ├── Window_Icon_blue.png
│ │ ├── Window_Icon_black.png
│ │ ├── Window_Icon_white.png
│ │ ├── Data_classification.png
│ │ ├── Image_classification.png
│ │ ├── Quantization_Button.png
│ │ ├── gui_windows
│ │ │ ├── GUI_window_1.PNG
│ │ │ ├── GUI_window_load_2.PNG
│ │ │ ├── GUI_window_load_3.PNG
│ │ │ ├── GUI_window_load_4.PNG
│ │ │ ├── GUI_window_load_4a.PNG
│ │ │ ├── GUI_window_load_4b.PNG
│ │ │ ├── GUI_window_load_4c.PNG
│ │ │ ├── GUI_window_load_4d.PNG
│ │ │ ├── GUI_window_load_5.PNG
│ │ │ ├── GUI_window_load_5a.PNG
│ │ │ ├── GUI_window_train_2.PNG
│ │ │ ├── GUI_window_train_3.PNG
│ │ │ ├── GUI_window_train_4.PNG
│ │ │ ├── GUI_window_train_5.PNG
│ │ │ ├── GUI_window_load_4_SBC.PNG
│ │ │ ├── GUI_window_load_6_MCU.PNG
│ │ │ ├── GUI_window_load_6_SBC.PNG
│ │ │ └── GUI_window_load_6_FPGA.PNG
│ │ ├── gui_progress_bar
│ │ │ ├── GUI_step_1.png
│ │ │ ├── progress_bar.pptx
│ │ │ ├── GUI_load_step_2.png
│ │ │ ├── GUI_load_step_3.png
│ │ │ ├── GUI_load_step_4.png
│ │ │ ├── GUI_load_step_5.png
│ │ │ ├── GUI_load_step_6.png
│ │ │ ├── GUI_train_step_2.png
│ │ │ ├── GUI_train_step_3.png
│ │ │ ├── GUI_train_step_4.png
│ │ │ └── GUI_train_step_5.png
│ │ ├── gui_loading_images
│ │ │ ├── GUI_load_0.png
│ │ │ ├── GUI_load_1.png
│ │ │ ├── GUI_load_10.png
│ │ │ ├── GUI_load_11.png
│ │ │ ├── GUI_load_12.png
│ │ │ ├── GUI_load_13.png
│ │ │ ├── GUI_load_14.png
│ │ │ ├── GUI_load_15.png
│ │ │ ├── GUI_load_2.png
│ │ │ ├── GUI_load_3.png
│ │ │ ├── GUI_load_4.png
│ │ │ ├── GUI_load_5.png
│ │ │ ├── GUI_load_6.png
│ │ │ ├── GUI_load_7.png
│ │ │ ├── GUI_load_8.png
│ │ │ ├── GUI_load_9.png
│ │ │ ├── GUI_load_finish.png
│ │ │ └── GUI_loading_images.pptx
│ │ ├── Datapriv.svg
│ │ ├── Database.svg
│ │ └── Flowchart.drawio
├── converter
│ ├── tensorflow_library
│ │ ├── tensorflow
│ │ │ └── lite
│ │ │ │ ├── micro
│ │ │ │ ├── kernels
│ │ │ │ │ ├── ethosu.cpp
│ │ │ │ │ ├── ethosu.h
│ │ │ │ │ ├── flexbuffers_generated_data.h
│ │ │ │ │ ├── micro_utils.h
│ │ │ │ │ ├── kernel_util.cpp
│ │ │ │ │ ├── activation_utils.h
│ │ │ │ │ ├── floor.cpp
│ │ │ │ │ ├── fully_connected.h
│ │ │ │ │ ├── neg.cpp
│ │ │ │ │ ├── shape.cpp
│ │ │ │ │ └── ceil.cpp
│ │ │ │ ├── testing
│ │ │ │ │ ├── test_conv_model.h
│ │ │ │ │ └── util_test.cpp
│ │ │ │ ├── benchmarks
│ │ │ │ │ └── keyword_scrambled_model_data.h
│ │ │ │ ├── debug_log.h
│ │ │ │ ├── micro_time.h
│ │ │ │ ├── micro_error_reporter.h
│ │ │ │ ├── rp2
│ │ │ │ │ ├── micro_time.cpp
│ │ │ │ │ └── debug_log.cpp
│ │ │ │ ├── compatibility.h
│ │ │ │ ├── micro_error_reporter.cpp
│ │ │ │ ├── micro_string.h
│ │ │ │ ├── all_ops_resolver.h
│ │ │ │ ├── micro_profiler.cpp
│ │ │ │ ├── memory_planner
│ │ │ │ │ ├── linear_memory_planner.h
│ │ │ │ │ └── linear_memory_planner.cpp
│ │ │ │ ├── all_ops_resolver.cpp
│ │ │ │ ├── memory_helpers.h
│ │ │ │ ├── recording_simple_memory_allocator.h
│ │ │ │ ├── micro_profiler.h
│ │ │ │ ├── micro_utils.cpp
│ │ │ │ └── recording_micro_interpreter.h
│ │ │ │ ├── core
│ │ │ │ └── api
│ │ │ │ │ ├── tensor_utils.h
│ │ │ │ │ ├── error_reporter.cpp
│ │ │ │ │ ├── tensor_utils.cpp
│ │ │ │ │ ├── error_reporter.h
│ │ │ │ │ ├── op_resolver.cpp
│ │ │ │ │ └── op_resolver.h
│ │ │ │ ├── kernels
│ │ │ │ ├── internal
│ │ │ │ │ ├── max.h
│ │ │ │ │ ├── min.h
│ │ │ │ │ ├── reference
│ │ │ │ │ │ ├── neg.h
│ │ │ │ │ │ ├── ceil.h
│ │ │ │ │ │ ├── floor.h
│ │ │ │ │ │ ├── round.h
│ │ │ │ │ │ ├── quantize.h
│ │ │ │ │ │ ├── arg_min_max.h
│ │ │ │ │ │ └── maximum_minimum.h
│ │ │ │ │ ├── cppmath.h
│ │ │ │ │ ├── optimized
│ │ │ │ │ │ └── neon_check.h
│ │ │ │ │ └── tensor_ctypes.h
│ │ │ │ └── op_macros.h
│ │ │ │ ├── version.h
│ │ │ │ └── schema
│ │ │ │ ├── schema_utils.h
│ │ │ │ └── schema_utils.cpp
│ │ └── third_party
│ │ │ ├── kissfft
│ │ │ ├── tools
│ │ │ │ └── kiss_fftr.h
│ │ │ └── COPYING
│ │ │ └── cmsis
│ │ │ └── CMSIS
│ │ │ ├── DSP
│ │ │ └── Include
│ │ │ │ └── dsp
│ │ │ │ ├── svm_defines.h
│ │ │ │ └── bayes_functions.h
│ │ │ └── NN
│ │ │ ├── Source
│ │ │ ├── ReshapeFunctions
│ │ │ │ └── arm_reshape_s8.c
│ │ │ ├── ActivationFunctions
│ │ │ │ ├── arm_relu6_s8.c
│ │ │ │ ├── arm_relu_q15.c
│ │ │ │ ├── arm_nn_activations_q7.c
│ │ │ │ └── arm_relu_q7.c
│ │ │ ├── ConcatenationFunctions
│ │ │ │ ├── arm_concatenation_s8_w.c
│ │ │ │ ├── arm_concatenation_s8_x.c
│ │ │ │ ├── arm_concatenation_s8_z.c
│ │ │ │ └── arm_concatenation_s8_y.c
│ │ │ ├── SoftmaxFunctions
│ │ │ │ └── arm_softmax_with_batch_q7.c
│ │ │ └── NNSupportFunctions
│ │ │ │ ├── arm_nn_accumulate_q7_to_q15.c
│ │ │ │ └── arm_nn_add_q7.c
│ │ │ └── Include
│ │ │ └── arm_nn_tables.h
│ └── create_project.py
├── gui_event
│ ├── _gui_start.py
│ ├── _automl_training.py
│ ├── _csv_dataloader.py
│ ├── _target_platform.py
│ ├── _dataloader.py
│ └── _automl_task.py
├── threads
│ ├── loading_images_thread.py
│ └── create_project_thread_fpga.py
└── automl
│ ├── data_regressor.py
│ ├── data_classifier.py
│ ├── customize_autokeras.py
│ ├── image_regressor.py
│ └── image_classifier.py
├── example
├── data
│ ├── mnist_model.tflite
│ └── data_preprocessing_mnist.py
└── templates
│ ├── arduino_mnist
│ │ ├── model_output.h
│ │ ├── tf_lite_exe.h
│ │ ├── mnist_data.h
│ │ ├── mnist.ino
│ │ └── tf_lite_exe.cpp
│ └── sbc_mnist.py
├── .gitignore
├── .gitmodules
├── requirements.txt
├── .github
├── ISSUE_TEMPLATE
│ ├── feature_request.md
│ └── bug_report.md
├── CONTRIBUTING.md
├── REQUEST_TEMPLATE.md
└── README
│ └── README_AutoKeras.md
└── AutoFlow.py
/src/gui_layout/images/MCU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/MCU.png
--------------------------------------------------------------------------------
/src/gui_layout/images/SBC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/SBC.png
--------------------------------------------------------------------------------
/example/data/mnist_model.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/example/data/mnist_model.tflite
--------------------------------------------------------------------------------
/src/gui_layout/images/Data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Data.png
--------------------------------------------------------------------------------
/src/gui_layout/images/FPGA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/FPGA.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Sinus.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Sinus.xlsx
--------------------------------------------------------------------------------
/src/gui_layout/images/Database.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Database.png
--------------------------------------------------------------------------------
/src/gui_layout/images/SBC_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/SBC_icon.png
--------------------------------------------------------------------------------
/src/gui_layout/images/network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/network.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | auto_model/
3 | __pycache__/
4 | image_classifier/
5 | image_regressor/
6 | my_h5_model.h5
7 | output/
8 |
--------------------------------------------------------------------------------
/src/gui_layout/images/Loadmodel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Loadmodel.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Trainmodel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Trainmodel.png
--------------------------------------------------------------------------------
/src/gui_layout/images/back_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/back_arrow.png
--------------------------------------------------------------------------------
/src/gui_layout/images/load_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/load_arrow.png
--------------------------------------------------------------------------------
/src/gui_layout/images/next_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/next_arrow.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Button_icons.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Button_icons.pptx
--------------------------------------------------------------------------------
/src/gui_layout/images/Data_regression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Data_regression.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Image_regression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Image_regression.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Pruning_Button.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Pruning_Button.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Window_Icon_blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Window_Icon_blue.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Window_Icon_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Window_Icon_black.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Window_Icon_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Window_Icon_white.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "src/optimization"]
2 | path = src/optimization
3 | url = https://github.com/Hahn-Schickard/Automatic-Structured-Pruning
4 |
--------------------------------------------------------------------------------
/src/gui_layout/images/Data_classification.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Data_classification.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Image_classification.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Image_classification.png
--------------------------------------------------------------------------------
/src/gui_layout/images/Quantization_Button.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/Quantization_Button.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_1.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_step_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_step_1.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_0.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_1.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_10.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_11.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_12.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_13.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_14.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_15.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_2.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_3.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_4.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_5.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_6.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_7.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_8.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_9.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/progress_bar.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/progress_bar.pptx
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_2.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_3.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4a.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4a.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4b.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4b.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4c.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4c.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4d.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4d.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_5.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_5.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_5a.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_5a.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_train_2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_train_2.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_train_3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_train_3.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_train_4.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_train_4.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_train_5.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_train_5.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_load_step_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_load_step_2.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_load_step_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_load_step_3.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_load_step_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_load_step_4.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_load_step_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_load_step_5.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_load_step_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_load_step_6.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_train_step_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_train_step_2.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_train_step_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_train_step_3.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_train_step_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_train_step_4.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_progress_bar/GUI_train_step_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_progress_bar/GUI_train_step_5.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_4_SBC.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_4_SBC.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_6_MCU.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_6_MCU.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_6_SBC.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_6_SBC.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_load_finish.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_load_finish.png
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_windows/GUI_window_load_6_FPGA.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_windows/GUI_window_load_6_FPGA.PNG
--------------------------------------------------------------------------------
/src/gui_layout/images/gui_loading_images/GUI_loading_images.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hahn-Schickard/AutoFlow/HEAD/src/gui_layout/images/gui_loading_images/GUI_loading_images.pptx
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Python v.3.8.12
2 | # pip v.21.3.1
3 | # Use TensorFlow with gpu support:
4 | Tensorflow-gpu==2.5.*
5 | # Use TensorFlow without gpu support:
6 | # Tensorflow==2.5.*
7 | autokeras==1.0.16
8 | keras-tuner==1.0.4
9 | PyQt5==5.15.6
10 | pandas==1.3.4
11 | Pillow==9.0.0
12 | scikit-learn==1.0.2
13 | hls4ml==0.6.0
14 | tensorflow-model-optimization==0.7.1
--------------------------------------------------------------------------------
/example/templates/arduino_mnist/model_output.h:
--------------------------------------------------------------------------------
1 | /* Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================*/
5 |
6 | char pred_labels[10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
7 |
--------------------------------------------------------------------------------
/src/gui_layout/images/Datapriv.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/templates/arduino_mnist/tf_lite_exe.h:
--------------------------------------------------------------------------------
1 | /* Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================*/
5 |
6 | #include "mnist_data.h"
7 | #include "tensorflow/lite/micro/all_ops_resolver.h"
8 | #include "tensorflow/lite/micro/micro_error_reporter.h"
9 | #include "tensorflow/lite/micro/micro_interpreter.h"
10 | #include "tensorflow/lite/schema/schema_generated.h"
11 | #include "tensorflow/lite/version.h"
12 |
13 | void setup_model();
14 | float* model_execute(float *);
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is.
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/example/templates/arduino_mnist/mnist_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================*/
5 |
6 | // This is a standard TensorFlow Lite model file that has been converted into a
7 | // C data array, so it can be easily compiled into a binary for devices that
8 | // don't have a file system.
9 |
10 | #ifndef TENSORFLOW_LITE_MODEL_DATA_H_
11 | #define TENSORFLOW_LITE_MODEL_DATA_H_
12 |
13 | extern const unsigned char MNIST_tflite[];
14 | extern const int MNIST_tflite_len;
15 |
16 | #endif
--------------------------------------------------------------------------------
/example/data/data_preprocessing_mnist.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================'''
5 |
6 | import numpy as np
7 | from tensorflow.keras.datasets import mnist
8 |
9 |
10 | def get_data():
11 | (x_train, y_train), (x_test, y_test) = mnist.load_data()
12 |
13 | x_train = x_train.astype('float32') / 255.0
14 | x_test = x_test.astype('float32') / 255.0
15 |
16 | x_train = np.expand_dims(x_train, 3)
17 | x_test = np.expand_dims(x_test, 3)
18 |
19 | return x_train, y_train, x_test, y_test
20 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. See error
18 |
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 |
22 | **Screenshots**
23 | If applicable, add screenshots to help explain your problem.
24 |
25 | **Desktop (please complete the following information):**
26 | - OS: [e.g. Windows, Linux]
27 |
28 | **Additional context**
29 | Add any other context about the problem here.
30 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | If you would like to contribute to this repository, please discuss the change you would like to make with the community to let them know and gather more ideas.
4 |
5 | Please note we have a code of conduct, please follow it in all your interactions with the project.
6 |
7 | ## Pull Request Process
8 | - Update the README.md with details of changes to the interface.
9 | - If new features are added and additional libraries are needed, remember to add them to the requirements.txt file.
10 | - Increase the version numbers in any examples files and the README.md to the new version that this Pull Request would represent. The versioning scheme we use is SemVer.
11 | - After you have submitted a pull request, we will review it. If your changes match our vision of AutoFlow, we will gladly integrate them into our tool.
12 |
--------------------------------------------------------------------------------
/example/templates/arduino_mnist/mnist.ino:
--------------------------------------------------------------------------------
1 | /* Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================*/
5 |
6 | #include <TensorFlowLite.h>
7 |
8 | #include "input_data.h"
9 | #include "model_output.h"
10 |
11 | #include "tf_lite_exe.h"
12 |
13 | float* prediction;
14 |
15 | char buffer[50];
16 |
17 | // The name of this function is important for Arduino compatibility.
18 | void setup() {
19 | Serial.begin(9600);
20 |
21 | setup_model();
22 | }
23 |
24 | // The name of this function is important for Arduino compatibility.
25 | void loop() {
26 |
27 | prediction = model_execute(input_img_7);
28 | for(int i=0; i<10; i++){
29 | sprintf(buffer, "Prediction %c: %.2f%%\n", pred_labels[i], prediction[i]*100);
30 | Serial.print(buffer);
31 | }
32 |
33 | }
34 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/ethosu.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | //
17 | // This is a stub file for non-Ethos platforms
18 | //
19 | #include "tensorflow/lite/c/common.h"
20 |
21 | namespace tflite {
22 |
23 | TfLiteRegistration* Register_ETHOSU() { return nullptr; }
24 |
25 | const char* GetString_ETHOSU() { return ""; }
26 |
27 | } // namespace tflite
28 |
--------------------------------------------------------------------------------
/.github/REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Description
2 |
3 | Please include a summary of the change and which issue is fixed. Please also
4 | include relevant motivation and context. List any dependencies that are
5 | required for this change.
6 |
7 | Fixes # (issue)
8 |
9 |
10 | ## Type of change
11 |
12 | - [ ] Bug fix (non-breaking change which fixes an issue)
13 | - [ ] New feature (non-breaking change which adds functionality)
14 | - [ ] Breaking change (fix or feature that would cause existing
15 | functionality to not work as expected)
16 | - [ ] Documentation or code style changes
17 | - [ ] Other
18 |
19 |
20 | # Checklist:
21 |
22 | - [ ] My code follows the style guidelines of this project
23 | - [ ] I have performed a self-review of my own code
24 | - [ ] I have commented my code, particularly in hard-to-understand areas
25 | - [ ] I have made corresponding changes to the documentation
26 | - [ ] My changes generate no new warnings
27 | - [ ] I tested my changes to prove my fix is effective or that my feature
28 | works
29 | - [ ] Any dependent changes have been merged and published in downstream
30 | modules
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/kissfft/tools/kiss_fftr.h:
--------------------------------------------------------------------------------
1 | #ifndef KISS_FTR_H
2 | #define KISS_FTR_H
3 |
4 | #include "kiss_fft.h"
5 | #ifdef __cplusplus
6 | extern "C" {
7 | #endif
8 |
9 |
10 | /*
11 |
12 | Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
13 |
14 |
15 |
16 | */
17 |
18 | typedef struct kiss_fftr_state *kiss_fftr_cfg;
19 |
20 |
21 | kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
22 | /*
23 | nfft must be even
24 |
25 | If you don't care to allocate space, use mem = lenmem = NULL
26 | */
27 |
28 |
29 | void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
30 | /*
31 | input timedata has nfft scalar points
32 | output freqdata has nfft/2+1 complex points
33 | */
34 |
35 | void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
36 | /*
37 | input freqdata has nfft/2+1 complex points
38 | output timedata has nfft scalar points
39 | */
40 |
41 | #define kiss_fftr_free free
42 |
43 | #ifdef __cplusplus
44 | }
45 | #endif
46 | #endif
47 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/ethosu.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
16 | #define TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
17 |
18 | #include "tensorflow/lite/c/common.h"
19 |
20 | namespace tflite {
21 |
22 | TfLiteRegistration* Register_ETHOSU();
23 |
24 | const char* GetString_ETHOSU();
25 |
26 | } // namespace tflite
27 |
28 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
29 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/testing/test_conv_model.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
17 | #define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
18 |
19 | // See generate_test_models.py for updating the contents of this model:
20 | extern const unsigned char kTestConvModelData[];
21 | extern const unsigned int kTestConvModelDataSize;
22 |
23 | #endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
24 |
--------------------------------------------------------------------------------
/src/gui_layout/images/Database.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/tensor_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
17 | #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
18 |
19 | #include "tensorflow/lite/c/common.h"
20 |
21 | namespace tflite {
22 |
23 | // Resets a variable tensor to the default value.
24 | TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
25 |
26 | } // namespace tflite
27 |
28 | #endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
29 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
17 | #define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
18 |
19 | extern const unsigned char g_keyword_scrambled_model_data[];
20 | extern const unsigned int g_keyword_scrambled_model_data_length;
21 |
22 | #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
23 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 |
6 | http://www.apache.org/licenses/LICENSE-2.0
7 |
8 | Unless required by applicable law or agreed to in writing, software
9 | distributed under the License is distributed on an "AS IS" BASIS,
10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | See the License for the specific language governing permissions and
12 | limitations under the License.
13 | ==============================================================================*/
14 |
15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
16 | #define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
17 |
18 | extern const int g_gen_data_size_none_regular_nms;
19 | extern const unsigned char g_gen_data_none_regular_nms[];
20 |
21 | extern const int g_gen_data_size_regular_nms;
22 | extern const unsigned char g_gen_data_regular_nms[];
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/testing/util_test.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/testing/micro_test.h"
17 |
18 | TF_LITE_MICRO_TESTS_BEGIN
19 |
20 | TF_LITE_MICRO_TEST(ArgumentsExecutedOnlyOnce) {
21 | float count = 0.;
22 | // Make sure either argument is executed once after macro expansion.
23 | TF_LITE_MICRO_EXPECT_NEAR(0, count++, 0.1f);
24 | TF_LITE_MICRO_EXPECT_NEAR(1, count++, 0.1f);
25 | TF_LITE_MICRO_EXPECT_NEAR(count++, 2, 0.1f);
26 | TF_LITE_MICRO_EXPECT_NEAR(count++, 3, 0.1f);
27 | }
28 |
29 | TF_LITE_MICRO_TESTS_END
30 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/debug_log.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
16 | #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
17 |
18 | #ifdef __cplusplus
19 | extern "C" {
20 | #endif // __cplusplus
21 |
22 | // This function should be implemented by each target platform, and provide a
23 | // way for strings to be output to some text stream. For more information, see
24 | // tensorflow/lite/micro/debug_log.cc.
25 | void DebugLog(const char* s);
26 |
27 | #ifdef __cplusplus
28 | } // extern "C"
29 | #endif // __cplusplus
30 |
31 | #endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_
32 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/max.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
17 |
18 | #include <cmath>
19 |
20 | namespace tflite {
21 |
22 | #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
23 | inline float TfLiteMax(const float& x, const float& y) {
24 | return std::max(x, y);
25 | }
26 | #else
27 | template <class T>
28 | inline T TfLiteMax(const T& x, const T& y) {
29 | return std::fmax(x, y);
30 | }
31 | #endif
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
36 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/min.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
17 |
18 | #include <cmath>
19 |
20 | namespace tflite {
21 |
22 | #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
23 | inline float TfLiteMin(const float& x, const float& y) {
24 | return std::min(x, y);
25 | }
26 | #else
27 | template <class T>
28 | inline T TfLiteMin(const T& x, const T& y) {
29 | return std::fmin(x, y);
30 | }
31 | #endif
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
36 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_time.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
17 |
18 | #include <cstdint>
19 |
20 | namespace tflite {
21 |
22 | // These functions should be implemented by each target platform, and provide an
23 | // accurate tick count along with how many ticks there are per second.
24 | int32_t ticks_per_second();
25 |
26 | // Return time in ticks. The meaning of a tick varies per platform.
27 | int32_t GetCurrentTimeTicks();
28 |
29 | } // namespace tflite
30 |
31 | #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
32 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/version.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_VERSION_H_
16 | #define TENSORFLOW_LITE_VERSION_H_
17 |
18 | #include "tensorflow/core/public/version.h"
19 |
20 | // The version number of the Schema. Ideally all changes will be backward
21 | // compatible. If that ever changes, we must ensure that version is the first
22 | // entry in the new tflite root so that we can see that version is not 1.
23 | #define TFLITE_SCHEMA_VERSION (3)
24 |
25 | // TensorFlow Lite Runtime version.
26 | // This value is currently shared with that of TensorFlow.
27 | #define TFLITE_VERSION_STRING TF_VERSION_STRING
28 |
29 | #endif // TENSORFLOW_LITE_VERSION_H_
30 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
17 |
18 | #include <cstdarg>
19 |
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/micro/compatibility.h"
22 |
23 | namespace tflite {
24 |
25 | class MicroErrorReporter : public ErrorReporter {
26 | public:
27 | ~MicroErrorReporter() override {}
28 | int Report(const char* format, va_list args) override;
29 |
30 | private:
31 | TF_LITE_REMOVE_VIRTUAL_DELETE
32 | };
33 |
34 | } // namespace tflite
35 |
36 | #endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_
37 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/rp2/micro_time.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Raspberry Pi Pico-specific implementation of timing functions.
17 |
18 | #include "tensorflow/lite/micro/micro_time.h"
19 |
20 | #include "tensorflow/lite/micro/debug_log.h"
21 |
22 | // These are headers from the RP2's SDK.
23 | #include "hardware/timer.h" // NOLINT
24 |
25 | namespace tflite {
26 | namespace {
27 | // Pico's time_us_32() returns microseconds.
28 | const int32_t kClocksPerSecond = 1000000;
29 | } // namespace
30 |
31 | int32_t ticks_per_second() { return kClocksPerSecond; }
32 |
33 | int32_t GetCurrentTimeTicks() {
34 |   return static_cast<int32_t>(time_us_32());
35 | }
36 |
37 | } // namespace tflite
38 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/error_reporter.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/core/api/error_reporter.h"
16 | #include <cstdarg>
17 |
18 | namespace tflite {
19 |
20 | int ErrorReporter::Report(const char* format, ...) {
21 | va_list args;
22 | va_start(args, format);
23 | int code = Report(format, args);
24 | va_end(args);
25 | return code;
26 | }
27 |
28 | // TODO(aselle): Make the name of ReportError on context the same, so
29 | // we can use the ensure functions w/o a context and w/ a reporter.
30 | int ErrorReporter::ReportError(void*, const char* format, ...) {
31 | va_list args;
32 | va_start(args, format);
33 | int code = Report(format, args);
34 | va_end(args);
35 | return code;
36 | }
37 |
38 | } // namespace tflite
39 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/micro_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
13 | #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
14 | namespace tflite {
15 | namespace ops {
16 | namespace micro {
17 |
18 | // Same as gtl::Greater but defined here to reduce dependencies and
19 | // binary size for micro environment.
20 | struct Greater {
21 |   template <typename T>
22 | bool operator()(const T& x, const T& y) const {
23 | return x > y;
24 | }
25 | };
26 |
27 | struct Less {
28 |   template <typename T>
29 | bool operator()(const T& x, const T& y) const {
30 | return x < y;
31 | }
32 | };
33 |
34 | } // namespace micro
35 | } // namespace ops
36 | } // namespace tflite
37 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_
38 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/schema/schema_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
16 | #define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
17 |
18 | #include "flatbuffers/flatbuffers.h"
19 | #include "tensorflow/lite/schema/schema_generated.h"
20 |
21 | namespace tflite {
22 |
// The following methods are introduced to resolve the op builtin code shortage
// problem. New builtin operators will be assigned to the extended builtin
// code field in the flatbuffer schema. These methods help to hide the builtin
// code details.
27 | BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);
28 |
29 | BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);
30 |
31 | } // namespace tflite
32 |
33 | #endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
34 |
--------------------------------------------------------------------------------
/src/gui_event/_gui_start.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | from src.gui_layout.ui_gui_start import *
10 |
11 |
def gui_start(self):
    """Activates the start window of the GUI.

    You can decide if you want to train a new model using
    AutoKeras or if you want to load an already trained
    model.

    Args:
        self: Main window object the start screen is attached to.
    """
    self.gui_start_ui = UIGUIStart(self.WINDOW_WIDTH, self.WINDOW_HEIGHT,
                                   self.FONT_STYLE, self)

    # NOTE(review): the 'load_model' button routes to "AutoML" and the
    # 'train_model' button routes to "LoadModel" — this looks swapped
    # relative to the attribute names; confirm against UIGUIStart's labels.
    self.gui_start_ui.load_model.clicked.connect(
        lambda: next_window(self, "AutoML"))
    self.gui_start_ui.train_model.clicked.connect(
        lambda: next_window(self, "LoadModel"))

    # Make the start screen the active central widget and show the window.
    self.setCentralWidget(self.gui_start_ui)
    self.show()
29 |
30 |
def next_window(self, n):
    """Open the window that follows the start screen.

    Args:
        n: Name of the next window to open ("AutoML" or "LoadModel").
           Any other value leaves the current window unchanged.
    """
    if n == "LoadModel":
        self.project_data()
        return
    if n == "AutoML":
        self.automl_data()
43 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/neg.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
17 |
18 | #include "tensorflow/lite/kernels/internal/types.h"
19 |
20 | namespace tflite {
21 |
22 | namespace reference_ops {
23 |
24 | template
25 | inline void Negate(const RuntimeShape& input_shape, const T* input_data,
26 | const RuntimeShape& output_shape, T* output_data) {
27 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
28 |
29 | for (int i = 0; i < flat_size; ++i) {
30 | output_data[i] = -input_data[i];
31 | }
32 | }
33 |
34 | } // namespace reference_ops
35 | } // namespace tflite
36 |
37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
38 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/kissfft/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2003-2010 Mark Borgerding
2 |
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
9 | * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/ceil.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
17 |
#include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
26 | inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
27 | const RuntimeShape& output_shape, float* output_data) {
28 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 |
30 | for (int i = 0; i < flat_size; ++i) {
31 | output_data[i] = std::ceil(input_data[i]);
32 | }
33 | }
34 |
35 | } // namespace reference_ops
36 | } // namespace tflite
37 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
38 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_defines.h:
--------------------------------------------------------------------------------
1 | /******************************************************************************
2 | * @file svm_defines.h
3 | * @brief Public header file for CMSIS DSP Library
4 | ******************************************************************************/
5 | /*
6 | * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
7 | *
8 | * SPDX-License-Identifier: Apache-2.0
9 | *
10 | * Licensed under the Apache License, Version 2.0 (the License); you may
11 | * not use this file except in compliance with the License.
12 | * You may obtain a copy of the License at
13 | *
14 | * www.apache.org/licenses/LICENSE-2.0
15 | *
16 | * Unless required by applicable law or agreed to in writing, software
17 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
18 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | * See the License for the specific language governing permissions and
20 | * limitations under the License.
21 | */
22 |
23 |
24 | #ifndef _SVM_DEFINES_H_
25 | #define _SVM_DEFINES_H_
26 |
/**
 * @brief Enumeration of the kernel functions usable by the CMSIS-DSP
 *        SVM predictors.
 */
typedef enum
{
  ARM_ML_KERNEL_LINEAR = 0,
             /**< Linear kernel */
  ARM_ML_KERNEL_POLYNOMIAL = 1,
             /**< Polynomial kernel */
  ARM_ML_KERNEL_RBF = 2,
             /**< Radial Basis Function kernel */
  ARM_ML_KERNEL_SIGMOID = 3
             /**< Sigmoid kernel */
} arm_ml_kernel_type;
41 |
42 | #endif
43 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/floor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
17 |
#include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
26 | inline void Floor(const RuntimeShape& input_shape, const float* input_data,
27 | const RuntimeShape& output_shape, float* output_data) {
28 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
29 |
30 | for (int i = 0; i < flat_size; i++) {
31 | int offset = i;
32 | output_data[offset] = std::floor(input_data[offset]);
33 | }
34 | }
35 |
36 | } // namespace reference_ops
37 | } // namespace tflite
38 |
39 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
40 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/compatibility.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
16 | #define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
17 |
18 | // C++ will automatically create class-specific delete operators for virtual
19 | // objects, which by default call the global delete function. For embedded
20 | // applications we want to avoid this, and won't be calling new/delete on these
21 | // objects, so we need to override the default implementation with one that does
22 | // nothing to avoid linking in ::delete().
23 | // This macro needs to be included in all subclasses of a virtual base class in
24 | // the private section.
25 | #ifdef TF_LITE_STATIC_MEMORY
26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \
27 | void operator delete(void* p) {}
28 | #else
29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE
30 | #endif
31 |
32 | #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_
33 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_error_reporter.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/micro_error_reporter.h"
17 |
#include <cstdarg>
19 |
20 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
21 | #include "tensorflow/lite/micro/debug_log.h"
22 | #include "tensorflow/lite/micro/micro_string.h"
23 | #endif
24 |
25 | namespace tflite {
26 |
// Formats the message into a fixed-size stack buffer and forwards it to the
// platform DebugLog(), followed by "\r\n". Messages longer than kMaxLogLen
// are truncated by MicroVsnprintf. Compiled to a no-op when
// TF_LITE_STRIP_ERROR_STRINGS is defined. Always returns 0.
int MicroErrorReporter::Report(const char* format, va_list args) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  // Only pulling in the implementation of this function for builds where we
  // expect to make use of it to be extra cautious about not increasing the code
  // size.
  static constexpr int kMaxLogLen = 256;
  char log_buffer[kMaxLogLen];
  MicroVsnprintf(log_buffer, kMaxLogLen, format, args);
  DebugLog(log_buffer);
  DebugLog("\r\n");
#endif
  return 0;
}
40 |
41 | } // namespace tflite
42 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/cppmath.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
17 |
#include <cmath>
19 |
20 | namespace tflite {
21 |
// On platforms whose toolchains only provide the C math functions in the
// global namespace (older Android NDKs, Arduino, Zephyr), call them without
// the std:: prefix; everywhere else use the std:: versions.
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
    (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
    defined(__ZEPHYR__)
#define TF_LITE_GLOBAL_STD_PREFIX
#else
#define TF_LITE_GLOBAL_STD_PREFIX std
#endif

// Declares a template function tf_name(x) that forwards to std_name(x),
// resolved against the platform-appropriate namespace chosen above.
#define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
  template <class T>                                  \
  inline T tf_name(const T x) {                       \
    return TF_LITE_GLOBAL_STD_PREFIX::std_name(x);    \
  }

DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
37 |
38 | } // namespace tflite
39 |
40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
41 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_string.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
16 | #define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
17 |
#include <cstdarg>
19 |
20 | // Implements simple string formatting for numeric types. Returns the number of
21 | // bytes written to output.
22 | extern "C" {
23 | // Functionally equivalent to vsnprintf, trimmed down for TFLite Micro.
24 | // MicroSnprintf() is implemented using MicroVsnprintf().
25 | int MicroVsnprintf(char* output, int len, const char* format, va_list args);
// Functionally equivalent to snprintf, trimmed down for TFLite Micro.
27 | // For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string
28 | // "int 10" in the buffer.
29 | // Floating point values are logged in exponent notation (1.XXX*2^N).
30 | int MicroSnprintf(char* output, int len, const char* format, ...);
31 | }
32 |
33 | #endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_
34 |
--------------------------------------------------------------------------------
/example/templates/sbc_mnist.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================'''
5 |
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(1, '..')
from data.data_preprocessing_mnist import get_data


# Location of the converted TFLite model on disk
tflite_model_file = '../data/mnist_model.tflite'
# Load the MNIST test split used to evaluate the model
_, _, x_test, y_test = get_data()

# Read the raw TFLite flatbuffer
with open(tflite_model_file, 'rb') as model_file:
    tflite_model = model_file.read()

# Build an interpreter around the model and reserve its tensor memory
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()

# Input and output tensor metadata of the model
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# For a model quantized to int8 only, cast the inputs first:
# x_test = x_test.astype(np.int8)

# Run the interpreter once per test sample and record the predicted class
predictions = []
for sample in x_test:
    interpreter.set_tensor(input_details['index'], sample)
    interpreter.invoke()
    model_output = interpreter.get_tensor(output_details['index'])
    predictions.append(model_output.argmax())

# Fraction of predictions matching the ground-truth labels
correct = sum(1 for predicted, actual in zip(predictions, y_test)
              if predicted == actual)
model_acc = correct / len(predictions)

print('Model accuracy: {:.2f}%'.format(model_acc * 100))
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/kernel_util.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/kernels/kernel_util.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 |
20 | namespace tflite {
21 | namespace micro {
22 |
// Returns true if the two tensors have identical dimension arrays.
// Both inputs must be non-null (checked with TFLITE_DCHECK in debug builds).
bool HaveSameShapes(const TfLiteEvalTensor* input1,
                    const TfLiteEvalTensor* input2) {
  TFLITE_DCHECK(input1 != nullptr);
  TFLITE_DCHECK(input2 != nullptr);
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}
29 |
30 | const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) {
31 | if (tensor == nullptr || tensor->dims == nullptr) {
32 | return RuntimeShape();
33 | }
34 | TfLiteIntArray* dims = tensor->dims;
35 | const int dims_size = dims->size;
36 | const int32_t* dims_data = reinterpret_cast(dims->data);
37 | return RuntimeShape(dims_size, dims_data);
38 | }
39 |
40 | } // namespace micro
41 | } // namespace tflite
42 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/all_ops_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 | #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
13 | #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
14 |
15 | #include "tensorflow/lite/micro/compatibility.h"
16 | #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
17 |
18 | namespace tflite {
19 |
20 | // The magic number in the template parameter is the maximum number of ops that
21 | // can be added to AllOpsResolver. It can be increased if needed. And most
22 | // applications that care about the memory footprint will want to directly use
23 | // MicroMutableOpResolver and have an application specific template parameter.
24 | // The examples directory has sample code for this.
// Registers every supported micro kernel (up to the 128-op template limit).
// Prefer MicroMutableOpResolver with only the ops you need when binary size
// matters — see the comment above.
class AllOpsResolver : public MicroMutableOpResolver<128> {
 public:
  AllOpsResolver();

 private:
  // Suppresses the class-specific operator delete for embedded builds so
  // ::delete is never linked in (see compatibility.h).
  TF_LITE_REMOVE_VIRTUAL_DELETE
};
32 |
33 | } // namespace tflite
34 |
35 | #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
36 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/optimized/neon_check.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
17 |
18 | #if defined(__ARM_NEON__) || defined(__ARM_NEON)
19 | #define USE_NEON
#include <arm_neon.h>
21 | #endif
22 |
23 | #if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
24 | #define USE_NEON
25 | #include "NEON_2_SSE.h"
26 | #endif
27 |
28 | // NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
29 | // defined, PortableSomeFunc(args) otherwise.
30 | #ifdef USE_NEON
31 | // Always use Neon code
32 | #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
33 |
34 | #else
35 | // No NEON available: Use Portable code
36 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
37 |
38 | #endif // defined(USE_NEON)
39 |
40 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
41 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_reshape_s8.c
22 | * Description: Reshape a s8 vector
23 | *
24 | * $Date: September 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Reshape
39 | * @{
40 | */
41 |
42 | /**
43 | * Basic s8 reshape function.
44 | *
45 | * Refer header file for details.
46 | *
47 | */
48 |
void arm_reshape_s8(const int8_t *input,
                    int8_t *output,
                    const uint32_t total_size)
{
    /* Reshape does not alter the stored data, only its interpretation,
     * so the operation reduces to copying total_size bytes. */
    uint32_t i;

    for (i = 0; i < total_size; i++)
    {
        output[i] = input[i];
    }
}
55 |
56 | /**
57 | * @} end of Reshape group
58 | */
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_profiler.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/micro_profiler.h"
17 |
18 | #include "tensorflow/lite/kernels/internal/compatibility.h"
19 | #include "tensorflow/lite/micro/micro_time.h"
20 |
21 | namespace tflite {
22 |
// Stores the error reporter that EndEvent() uses to log event durations.
MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter)
    : reporter_(reporter) {}
25 |
// Records the start tick count and the event tag. event_type and both
// metadata arguments are accepted for interface compatibility but unused.
// Always returns handle 0 — only one event is tracked at a time, so a
// nested BeginEvent overwrites the previous start time.
uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type,
                                   int64_t event_metadata1,
                                   int64_t event_metadata2) {
  start_time_ = GetCurrentTimeTicks();
  TFLITE_DCHECK(tag != nullptr);
  event_tag_ = tag;
  return 0;
}
34 |
// Reports the ticks elapsed since the matching BeginEvent() call via the
// stored error reporter. event_handle is ignored (BeginEvent returns 0).
// Compiled to a no-op when TF_LITE_STRIP_ERROR_STRINGS is defined.
void MicroProfiler::EndEvent(uint32_t event_handle) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  int32_t end_time = GetCurrentTimeTicks();
  TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_,
                       end_time - start_time_);
#endif
}
42 | } // namespace tflite
43 |
--------------------------------------------------------------------------------
/src/threads/loading_images_thread.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | import time
10 | import os
11 |
12 | from PyQt5.QtWidgets import *
13 | from PyQt5.QtGui import *
14 | from PyQt5.QtCore import *
15 |
16 |
class LoadingImages(QThread):
    """Background thread that animates the GUI loading screen.

    Attributes:
        load_png: Widget whose setPixmap() is called with each frame.
        loading_img: Index of the animation frame currently shown.
    """

    def __init__(self, load_png):
        QThread.__init__(self)
        self.load_png = load_png
        # Frame counter; cycles through 1..15 once the thread is running.
        self.loading_img = 0

    def run(self):
        """Swap the loading image every 0.75 seconds while running."""
        while self.isRunning():
            # Advance to the next frame, wrapping from 15 back to 1.
            self.loading_img = (
                self.loading_img + 1 if self.loading_img < 15 else 1)

            time.sleep(0.75)

            frame_path = os.path.join(
                'src', 'gui_layout', 'images', 'gui_loading_images',
                'GUI_load_' + str(self.loading_img) + '.png')
            self.load_png.setPixmap(QPixmap(frame_path))

    def stop_thread(self):
        """Ends the thread."""
        self.terminate()
53 |
--------------------------------------------------------------------------------
/src/gui_event/_automl_training.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | from src.gui_layout.ui_automl_training import *
9 |
10 |
def automl_training(self):
    """AutoKeras training process gets started.

    The process of training models according to the passed settings
    gets started. You have to wait until the process is finished.
    After this you get back to the start window.
    """
    # Build the training screen; it receives every AutoML setting gathered
    # in the previous windows (task, trial/epoch/size limits, image and CSV
    # data layout parameters).
    self.automl_training_ui = UIAutoMLTraining(
        self.WINDOW_WIDTH, self.WINDOW_HEIGHT, self.FONT_STYLE,
        self.project_name, self.output_path, self.data_loader_path,
        self.task, self.max_trials, self.max_epochs, self.max_size,
        self.num_channels, self.img_height, self.img_width, self.separator,
        self.decimal, self.csv_target_label, self)

    # Start the loading animation thread and the AutoKeras worker thread.
    self.automl_training_ui.loading_images.start()
    self.automl_training_ui.autokeras.start()

    # When the worker signals completion, tear down and return to start.
    self.automl_training_ui.autokeras.request_signal.connect(
        lambda: next_window(self))

    self.setCentralWidget(self.automl_training_ui)
    self.show()
33 |
34 |
def next_window(self):
    """
    After AutoKeras training is finished, stop the
    two worker threads and return to the start window
    of the GUI.
    """
    # Stop the training thread first, then the loading animation it drives.
    self.automl_training_ui.autokeras.stop_thread()
    self.automl_training_ui.loading_images.stop_thread()
    self.gui_start()
44 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_relu6_s8.c
22 | * Description: Basic s8 version of ReLU6
23 | *
 * $Date:        September 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_math.h"
32 | #include "arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Acti
40 | * @{
41 | */
42 |
43 | /*
44 | * Basic ReLU6 function
45 | *
46 | * Refer to header file for details.
47 | *
48 | */
49 |
50 | void arm_relu6_s8(q7_t *data, uint16_t size)
51 | {
52 | int32_t i;
53 |
54 | for (i = 0; i < size; i++)
55 | {
56 | int32_t ip = data[i];
57 |
58 | ip = MAX(ip, 0);
59 | data[i] = MIN(ip, 6);
60 | }
61 | }
62 |
63 | /**
64 | * @} end of Acti group
65 | */
66 |
--------------------------------------------------------------------------------
/src/gui_event/_csv_dataloader.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | from PyQt5.QtWidgets import *
10 | from PyQt5.QtGui import *
11 | from PyQt5.QtCore import *
12 |
13 | from src.gui_layout.ui_csv_dataloader import *
14 |
15 |
def csv_dataloader(self, MainWindow):
    """Activates the GUI window to preview and load CSV data.

    With the help of the check boxes the separators can be selected.
    There are three buttons to interact with. With "Browse" you can
    select the CSV file you want to use. "Preview" shows the format of
    the CSV file according to the selected separators. "Load data"
    confirms the settings of the preview and takes them to train the
    model later.

    Args:
        MainWindow: Main window of the GUI
    """
    self.csv_dataloader_ui = UICSVDataloader(self.WINDOW_WIDTH,
                                             self.WINDOW_HEIGHT,
                                             self.FONT_STYLE)

    # Wire the dialog's three buttons to their handler methods.
    self.csv_dataloader_ui.browse.clicked.connect(
        lambda: self.browse_csv_data(self.csv_dataloader_ui))
    self.csv_dataloader_ui.preview.clicked.connect(
        lambda: self.preview_csv_data(self.csv_dataloader_ui))
    self.csv_dataloader_ui.load_data.clicked.connect(
        lambda: self.load_csv_data(self.csv_dataloader_ui, MainWindow))

    self.csv_dataloader_ui.show()
41 |
--------------------------------------------------------------------------------
/.github/README/README_AutoKeras.md:
--------------------------------------------------------------------------------
1 | ## Train a new model with AutoKeras
2 |
3 | In the next window the model name, the output path and the data to train the neural network are passed.
4 |
5 |
6 |
7 |
8 |
9 |
10 | In the next window, the task to be solved by the neural network can be selected. Four different tasks are available for selection: Image classification, Image regression, Data classification and Data regression.
11 |
12 |
13 |
14 |
15 |
16 |
17 | Next, some parameters have to be passed to AutoKeras for automatic model generation. These are:
18 | - The number of epochs each model should be trained.
19 | - The number of different models that should be trained.
20 | - The maximum size of the models. (If this parameter is 0, there is no limit to the size of the model).
21 |
22 | If the data is passed in the form of images, the height and width of the images (number of pixels), as well as the number of color channels, have to be passed additionally. If everything is passed, the automatic model generation can be started.
23 |
24 |
25 |
26 |
27 |
28 |
29 | In the last window of this part of the AutoFlow tool you have to wait until the automatic model generation is finished. Then you will be returned to the GUI start window.
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/tensor_utils.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
#include "tensorflow/lite/core/api/tensor_utils.h"

#include <cstring>

#include "tensorflow/lite/c/common.h"
21 |
22 | namespace tflite {
23 |
// Resets a variable tensor's buffer to its "zero" value so stateful ops
// start from a clean state. Non-variable tensors are left untouched.
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
  if (!tensor->is_variable) {
    return kTfLiteOk;
  }
  // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
  // to the value of the buffer.
  int value = 0;
  if (tensor->type == kTfLiteInt8) {
    // For quantized int8 tensors "zero" is the quantization zero point,
    // not the byte value 0.
    value = tensor->params.zero_point;
  }
  // TODO(b/139446230): Provide a platform header to better handle these
  // specific scenarios.
#if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \
    defined(__i386) || defined(__x86__) || defined(__X86__) || \
    defined(_X86_) || defined(_M_IX86) || defined(_M_X64)
  memset(tensor->data.raw, value, tensor->bytes);
#else
  // Byte-wise fallback for targets not covered by the platform list above.
  char* raw_ptr = tensor->data.raw;
  for (size_t i = 0; i < tensor->bytes; ++i) {
    *raw_ptr = value;
    raw_ptr++;
  }
#endif
  return kTfLiteOk;
}
49 |
50 | } // namespace tflite
51 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/tensor_ctypes.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/types.h"
20 |
21 | namespace tflite {
22 |
23 | template
24 | inline T* GetTensorData(TfLiteTensor* tensor) {
25 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr;
26 | }
27 |
28 | template
29 | inline const T* GetTensorData(const TfLiteTensor* tensor) {
30 | return tensor != nullptr ? reinterpret_cast(tensor->data.raw)
31 | : nullptr;
32 | }
33 |
34 | inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) {
35 | if (tensor == nullptr) {
36 | return RuntimeShape();
37 | }
38 |
39 | TfLiteIntArray* dims = tensor->dims;
40 | const int dims_size = dims->size;
41 | const int32_t* dims_data = reinterpret_cast(dims->data);
42 | return RuntimeShape(dims_size, dims_data);
43 | }
44 |
45 | } // namespace tflite
46 |
47 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
48 |
--------------------------------------------------------------------------------
/src/automl/data_regressor.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | import autokeras as ak
9 | import os
10 | import tensorflow as tf
11 |
12 | from src.gui_event._dataloader_helper import dataloader_autokeras
13 |
14 |
def data_regressor(project_name, output_path, data_path, max_trials=10,
                   max_epochs=20, max_size=0, overwrite=True, separator=None,
                   decimal=None, csv_target_label=None):
    """Searches for the best regression model on tabular (CSV) data.

    Args:
        project_name: Name under which the best model is saved
        output_path: Directory the trained model is written to
        data_path: Path of the CSV file containing the training data
        max_trials: Number of different models AutoKeras should try
        max_epochs: Maximum number of training epochs per model
        max_size: Maximum model size; 0 means no limit
        overwrite: Overwrite a previous AutoKeras project of the same name
        separator: Column separator of the CSV file
        decimal: Decimal character of the CSV file
        csv_target_label: Name of the target column in the CSV file
    """
    input_node = ak.Input()
    output_node = ak.DenseBlock(use_batchnorm=False)(input_node)
    output_node = ak.RegressionHead()(output_node)
    clf = ak.AutoModel(
        inputs=input_node, outputs=output_node, overwrite=overwrite,
        max_trials=max_trials,
        # The GUI uses 0 to mean "no size limit"; pass None explicitly so
        # AutoKeras unambiguously treats it as unlimited.
        max_model_size=max_size if max_size else None
    )

    if os.path.isfile(data_path):
        x_train, y_train, x_test, y_test = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, None, None, None)
        clf.fit(x_train, y_train, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=[tf.keras.callbacks.EarlyStopping(
                    monitor='val_loss', patience=5,
                    restore_best_weights=True)])
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(x_test, y_test))
    else:
        # Directories (image folders) and missing paths cannot be used here.
        # Returning early also avoids calling export_model() on a model that
        # was never fitted (the original code crashed on a missing path).
        print("For data regression select a file as data loader.")
        return

    best_model = clf.export_model()
    best_model.summary()

    best_model.save(os.path.join(output_path, project_name + '.h5'))
44 |
--------------------------------------------------------------------------------
/src/automl/data_classifier.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | import autokeras as ak
9 | import os
10 | import tensorflow as tf
11 |
12 | from src.gui_event._dataloader_helper import dataloader_autokeras
13 |
14 |
def data_classifier(project_name, output_path, data_path, max_trials=10,
                    max_epochs=20, max_size=0, overwrite=True, separator=None,
                    decimal=None, csv_target_label=None):
    """Searches for the best classification model on tabular (CSV) data.

    Args:
        project_name: Name under which the best model is saved
        output_path: Directory the trained model is written to
        data_path: Path of the CSV file containing the training data
        max_trials: Number of different models AutoKeras should try
        max_epochs: Maximum number of training epochs per model
        max_size: Maximum model size; 0 means no limit
        overwrite: Overwrite a previous AutoKeras project of the same name
        separator: Column separator of the CSV file
        decimal: Decimal character of the CSV file
        csv_target_label: Name of the target column in the CSV file
    """
    input_node = ak.Input()
    output_node = ak.DenseBlock(use_batchnorm=False)(input_node)
    output_node = ak.ClassificationHead()(output_node)
    clf = ak.AutoModel(
        inputs=input_node, outputs=output_node, overwrite=overwrite,
        max_trials=max_trials,
        # The GUI uses 0 to mean "no size limit"; pass None explicitly so
        # AutoKeras unambiguously treats it as unlimited.
        max_model_size=max_size if max_size else None
    )

    if os.path.isfile(data_path):
        x_train, y_train, x_test, y_test = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, None, None, None)
        clf.fit(x_train, y_train, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=[tf.keras.callbacks.EarlyStopping(
                    monitor='val_loss', patience=5,
                    restore_best_weights=True)])
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(x_test, y_test))
    else:
        # Directories (image folders) and missing paths cannot be used here.
        # Returning early also avoids calling export_model() on a model that
        # was never fitted (the original code crashed on a missing path).
        print("For data classification select a file as data loader.")
        return

    best_model = clf.export_model()
    best_model.summary()

    best_model.save(os.path.join(output_path, project_name + '.h5'))
44 |
--------------------------------------------------------------------------------
/src/gui_layout/images/Flowchart.drawio:
--------------------------------------------------------------------------------
1 | 7Vtbc5s4FP41fmwGkMDwmFvbmbY7mWa7jfviUUGxaTByhXzrr19hhAFBjMJt8czmJeggCXH0ne9chCfgdrX/QNF6+YV4OJgYmrefgLuJYei6bfJ/seSQSEzbTgQL6nuiUyZ49P9gIdSEdON7OCp0ZIQEzF8XhS4JQ+yyggxRSnbFbs8kKD51jRa4JHh0UVCWfvc9tkzfy3KyGx+xv1iKR9vGNLmxQmln8SbREnlklxOB+wm4pYSw5Gq1v8VBrLxUL8m496/cPS2M4pCpDHCeZsbz/fOPb7sfu5svf6Hr+0+zd2J3tijYiBfe+HOu83nEEGVi4eyQaoOSTejheEJtAm52S5/hxzVy47s7vv9ctmSrgLd0fhkxSl7wLQkIPY4GxrVt3F3zO+Wli7fZYsrwPicSr/IBkxVm9MC7nJAl1CpwZUxFe5ftki1Ey9z+QCFDAheL08yZ6viF0N4bNKnrJW1hj0NJNAllS7IgIQruM+lNUZ9Zn8+ErIUWf2HGDsIu0IaRoo7x3mdPuetZPNWVKVp3ezHzsXFIGyF/36d8IzcqbmbDjq1snHcdWxRv/gyI+/L30g8T8Xs/aLHpEdlQF59RrSXsHtEFZue2QOxtrPezGKI4QMzfFk28ChFi6APx+ZpP2ANAwh6QQJW8kRgl4eq0jOZQs6qMdk3JL06Acw8xNGK7NWS71ct2q2uDGq5Wpc4EbPPoEDG8GrE+gamgT31QfZZ9Skl/7oZuj+rTX+eVnAaL6k45L+O5WZ7muua8nsnNVCQ3S5HbcvtuVmx7KmtJgbpMgbYaBZYnsmu4NFFMaSKOGHTIdVvHHaIzC7ak58BCtMQvkhk7JWrDqaIWsmb+yv/D1U/COQoWZMT0YmoK9AKGpJf0YWOIs6ADalhHIWKKRzxg6nP1YNqSrvogqHTHaxkqRXvf4ReEsIhJw7maSrOo0g+0jEpaqKOfrhgCpBZZYIg4hgsI8jgexksNFixSw4kGctTgDMoMoEqXLsWI4TQ8HrE+p3BskRyA46HaupRWhWhHRKopV9aSagrq3kl1KsdhnFQNLfsDzQjWlPO9oQm292ykk8zCQ9HytITXAZelPtAqmIemwfMGUhFkDIFeXRG9w2QtpgxyvWHWYmlmte/rOGuRF5xmMb1mLaBcX+rYZFoVLTPLmuXvdWlmzS0jBXx3vD6MZUwd46oIaUNraBu2OZWm0hU5/63WMXX+C+uYVkWZcRizCi6t+AoqQvZhi6/pxAMXC3VFrrnUYmEK0pGwiy6lNrAptxiS3wVWP8wiVwvBENVCWFkLEMzCUPQyYmaRjyFGwCxlIhlt8nph57Epu9TSEFQN/9seyEpVatA0rgewZqKes1V4QRWXCwMtVI3MoTkQaA0Ja7IvU65hS0niiYuHAm3lR0TCb0UceX64iEbsu6Cj4LuMQX3XuRyDUeSHXKUj1qilEg0MWsqG9v/E2hexqiYlUDUpaVvKloP3psQqH36rZhtdEatZTghKKH4tOXYDFEW+O5bUuBfgqYahqg59oCJ0ye87DeEp+31dmqirIrT0rQ9wnLPrMuQguqa/3B0UurfOtf/ZfqUff5v+b51+/Tl7WFnfQPCuMtV2o+2FHL1LO2IO6F4r1QlqlNXCd76NnVr5wKZnDNKpAqKsahXHG7l1KBHfOfB2d/rW9vBA+oavdBqmfKwmT2R05XB5M/ulRdI9+70KuP8X
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Include/arm_nn_tables.h:
--------------------------------------------------------------------------------
1 | /* ----------------------------------------------------------------------
2 | * Project: CMSIS NN Library
3 | * Title: arm_nn_tables.h
4 | * Description: Extern declaration for NN tables
5 | *
6 | * $Date: 17. January 2018
7 | * $Revision: V.1.0.0
8 | *
9 | * Target Processor: Cortex-M cores
10 | * -------------------------------------------------------------------- */
11 | /*
12 | * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
13 | *
14 | * SPDX-License-Identifier: Apache-2.0
15 | *
16 | * Licensed under the Apache License, Version 2.0 (the License); you may
17 | * not use this file except in compliance with the License.
18 | * You may obtain a copy of the License at
19 | *
20 | * www.apache.org/licenses/LICENSE-2.0
21 | *
22 | * Unless required by applicable law or agreed to in writing, software
23 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25 | * See the License for the specific language governing permissions and
26 | * limitations under the License.
27 | */
28 |
29 | #ifndef _ARM_NN_TABLES_H
30 | #define _ARM_NN_TABLES_H
31 |
32 | #include "arm_math.h"
33 |
34 | /**
35 | * @brief tables for various activation functions
36 | *
37 | */
38 |
39 | extern const q15_t sigmoidTable_q15[256];
40 | extern const q7_t sigmoidTable_q7[256];
41 |
42 | extern const q7_t tanhTable_q7[256];
43 | extern const q15_t tanhTable_q15[256];
44 |
45 | /**
46 | * @brief 2-way tables for various activation functions
47 | *
48 | * 2-way table, H table for value larger than 1/4
49 | * L table for value smaller than 1/4, H table for remaining
50 | * We have this only for the q15_t version. It does not make
51 | * sense to have it for q7_t type
52 | */
53 | extern const q15_t sigmoidHTable_q15[192];
54 | extern const q15_t sigmoidLTable_q15[128];
55 |
56 | #endif /* ARM_NN_TABLES_H */
57 |
--------------------------------------------------------------------------------
/src/gui_event/_target_platform.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | from src.gui_layout.ui_target_platform import *
10 |
11 |
def target_platform(self):
    """Select a button to choose your device.

    You can choose via three different buttons on which device you want
    to execute the model. If "Back" is pressed you get back to the start
    window. If you choose a device you get to the optimization window.
    """
    self.target_platform_ui = UITargetPlatform(
        self.WINDOW_WIDTH, self.WINDOW_HEIGHT, self.FONT_STYLE, self)

    # Each platform button forwards its hardware tag to next_window().
    self.target_platform_ui.mcu.clicked.connect(
        lambda: next_window(self, "Next", "MCU"))
    self.target_platform_ui.fpga.clicked.connect(
        lambda: next_window(self, "Next", "FPGA"))
    self.target_platform_ui.sbc.clicked.connect(
        lambda: next_window(self, "Next", "SBC"))

    self.target_platform_ui.back.clicked.connect(
        lambda: next_window(self, "Back", None))

    self.setCentralWidget(self.target_platform_ui)
    self.show()
34 |
35 |
def next_window(self, n, target):
    """
    Defines which one is the next window to open.

    Args:
        n: Go forward or go back
        target: Target to execute the neural network
    """
    if n == "Back":
        self.project_data()
    elif n == "Next":
        self.target = target
        print("Target:", self.target)

        # Only the three supported platforms advance to the next step.
        if self.target in ("MCU", "FPGA", "SBC"):
            self.optimization_algo()
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/round.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
17 |
#include <cmath>
19 |
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 |
24 | namespace reference_ops {
25 |
// Rounds `value` to the nearest integral float, with ties going to the even
// integer ("banker's rounding"), matching tf.round. The static_cast was
// missing its <int> target type (invalid C++ as committed); restored here.
// Also drops the pointless assignment in `return floor_val = floor_val + 1`.
inline float RoundToNearest(float value) {
  const float floor_val = std::floor(value);
  const float diff = value - floor_val;
  if ((diff < 0.5f) ||
      ((diff == 0.5f) && (static_cast<int>(floor_val) % 2 == 0))) {
    return floor_val;
  } else {
    return floor_val + 1.0f;
  }
}
36 |
37 | inline void Round(const RuntimeShape& input_shape, const float* input_data,
38 | const RuntimeShape& output_shape, float* output_data) {
39 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
40 | for (int i = 0; i < flat_size; ++i) {
41 | // Note that this implementation matches that of tensorFlow tf.round
42 | // and corresponds to the bankers rounding method.
43 | // cfenv (for fesetround) is not yet supported universally on Android, so
44 | // using a work around.
45 | output_data[i] = RoundToNearest(input_data[i]);
46 | }
47 | }
48 |
49 | } // namespace reference_ops
50 | } // namespace tflite
51 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
52 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/memory_planner/linear_memory_planner.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
17 | #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
18 |
19 | #include "tensorflow/lite/micro/compatibility.h"
20 | #include "tensorflow/lite/micro/memory_planner/memory_planner.h"
21 |
22 | namespace tflite {
23 |
24 | // The simplest possible memory planner that just lays out all buffers at
25 | // increasing offsets without trying to reuse memory.
class LinearMemoryPlanner : public MemoryPlanner {
 public:
  LinearMemoryPlanner();
  ~LinearMemoryPlanner() override;

  // Registers a buffer of `size` bytes. first/last_time_used describe the
  // buffer's lifetime; a linear layout presumably ignores them — see the
  // .cpp implementation to confirm.
  TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size,
                         int first_time_used, int last_time_used) override;

  // Total arena size needed for every buffer added so far.
  size_t GetMaximumMemorySize() override;
  // Number of buffers registered via AddBuffer.
  int GetBufferCount() override;
  // Writes the byte offset assigned to `buffer_index` into `*offset`.
  TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter,
                                  int buffer_index, int* offset) override;

 private:
  // Fixed capacity: at most this many buffers can be registered.
  static constexpr int kMaxBufferCount = 1024;
  size_t buffer_offsets_[kMaxBufferCount];
  int current_buffer_count_;
  size_t next_free_offset_;

  TF_LITE_REMOVE_VIRTUAL_DELETE
};
47 |
48 | } // namespace tflite
49 |
50 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_
51 |
--------------------------------------------------------------------------------
/AutoFlow.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================'''
5 |
6 | """This is the start file to run the AutoFlow GUI
7 | In this file we import the libarys and define the GUI with all the layouts.
8 |
9 | Typical usage example:
10 | python AutoFlow.py
11 | """
12 |
13 | import sys
14 | from PyQt5.QtWidgets import *
15 | from PyQt5.QtGui import *
16 | from PyQt5.QtCore import *
17 | from src.gui_event import MainWindow
18 |
19 |
app = QApplication(sys.argv)
# Force the style to be the same on all OSs:
app.setStyle("Fusion")

# Now use a palette to switch to dark colors:
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColor(QPalette.Base, QColor(25, 25, 25))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, Qt.white)
palette.setColor(QPalette.ToolTipText, Qt.white)
palette.setColor(QPalette.Text, Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, Qt.white)
palette.setColor(QPalette.BrightText, Qt.red)
palette.setColor(QPalette.Link, QColor(42, 130, 218))
palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
palette.setColor(QPalette.HighlightedText, Qt.black)
app.setPalette(palette)

# Highlight buttons in blue when pressed, checked or hovered.
app.setStyleSheet("QPushButton:pressed { background-color: rgb(10, 100, 200) }"
                  "QPushButton:checked { background-color: rgb(10, 100, 200) }"
                  "QPushButton::hover { background-color : rgb(10, 100, 200)} ")

# The main window scales itself to the primary screen's resolution.
screen_width = app.primaryScreen().size().width()
screen_height = app.primaryScreen().size().height()

print("screen_width:{}; screen_height:{}".format(screen_width, screen_height))

# Create the main window, freeze its size and enter the Qt event loop.
w = MainWindow(screen_width, screen_height)
w.show()
w.setFixedSize(w.size())
sys.exit(app.exec_())
54 |
--------------------------------------------------------------------------------
/src/threads/create_project_thread_fpga.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================'''
5 |
6 | import sys
7 | sys.path.append("..") # Adds higher directory to python modules path.
8 |
9 | from PyQt5.QtWidgets import *
10 | from PyQt5.QtGui import *
11 | from PyQt5.QtCore import *
12 |
13 | from src.converter.create_project import *
14 | import hls4ml
15 |
16 |
class ConvertBuildLoadingFPGA(QThread):
    """Thread to convert the model and build the FPGA project.

    Attributes:
        model_path: Path of the model to convert
        project_name: Name of the project to be created
        output_path: Output path of the project to be created
    """

    # Emitted once conversion (and, where possible, compilation) is done.
    request_signal = pyqtSignal()

    def __init__(self, model_path, project_name, output_path):
        QThread.__init__(self)
        self.model_path = model_path
        self.project_name = project_name
        self.output_path = output_path

    def run(self):
        """Activates the thread

        Converts the Keras model with hls4ml and builds the FPGA
        project. When the work is finished, a signal is emitted.
        """
        print("Project name:", str(self.project_name))
        model = tf.keras.models.load_model(self.model_path)

        config = hls4ml.utils.config_from_keras_model(model,
                                                      granularity='model')

        hls_model = hls4ml.converters.convert_from_keras_model(
            model, hls_config=config, output_dir=str(self.output_path) + '/' +
            str(self.project_name))
        try:
            hls_model.compile()
        except Exception as error:
            # A bare "except:" would also swallow SystemExit and
            # KeyboardInterrupt and hide the failure reason; catch only
            # real errors and report what went wrong.
            print("To compile the project, Xilinx has to be installed.")
            print("Compile error:", error)

        print("Ende")
        self.request_signal.emit()

    def stop_thread(self):
        """Ends the thread
        """
        self.terminate()
61 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_w.c
22 | * Description: s8 version of concatenation along the W axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the W axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
/*
 * Copies the whole input tensor into the output tensor at W-offset
 * `offset_w`. Since W is the outermost axis, the copy is one contiguous
 * memcpy of x*y*z*w bytes.
 */
void arm_concatenation_s8_w(const int8_t *input,
                            const uint16_t input_x,
                            const uint16_t input_y,
                            const uint16_t input_z,
                            const uint16_t input_w,
                            int8_t *output,
                            const uint32_t offset_w)
{
    /* Promote to uint32_t before multiplying: uint16_t operands are
       integer-promoted to (signed) int, and a 4-factor product of values
       up to 65535 overflows 32-bit int (undefined behavior). */
    const uint32_t input_copy_size =
        (uint32_t)input_x * input_y * input_z * input_w;

    /* Advance to the start of W-slice `offset_w` in the output tensor. */
    output += offset_w * ((uint32_t)input_x * input_y * input_z);

    memcpy(output, input, input_copy_size);
}
62 |
63 | /**
64 | * @} end of Concatenation group
65 | */
66 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/activation_utils.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
17 | #define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
18 |
19 | #include
20 | #include
21 |
22 | #include "tensorflow/lite/c/builtin_op_data.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/max.h"
25 | #include "tensorflow/lite/kernels/internal/min.h"
26 |
27 | namespace tflite {
28 | namespace ops {
29 | namespace micro {
30 |
31 | // Returns the floating point value for a fused activation:
32 | inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
33 | switch (act) {
34 | case kTfLiteActNone:
35 | return a;
36 | case kTfLiteActRelu:
37 | return TfLiteMax(0.0f, a);
38 | case kTfLiteActReluN1To1:
39 | return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f));
40 | case kTfLiteActRelu6:
41 | return TfLiteMax(0.0f, TfLiteMin(a, 6.0f));
42 | case kTfLiteActTanh:
43 | return std::tanh(a);
44 | case kTfLiteActSignBit:
45 | return std::signbit(a);
46 | case kTfLiteActSigmoid:
47 | return 1.0f / (1.0f + std::exp(-a));
48 | }
49 | return 0.0f; // To indicate an unsupported activation (i.e. when a new fused
50 | // activation is added to the enum and not handled here).
51 | }
52 |
53 | } // namespace micro
54 | } // namespace ops
55 | } // namespace tflite
56 |
57 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
58 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/all_ops_resolver.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 |
13 | #include "tensorflow/lite/micro/all_ops_resolver.h"
14 |
15 | #include "tensorflow/lite/micro/kernels/micro_ops.h"
16 |
17 | namespace tflite {
18 |
// Registers every builtin operator compiled into this build, so any model
// using only these ops can run without a hand-curated resolver.
AllOpsResolver::AllOpsResolver() {
  // Please keep this list of Builtin Operators in alphabetical order.
  AddAbs();
  AddAdd();
  AddArgMax();
  AddArgMin();
  AddAveragePool2D();
  AddCeil();
  AddConcatenation();
  AddConv2D();
  AddCos();
  AddDepthwiseConv2D();
  AddDequantize();
  AddDetectionPostprocess();
  AddEqual();
  AddEthosU();
  AddFloor();
  AddFullyConnected();
  AddGreater();
  AddGreaterEqual();
  AddHardSwish();
  AddL2Normalization();
  AddLess();
  AddLessEqual();
  AddLog();
  AddLogicalAnd();
  AddLogicalNot();
  AddLogicalOr();
  AddLogistic();
  AddMaximum();
  AddMaxPool2D();
  AddMean();
  AddMinimum();
  AddMul();
  AddNeg();
  AddNotEqual();
  AddPack();
  AddPad();
  AddPadV2();
  AddPrelu();
  AddQuantize();
  AddReduceMax();
  AddRelu();
  AddRelu6();
  AddReshape();
  AddResizeNearestNeighbor();
  AddRound();
  AddRsqrt();
  AddShape();
  AddSin();
  AddSoftmax();
  AddSplit();
  AddSplitV();
  AddSqrt();
  AddSquare();
  AddStridedSlice();
  AddSub();
  AddSvdf();
  AddTanh();
  AddUnpack();
}
80 |
81 | } // namespace tflite
82 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/floor.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/kernels/internal/reference/floor.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
20 | #include "tensorflow/lite/micro/kernels/kernel_util.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace micro {
25 | namespace floor {
26 |
27 | constexpr int kInputTensor = 0;
28 | constexpr int kOutputTensor = 0;
29 |
30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
31 | const TfLiteEvalTensor* input =
32 | tflite::micro::GetEvalInput(context, node, kInputTensor);
33 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
34 | TfLiteEvalTensor* output =
35 | tflite::micro::GetEvalOutput(context, node, kOutputTensor);
36 | reference_ops::Floor(tflite::micro::GetTensorShape(input),
37 | tflite::micro::GetTensorData(input),
38 | tflite::micro::GetTensorShape(output),
39 | tflite::micro::GetTensorData(output));
40 | return kTfLiteOk;
41 | }
42 | } // namespace floor
43 |
44 | TfLiteRegistration Register_FLOOR() {
45 | return {/*init=*/nullptr,
46 | /*free=*/nullptr,
47 | /*prepare=*/nullptr,
48 | /*invoke=*/floor::Eval,
49 | /*profiling_string=*/nullptr,
50 | /*builtin_code=*/0,
51 | /*custom_name=*/nullptr,
52 | /*version=*/0};
53 | }
54 |
55 | } // namespace micro
56 | } // namespace ops
57 | } // namespace tflite
58 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/fully_connected.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
16 | #define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
17 |
18 | #include "tensorflow/lite/c/common.h"
19 |
20 | namespace tflite {
21 |
22 | // This is the most generic TfLiteRegistration. The actual supported types may
23 | // still be target dependent. The only requirement is that every implementation
24 | // (reference or optimized) must define this function.
25 | TfLiteRegistration Register_FULLY_CONNECTED();
26 |
27 | #if defined(CMSIS_NN) || defined(ARDUINO)
28 | // The Arduino is a special case where we use the CMSIS kernels, but because of
29 | // the current approach to building for Arduino, we do not support -DCMSIS_NN as
30 | // part of the build. As a result, we use defined(ARDUINO) as proxy for the
31 | // CMSIS kernels for this one special case.
32 |
33 | // Returns a TfLiteRegistration struct for cmsis-nn kernel variant that only
34 | // supports int8.
35 | TfLiteRegistration Register_FULLY_CONNECTED_INT8();
36 |
37 | #else
38 | // Note that while this block gets used for both reference and optimized kernels
39 | // that do not have any specialized implementations, the only goal here is to
40 | // define fallback implementation that allow reference kernels to still be used
41 | // from applications that call a more specific kernel variant.
42 |
inline TfLiteRegistration Register_FULLY_CONNECTED_INT8() {
  // Fallback: delegate to the generic kernel, which also handles int8.
  return Register_FULLY_CONNECTED();
}
46 |
47 | #endif
48 | } // namespace tflite
49 |
50 | #endif // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
51 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
17 |
18 | namespace tflite {
19 |
// Starts with an empty plan: no buffers recorded, next free offset at zero.
LinearMemoryPlanner::LinearMemoryPlanner()
    : current_buffer_count_(0), next_free_offset_(0) {}
LinearMemoryPlanner::~LinearMemoryPlanner() {}
23 |
24 | TfLiteStatus LinearMemoryPlanner::AddBuffer(
25 | tflite::ErrorReporter* error_reporter, int size, int first_time_used,
26 | int last_time_used) {
27 | if (current_buffer_count_ >= kMaxBufferCount) {
28 | TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)",
29 | kMaxBufferCount);
30 | return kTfLiteError;
31 | }
32 | buffer_offsets_[current_buffer_count_] = next_free_offset_;
33 | next_free_offset_ += size;
34 | ++current_buffer_count_;
35 | return kTfLiteOk;
36 | }
37 |
// Total bytes required: buffers are packed end to end, so the high-water
// mark is simply the next free offset.
size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; }
39 |
// Number of buffers successfully added via AddBuffer().
int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; }
41 |
42 | TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer(
43 | tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) {
44 | if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) {
45 | TF_LITE_REPORT_ERROR(error_reporter,
46 | "buffer index %d is outside range 0 to %d",
47 | buffer_index, current_buffer_count_);
48 | return kTfLiteError;
49 | }
50 | *offset = buffer_offsets_[buffer_index];
51 | return kTfLiteOk;
52 | }
53 |
54 | } // namespace tflite
55 |
--------------------------------------------------------------------------------
/src/gui_event/_dataloader.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | from PyQt5.QtWidgets import *
10 | from PyQt5.QtGui import *
11 | from PyQt5.QtCore import *
12 |
13 | from src.gui_layout.ui_dataloader import *
14 |
15 |
def dataloader(self):
    """Open the data loader window of the GUI.

    A dropdown menu lets the user choose whether the training data is
    handed over as a single file or as a folder.
    """
    self.dataloader_ui = UIDataloader(
        self.WINDOW_WIDTH, self.WINDOW_HEIGHT, self.FONT_STYLE, self)

    # Pre-fill the path label when a path was chosen in an earlier visit.
    if self.data_loader_path is not None:
        self.set_label(self.dataloader_ui.data_path_label,
                       self.data_loader_path, Qt.AlignCenter)

    # Wire up the browse button and the back/next navigation buttons.
    self.dataloader_ui.select_data_browse.clicked.connect(
        lambda: self.get_data_loader(
            self.dataloader_ui, self.dataloader_ui.data_path_label))
    self.dataloader_ui.back.clicked.connect(
        lambda: next_window(self, "Back"))
    self.dataloader_ui.next.clicked.connect(
        lambda: next_window(self, "Next"))

    self.setCentralWidget(self.dataloader_ui)
    self.show()
38 |
39 |
def next_window(self, n):
    """Decide which window follows the data loader.

    Going forward requires a non-empty data loader path; otherwise a
    warning dialog is shown and the current window stays open.

    Args:
        n: "Back" to return to the optimization window, "Next" to
           proceed to project creation.
    """
    self.data_loader_path = self.dataloader_ui.data_path_label.text()

    if n == "Back":
        self.optimization_algo()
    elif n == "Next":
        if not self.data_loader_path:
            # No data loader entered yet: warn the user and stay here.
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText("Please enter a data loader.")
            msg.setWindowTitle("Warning")
            msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
            msg.exec_()
            return
        self.create_project()
65 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_x.c
22 | * Description: s8 version of concatenation along the X axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the X axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
void arm_concatenation_s8_x(const int8_t *input,
                            const uint16_t input_x,
                            const uint16_t input_y,
                            const uint16_t input_z,
                            const uint16_t input_w,
                            int8_t *output,
                            const uint16_t output_x,
                            const uint32_t offset_x)
{
    /* Promote to uint32_t before multiplying: the product of three uint16_t
       values is otherwise computed in (signed) int after integer promotion
       and can overflow, which is undefined behavior. */
    const uint32_t num_iterations = (uint32_t)input_y * input_z * input_w;

    /* Position of this input's first row inside the wider output rows. */
    output += offset_x;

    uint32_t i;

    /* Copy one input row per iteration; output rows are output_x wide. */
    for (i = 0; i < num_iterations; ++i)
    {
        memcpy(output, input, input_x);
        input += input_x;
        output += output_x;
    }
}
71 |
72 | /**
73 | * @} end of Concatenation group
74 | */
75 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/quantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17 |
18 | #include
19 | #include
20 |
21 | #include "tensorflow/lite/kernels/internal/common.h"
22 | #include "tensorflow/lite/kernels/internal/compatibility.h"
23 | #include "tensorflow/lite/kernels/internal/cppmath.h"
24 | #include "tensorflow/lite/kernels/internal/types.h"
25 |
26 | namespace tflite {
27 |
28 | namespace reference_ops {
29 |
30 | template
31 | inline void AffineQuantize(const tflite::QuantizationParams& op_params,
32 | const RuntimeShape& input_shape,
33 | const InputT* input_data,
34 | const RuntimeShape& output_shape,
35 | OutputT* output_data) {
36 | const int32_t zero_point = op_params.zero_point;
37 | const double scale = op_params.scale;
38 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
39 | static constexpr int32_t min_val = std::numeric_limits::min();
40 | static constexpr int32_t max_val = std::numeric_limits::max();
41 |
42 | for (int i = 0; i < flat_size; i++) {
43 | const InputT val = input_data[i];
44 | int32_t unclamped =
45 | static_cast(TfLiteRound(val / static_cast(scale))) +
46 | zero_point;
47 | int32_t clamped = std::min(std::max(unclamped, min_val), max_val);
48 | output_data[i] = clamped;
49 | }
50 | }
51 |
52 | } // namespace reference_ops
53 |
54 | } // namespace tflite
55 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
56 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_z.c
22 | * Description: s8 version of concatenation along the Z axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the Z axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
void arm_concatenation_s8_z(const int8_t *input,
                            const uint16_t input_x,
                            const uint16_t input_y,
                            const uint16_t input_z,
                            const uint16_t input_w,
                            int8_t *output,
                            const uint16_t output_z,
                            const uint32_t offset_z)
{
    /* Promote to uint32_t before multiplying: products of uint16_t values
       are otherwise computed in (signed) int after integer promotion and
       can overflow, which is undefined behavior. */
    const uint32_t input_copy_size = (uint32_t)input_x * input_y * input_z;
    const uint32_t output_stride = (uint32_t)input_x * input_y * output_z;

    /* Skip offset_z planes of size input_x * input_y in every W-slice. */
    output += offset_z * ((uint32_t)input_x * input_y);

    uint32_t i;

    /* One contiguous Z-run is copied per W-slice of the input. */
    for (i = 0; i < input_w; ++i)
    {
        memcpy(output, input, input_copy_size);
        input += input_copy_size;
        output += output_stride;
    }
}
71 |
72 | /**
73 | * @} end of Concatenation group
74 | */
75 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_softmax_with_batch_q7.c
22 | * Description: Q7 softmax function
23 | *
24 | * $Date: 05. August 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M and Cortex-A cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_math.h"
32 | #include "arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Softmax
40 | * @{
41 | */
42 |
43 | /**
44 | * @brief Q7 softmax function with batch parameter
45 | * @param[in] vec_in pointer to input vector
46 | * @param[in] nb_batches number of batches
47 | * @param[in] dim_vec input vector dimention
48 | * @param[out] p_out pointer to output vector
49 | *
50 | * @details
51 | *
52 | * Here, instead of typical natural logarithm e based softmax, we use
53 | * 2-based softmax here, i.e.,:
54 | *
55 | * y_i = 2^(x_i) / sum(2^x_j)
56 | *
57 | * The relative output will be different here.
58 | * But mathematically, the gradient will be the same
59 | * with a log(2) scaling factor.
60 | *
61 | */
62 |
63 | void arm_softmax_with_batch_q7(const q7_t * vec_in, const uint16_t nb_batches,const uint16_t dim_vec, q7_t * p_out )
64 | {
65 | for(int i=0; i> 2;
48 | q31_t in;
49 |
50 | while (cnt > 0l)
51 | {
52 | q31_t value = arm_nn_read_q7x4_ia(&pV);
53 | v1 = __SXTB16(__ROR((uint32_t)value, 8));
54 | v2 = __SXTB16(value);
55 | #ifndef ARM_MATH_BIG_ENDIAN
56 | vo2 = (q31_t)__PKHTB(v1, v2, 16);
57 | vo1 = (q31_t)__PKHBT(v2, v1, 16);
58 | #else
59 | vo1 = (q31_t)__PKHTB(v1, v2, 16);
60 | vo2 = (q31_t)__PKHBT(v2, v1, 16);
61 | #endif
62 |
63 | in = arm_nn_read_q15x2(pCnt);
64 | write_q15x2_ia(&pCnt, __QADD16(vo1, in));
65 |
66 | in = arm_nn_read_q15x2(pCnt);
67 | write_q15x2_ia(&pCnt, __QADD16(vo2, in));
68 |
69 | cnt--;
70 | }
71 | cnt = length & 0x3;
72 | while (cnt > 0l)
73 | {
74 | *pCnt++ += *pV++;
75 | cnt--;
76 | }
77 | }
78 |
79 | /**
80 | * @} end of NNBasicMath group
81 | */
--------------------------------------------------------------------------------
/src/automl/customize_autokeras.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================'''
5 |
6 | """To ensure that AutoKeras works without errors, this script is executed.
7 | It causes no CastToFloat32 layer to be added to the AutoKeras models. This
8 | would generate an error later when converting the models to TensorFlow Lite.
9 |
10 | Typical usage example:
11 | win - python Remove_cast_to_float32.py C:/Users/.../Anaconda3/envs/AutoFlow
12 | linux - python Remove_cast_to_float32.py .../anaconda3/envs/AutoFlow
13 | """
14 |
15 | import sys
16 |
17 |
def replace_string(path):
    """Patch ``autokeras/nodes.py`` so no CastToFloat32 layer is added.

    The line instantiating ``keras_layers.CastToFloat32`` is replaced by
    the plain ``input_node``, keeping the original expression visible as
    a trailing comment. Running it again on an already-patched file is a
    no-op.

    Args:
        path: Path to the Anaconda environment that contains autokeras.
    """
    # Text to disable, and the replacement that keeps the original code
    # visible behind a comment marker.
    search_text = 'keras_layers.CastToFloat32()(input_node)'
    replace_text = 'input_node #' + search_text

    # Location of nodes.py inside the environment depends on the OS.
    if 'linux' in sys.platform:
        file_path = '/lib/python3.8/site-packages/autokeras/nodes.py'
    else:
        file_path = '/Lib/site-packages/autokeras/nodes.py'

    with open(path + file_path, 'r') as f:
        data = f.read()

    # Already patched: nothing to do.
    if replace_text in data:
        return

    # Bug fix: the original rewrote the file (and printed 'Text replaced')
    # even when the search text was absent. Only write when a replacement
    # actually happens.
    if search_text not in data:
        return

    data = data.replace(search_text, replace_text)

    with open(path + file_path, 'w') as f:
        f.write(data)

    print('Text replaced')
67 |
68 |
# Entry point: expects the environment path as the first CLI argument.
if __name__ == '__main__':
    replace_string(sys.argv[1])
71 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_concatenation_s8_y.c
22 | * Description: s8 version of concatenation along the Y axis
23 | *
24 | * $Date: October 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupNN
35 | */
36 |
37 | /**
38 | * @addtogroup Concatenation
39 | * @{
40 | */
41 |
42 | /*
43 | * s8 version of concatenation along the Y axis
44 | *
45 | * Refer to header file for details.
46 | *
47 | */
void arm_concatenation_s8_y(const int8_t *input,
                            const uint16_t input_x,
                            const uint16_t input_y,
                            const uint16_t input_z,
                            const uint16_t input_w,
                            int8_t *output,
                            const uint16_t output_y,
                            const uint32_t offset_y)
{
    /* Promote to uint32_t before multiplying: products of uint16_t values
       are otherwise computed in (signed) int after integer promotion and
       can overflow, which is undefined behavior. */
    const uint32_t num_iterations = (uint32_t)input_z * input_w;
    const uint32_t input_copy_size = (uint32_t)input_x * input_y;
    const uint32_t output_stride = (uint32_t)input_x * output_y;

    /* Skip offset_y rows of width input_x in every output tile. */
    output += offset_y * input_x;
    uint32_t i;

    /* Copy one contiguous X-Y tile per (Z, W) combination. */
    for (i = 0; i < num_iterations; ++i)
    {
        memcpy(output, input, input_copy_size);
        input += input_copy_size;
        output += output_stride;
    }
}
72 |
73 | /**
74 | * @} end of Concatenation group
75 | */
76 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/neg.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/kernels/internal/reference/neg.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
20 | #include "tensorflow/lite/micro/kernels/kernel_util.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace micro {
25 | namespace neg {
26 |
27 | constexpr int kInputTensor = 0;
28 | constexpr int kOutputTensor = 0;
29 |
30 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
31 | const TfLiteEvalTensor* input =
32 | tflite::micro::GetEvalInput(context, node, kInputTensor);
33 | TfLiteEvalTensor* output =
34 | tflite::micro::GetEvalOutput(context, node, kOutputTensor);
35 | switch (input->type) {
36 | // TODO(wangtz): handle for kTfLiteInt8
37 | case kTfLiteFloat32:
38 | reference_ops::Negate(tflite::micro::GetTensorShape(input),
39 | tflite::micro::GetTensorData(input),
40 | tflite::micro::GetTensorShape(output),
41 | tflite::micro::GetTensorData(output));
42 | break;
43 | default:
44 | TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
45 | TfLiteTypeGetName(input->type), input->type);
46 | return kTfLiteError;
47 | }
48 | return kTfLiteOk;
49 | }
50 |
51 | } // namespace neg
52 |
53 | TfLiteRegistration Register_NEG() {
54 | return {/*init=*/nullptr,
55 | /*free=*/nullptr,
56 | /*prepare=*/nullptr,
57 | /*invoke=*/neg::Eval,
58 | /*profiling_string=*/nullptr,
59 | /*builtin_code=*/0,
60 | /*custom_name=*/nullptr,
61 | /*version=*/0};
62 | }
63 |
64 | } // namespace micro
65 | } // namespace ops
66 | } // namespace tflite
67 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
17 |
#include <cstdarg>
19 |
20 | namespace tflite {
21 |
22 | /// A functor that reports error to supporting system. Invoked similar to
23 | /// printf.
24 | ///
25 | /// Usage:
26 | /// ErrorReporter foo;
27 | /// foo.Report("test %d", 5);
28 | /// or
29 | /// va_list args;
30 | /// foo.Report("test %d", args); // where args is va_list
31 | ///
32 | /// Subclass ErrorReporter to provide another reporting destination.
33 | /// For example, if you have a GUI program, you might redirect to a buffer
34 | /// that drives a GUI error log box.
class ErrorReporter {
 public:
  virtual ~ErrorReporter() {}
  // Core reporting hook: formats `format` with the already-packed `args` and
  // delivers it to the reporting destination. Subclasses must implement this.
  virtual int Report(const char* format, va_list args) = 0;
  // printf-style convenience overload declared here; presumably packs the
  // varargs and forwards to the va_list overload -- confirm in the .cc file.
  int Report(const char* format, ...);
  // Legacy entry point with an extra leading pointer argument -- NOTE(review):
  // its semantics are not visible from this header; check error_reporter.cc.
  int ReportError(void*, const char* format, ...);
};
42 |
43 | } // namespace tflite
44 |
45 | // You should not make bare calls to the error reporter, instead use the
46 | // TF_LITE_REPORT_ERROR macro, since this allows message strings to be
47 | // stripped when the binary size has to be optimized. If you are looking to
48 | // reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
49 | // every call will be stubbed out, taking no memory.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
// Restored the cast target that was lost in this copy of the file: the
// opaque `reporter` handle must be cast to tflite::ErrorReporter* before
// calling Report().
#define TF_LITE_REPORT_ERROR(reporter, ...)                             \
  do {                                                                  \
    static_cast<tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
  } while (false)
#else  // TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_REPORT_ERROR(reporter, ...)
#endif  // TF_LITE_STRIP_ERROR_STRINGS
58 |
59 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
60 |
--------------------------------------------------------------------------------
/src/automl/image_regressor.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | import autokeras as ak
9 | import os
10 | import tensorflow as tf
11 |
12 | from src.gui_event._dataloader_helper import dataloader_autokeras
13 |
14 |
def image_regressor(project_name, output_path, data_path, max_trials=10,
                    max_epochs=20, max_size=0, overwrite=True, num_channels=3,
                    img_height=128, img_width=128, separator=None,
                    decimal=None, csv_target_label=None):
    """Run an AutoKeras search for an image regression model and save it.

    Args:
        project_name: Base name of the saved model file.
        output_path: Directory the trained model (.h5) is written to.
        data_path: Path to a CSV file or an image directory with the data.
        max_trials: Number of candidate models the search may try.
        max_epochs: Maximum training epochs per trial.
        max_size: Upper bound passed to AutoKeras as max_model_size
            (0 presumably means "no limit" -- TODO confirm).
        overwrite: Whether to overwrite a previous search of the same project.
        num_channels: Number of image channels (e.g. 3 for RGB).
        img_height: Height images are resized to by the dataloader.
        img_width: Width images are resized to by the dataloader.
        separator: CSV column separator (CSV input only).
        decimal: CSV decimal character (CSV input only).
        csv_target_label: Name of the target column (CSV input only).
    """
    input_node = ak.ImageInput()
    output_node = ak.ConvBlock()(input_node)
    output_node = ak.DenseBlock()(output_node)
    output_node = ak.RegressionHead()(output_node)
    clf = ak.AutoModel(
        inputs=input_node, outputs=output_node, overwrite=overwrite,
        max_trials=max_trials, max_model_size=max_size
    )

    # Shared early-stopping callback: abort a trial once the validation loss
    # stops improving and restore the best weights seen so far.
    callbacks = [tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=5, restore_best_weights=True)]

    if os.path.isfile(data_path):
        # CSV input: the dataloader returns plain arrays.
        x_train, y_train, x_test, y_test = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, img_height,
            img_width, num_channels)
        clf.fit(x_train, y_train, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=callbacks)
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(x_test, y_test))
    elif os.path.isdir(data_path):
        # Directory input: the dataloader returns dataset objects.
        train_data, _, test_data, _ = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, img_height,
            img_width, num_channels)
        clf.fit(train_data, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=callbacks)
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(test_data))
    else:
        # Previously an invalid path fell through and crashed later in
        # export_model(); fail early with a clear message instead.
        raise FileNotFoundError(
            "Data path is neither a file nor a directory: " + str(data_path))

    best_model = clf.export_model()
    best_model.summary()

    # os.path.join is robust against trailing separators in output_path.
    best_model.save(os.path.join(output_path, project_name + '.h5'))
55 |
--------------------------------------------------------------------------------
/src/automl/image_classifier.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | import autokeras as ak
9 | import os
10 | import tensorflow as tf
11 |
12 | from src.gui_event._dataloader_helper import dataloader_autokeras
13 |
14 |
def image_classifier(project_name, output_path, data_path, max_trials=10,
                     max_epochs=20, max_size=0, overwrite=True, num_channels=3,
                     img_height=128, img_width=128, separator=None,
                     decimal=None, csv_target_label=None):
    """Run an AutoKeras search for an image classification model and save it.

    Args:
        project_name: Base name of the saved model file.
        output_path: Directory the trained model (.h5) is written to.
        data_path: Path to a CSV file or an image directory with the data.
        max_trials: Number of candidate models the search may try.
        max_epochs: Maximum training epochs per trial.
        max_size: Upper bound passed to AutoKeras as max_model_size
            (0 presumably means "no limit" -- TODO confirm).
        overwrite: Whether to overwrite a previous search of the same project.
        num_channels: Number of image channels (e.g. 3 for RGB).
        img_height: Height images are resized to by the dataloader.
        img_width: Width images are resized to by the dataloader.
        separator: CSV column separator (CSV input only).
        decimal: CSV decimal character (CSV input only).
        csv_target_label: Name of the target column (CSV input only).
    """
    input_node = ak.ImageInput()
    output_node = ak.ConvBlock()(input_node)
    output_node = ak.DenseBlock()(output_node)
    output_node = ak.ClassificationHead()(output_node)
    clf = ak.AutoModel(
        inputs=input_node, outputs=output_node, overwrite=overwrite,
        max_trials=max_trials, max_model_size=max_size
    )

    # Shared early-stopping callback: abort a trial once the validation loss
    # stops improving and restore the best weights seen so far.
    callbacks = [tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=5, restore_best_weights=True)]

    if os.path.isfile(data_path):
        # CSV input: the dataloader returns plain arrays.
        x_train, y_train, x_test, y_test = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, img_height,
            img_width, num_channels)
        clf.fit(x_train, y_train, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=callbacks)
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(x_test, y_test))
    elif os.path.isdir(data_path):
        # Directory input: the dataloader returns dataset objects.
        train_data, _, test_data, _ = dataloader_autokeras(
            data_path, separator, decimal, csv_target_label, img_height,
            img_width, num_channels)
        clf.fit(train_data, epochs=max_epochs, validation_split=0.2,
                batch_size=64, callbacks=callbacks)
        # Evaluate the best model with testing data.
        print("Best model evaluation:", clf.evaluate(test_data))
    else:
        # Previously an invalid path fell through and crashed later in
        # export_model(); fail early with a clear message instead.
        raise FileNotFoundError(
            "Data path is neither a file nor a directory: " + str(data_path))

    best_model = clf.export_model()
    best_model.summary()

    # os.path.join is robust against trailing separators in output_path.
    best_model.save(os.path.join(output_path, project_name + '.h5'))
54 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_nn_add_q7.c
22 | * Description: Non saturating addition of elements of a q7 vector.
23 | *
24 | * $Date: July 2019
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 | #include "arm_math.h"
31 | #include "arm_nnfunctions.h"
32 |
33 | /**
34 | * @ingroup groupSupport
35 | */
36 |
37 | /**
38 | * @addtogroup NNBasicMath
39 | * @{
40 | */
41 |
/**
 * @brief Non-saturating sum of all elements of a q7 vector.
 * @param[in]  input       pointer to the q7 input vector
 * @param[out] output      pointer to a single q31 result (sum of all inputs)
 * @param[in]  block_size  number of elements in the input vector
 */
void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size)
{
    uint32_t block_count;
    q31_t result = 0;
#if defined(ARM_MATH_DSP)
    /* Loop unrolling: Compute 4 outputs at a time */
    block_count = block_size >> 2U;

    while (block_count > 0U)
    {
        /* 0x00010001: a 1 in each q15 half, so SMLAD below simply adds the
           two q15 lanes into the accumulator. */
        const int32_t mult_q15x2 = (1UL << 16) | 1UL;
        /* Load four q7 values at once, post-incrementing the input pointer. */
        q31_t in_q7x4 = arm_nn_read_q7x4_ia(&input);
        /* Sign-extend the four q7 lanes into two q15x2 words and add them
           pairwise, yielding two q15 partial sums. */
        q31_t temp_q15x2 = __SXTAB16(__SXTB16(in_q7x4),
                                     __ROR((uint32_t)in_q7x4, 8));

        /* Fold both q15 partial sums into the scalar accumulator. */
        result = __SMLAD(temp_q15x2, mult_q15x2, result);

        /* Decrement loop counter */
        block_count--;
    }

    /* Loop unrolling: Compute remaining outputs */
    block_count = block_size & 0x3;
#else
    block_count = block_size;
#endif
    while (block_count > 0U)
    {
        /* Accumulate remaining elements one at a time. */
        result += *input++;

        /* Decrement loop counter */
        block_count--;
    }

    *output = result;
}
79 |
80 | /**
81 | * @} end of NNBasicMath group
82 | */
--------------------------------------------------------------------------------
/example/templates/arduino_mnist/tf_lite_exe.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | SPDX-License-Identifier: GPL-3.0
4 | ============================================================================*/
5 |
6 | #include "tf_lite_exe.h"
7 |
8 | namespace {
9 | // Create an area of memory to use for input, output, and intermediate arrays.
10 | constexpr int kTensorArenaSize = 100 * 1024;
11 | uint8_t tensor_arena[kTensorArenaSize];
12 |
13 | tflite::ErrorReporter* error_reporter = nullptr;
14 | const tflite::Model* model = nullptr;
15 | tflite::MicroInterpreter* interpreter = nullptr;
16 | TfLiteTensor* input = nullptr;
17 | TfLiteTensor* output = nullptr;
18 |
19 | float* prediction = new float[10];
20 | }
21 |
22 | void setup_model() {
23 | static tflite::MicroErrorReporter micro_error_reporter;
24 | error_reporter = µ_error_reporter;
25 |
26 | // Load the tflite Model
27 | model = tflite::GetModel(MNIST_tflite);
28 | if (model->version() != TFLITE_SCHEMA_VERSION) {
29 | error_reporter->Report(
30 | "Model provided is schema version %d not equal "
31 | "to supported version %d.",
32 | model->version(), TFLITE_SCHEMA_VERSION);
33 | return;
34 | }
35 |
36 | // This pulls in all the operation implementations we need.
37 | static tflite::AllOpsResolver resolver;
38 |
39 | // Build an interpreter to run the model with.
40 | static tflite::MicroInterpreter static_interpreter(
41 | model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
42 | interpreter = &static_interpreter;
43 |
44 | // Allocate memory from the tensor_arena for the model's tensors.
45 | TfLiteStatus allocate_status = interpreter->AllocateTensors();
46 | if (allocate_status != kTfLiteOk) {
47 | error_reporter->Report("AllocateTensors() failed");
48 | return;
49 | }
50 |
51 | // Obtain pointers to the model's input and output tensors.
52 | input = interpreter->input(0);
53 | output = interpreter->output(0);
54 |
55 | }
56 |
57 | float* model_execute(float *input_data) {
58 | for (int i = 0; i < 784; ++i) {
59 | input->data.f[i] = *input_data;
60 | input_data++;
61 | }
62 |
63 | // Run inference, and report any error
64 | TfLiteStatus invoke_status = interpreter->Invoke();
65 | if (invoke_status != kTfLiteOk) {
66 | error_reporter->Report("Error by invoking interpreter\n");
67 | return 0;
68 | }
69 |
70 | // Read the prediction from the model's output tensor
71 | for (int i = 0; i < 10; i++) {
72 | prediction[i] = output->data.f[i];
73 | }
74 |
75 | return prediction;
76 | }
77 |
--------------------------------------------------------------------------------
/src/gui_event/_automl_task.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Marcel Sawrin + Marcus Rueb
3 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
4 | Daniel Konegen + Marcus Rueb
5 | SPDX-License-Identifier: GPL-3.0
6 | ============================================================================'''
7 |
8 | from src.gui_layout.ui_automl_task import *
9 |
10 |
def automl_task(self):
    """Show the AutoML task-selection window and wire up its buttons.

    Each of the four task buttons advances to the next window with its task
    identifier; "Back" returns to the previous (data) window.
    """
    self.automl_task_ui = UIAutoMLTask(self.WINDOW_WIDTH, self.WINDOW_HEIGHT,
                                       self.FONT_STYLE, self)

    # Map every task button to the "Next" transition with its task id.
    task_buttons = (
        (self.automl_task_ui.image_classification, "imageClassification"),
        (self.automl_task_ui.image_regression, "imageRegression"),
        (self.automl_task_ui.data_classification, "dataClassification"),
        (self.automl_task_ui.data_regression, "dataRegression"),
    )
    for button, task_name in task_buttons:
        # Bind task_name as a default argument so each lambda captures its
        # own value instead of the loop variable's final value.
        button.clicked.connect(
            lambda checked=False, task_name=task_name: next_window(
                self, "Next", task_name))

    self.automl_task_ui.back.clicked.connect(
        lambda: next_window(self, "Back", None))

    self.setCentralWidget(self.automl_task_ui)
    self.show()
35 |
36 |
def next_window(self, n, task):
    """
    Defines which one is the next window to open.

    Args:
        n: "Back" to return to the data window, "Next" to proceed.
        task: Model type to interpret the data (e.g. "dataClassification");
            ignored when going back.
    """
    if n == "Back":
        self.automl_data()

    elif n == "Next":
        self.task = task
        print("Task:", self.task)

        # Tabular ("data*") tasks need a single file as dataloader; an image
        # directory cannot be read as tabular data. The original duplicated
        # the isdir() check per task -- collapsed into one membership test.
        if (self.task in ("dataClassification", "dataRegression") and
                os.path.isdir(self.data_loader_path)):
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)

            msg.setText("If you want to use this task please use a file "
                        "as dataloader.")
            msg.setWindowTitle("Warning")
            msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
            msg.exec_()
            return

        self.automl_settings()
68 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/shape.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/c/builtin_op_data.h"
17 | #include "tensorflow/lite/c/common.h"
18 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
19 | #include "tensorflow/lite/kernels/kernel_util.h"
20 | #include "tensorflow/lite/kernels/op_macros.h"
21 | #include "tensorflow/lite/micro/kernels/kernel_util.h"
22 | #include "tensorflow/lite/micro/memory_helpers.h"
23 | #include "tensorflow/lite/micro/micro_utils.h"
24 |
25 | namespace tflite {
26 |
27 | namespace {
28 | constexpr int kInputTensor = 0;
29 | constexpr int kOutputTensor = 0;
30 |
31 | void ExtractShape(const TfLiteEvalTensor* input, int32_t* output_data) {
32 | for (int i = 0; i < input->dims->size; ++i) {
33 | output_data[i] = input->dims->data[i];
34 | }
35 | }
36 |
// SHAPE takes exactly one input tensor and produces exactly one output
// tensor; anything else is a malformed graph.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  return kTfLiteOk;
}
43 |
44 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
45 | const TfLiteEvalTensor* input =
46 | tflite::micro::GetEvalInput(context, node, kInputTensor);
47 | TfLiteEvalTensor* output =
48 | tflite::micro::GetEvalOutput(context, node, kOutputTensor);
49 | if (output->type != kTfLiteInt32) {
50 | TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.",
51 | TfLiteTypeGetName(output->type), output->type);
52 | return kTfLiteError;
53 | } else {
54 | ExtractShape(input, tflite::micro::GetTensorData(output));
55 | }
56 |
57 | return kTfLiteOk;
58 | }
59 |
60 | } // namespace
61 |
62 | TfLiteRegistration Register_SHAPE() {
63 | return {/*init=*/nullptr,
64 | /*free=*/nullptr,
65 | /*prepare=*/Prepare,
66 | /*invoke=*/Eval,
67 | /*profiling_string=*/nullptr,
68 | /*builtin_code=*/0,
69 | /*custom_name=*/nullptr,
70 | /*version=*/0};
71 | }
72 |
73 | } // namespace tflite
74 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/op_resolver.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/core/api/op_resolver.h"
17 |
18 | #include "flatbuffers/flatbuffers.h" // from @flatbuffers
19 | #include "tensorflow/lite/c/common.h"
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/schema/schema_utils.h"
22 |
23 | namespace tflite {
24 |
25 | TfLiteStatus GetRegistrationFromOpCode(
26 | const OperatorCode* opcode, const OpResolver& op_resolver,
27 | ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
28 | TfLiteStatus status = kTfLiteOk;
29 | *registration = nullptr;
30 | auto builtin_code = GetBuiltinCode(opcode);
31 | int version = opcode->version();
32 |
33 | if (builtin_code > BuiltinOperator_MAX ||
34 | builtin_code < BuiltinOperator_MIN) {
35 | TF_LITE_REPORT_ERROR(
36 | error_reporter,
37 | "Op builtin_code out of range: %d. Are you using old TFLite binary "
38 | "with newer model?",
39 | builtin_code);
40 | status = kTfLiteError;
41 | } else if (builtin_code != BuiltinOperator_CUSTOM) {
42 | *registration = op_resolver.FindOp(builtin_code, version);
43 | if (*registration == nullptr) {
44 | TF_LITE_REPORT_ERROR(
45 | error_reporter,
46 | "Didn't find op for builtin opcode '%s' version '%d'\n",
47 | EnumNameBuiltinOperator(builtin_code), version);
48 | status = kTfLiteError;
49 | }
50 | } else if (!opcode->custom_code()) {
51 | TF_LITE_REPORT_ERROR(
52 | error_reporter,
53 | "Operator with CUSTOM builtin_code has no custom_code.\n");
54 | status = kTfLiteError;
55 | } else {
56 | const char* name = opcode->custom_code()->c_str();
57 | *registration = op_resolver.FindOp(name, version);
58 | if (*registration == nullptr) {
59 | // Do not report error for unresolved custom op, we do the final check
60 | // while preparing ops.
61 | status = kTfLiteError;
62 | }
63 | }
64 | return status;
65 | }
66 |
67 | } // namespace tflite
68 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/memory_helpers.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
16 | #define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
17 |
#include <cstddef>
#include <cstdint>
20 |
21 | #include "tensorflow/lite/c/common.h"
22 | #include "tensorflow/lite/core/api/error_reporter.h"
23 | #include "tensorflow/lite/schema/schema_generated.h"
24 |
25 | namespace tflite {
26 |
27 | // Returns the next pointer address aligned to the given alignment.
28 | uint8_t* AlignPointerUp(uint8_t* data, size_t alignment);
29 |
30 | // Returns the previous pointer address aligned to the given alignment.
31 | uint8_t* AlignPointerDown(uint8_t* data, size_t alignment);
32 |
33 | // Returns an increased size that's a multiple of alignment.
34 | size_t AlignSizeUp(size_t size, size_t alignment);
35 |
36 | // Returns size in bytes for a given TfLiteType.
37 | TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size);
38 |
39 | // How many bytes are needed to hold a tensor's contents.
40 | TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor,
41 | size_t* bytes, size_t* type_size,
42 | ErrorReporter* error_reporter);
43 |
44 | // How many bytes are used in a TfLiteEvalTensor instance. The byte length is
45 | // returned in out_bytes.
46 | TfLiteStatus TfLiteEvalTensorByteLength(const TfLiteEvalTensor* eval_tensor,
47 | size_t* out_bytes);
48 |
49 | // Deduce output dimensions from input and allocate given size.
50 | // Useful for operators with two inputs where the largest input should equal the
51 | // output dimension.
52 | TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context,
53 | const TfLiteTensor* input1,
54 | const TfLiteTensor* input2,
55 | TfLiteTensor* output);
56 |
57 | } // namespace tflite
58 |
59 | #endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
60 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/recording_simple_memory_allocator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
17 | #define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
18 |
19 | #include "tensorflow/lite/micro/compatibility.h"
20 | #include "tensorflow/lite/micro/simple_memory_allocator.h"
21 |
22 | namespace tflite {
23 |
24 | // Utility class used to log allocations of a SimpleMemoryAllocator. Should only
25 | // be used in debug/evaluation settings or unit tests to evaluate allocation
26 | // usage.
class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator {
 public:
  RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter,
                                 uint8_t* buffer_head, size_t buffer_size);
  // TODO(b/157615197): Cleanup constructors/destructor and use factory
  // functions.
  ~RecordingSimpleMemoryAllocator() override;

  // Factory-function alternative to the constructor (see TODO above).
  static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter,
                                                uint8_t* buffer_head,
                                                size_t buffer_size);

  // Returns the number of bytes requested from the head or tail.
  size_t GetRequestedBytes() const;

  // Returns the number of bytes actually allocated from the head or tail. This
  // value will be >= to the number of requested bytes due to padding and
  // alignment.
  size_t GetUsedBytes() const;

  // Returns the number of alloc calls from the head or tail.
  size_t GetAllocatedCount() const;

  // Overrides that record the request/usage/count statistics before
  // delegating to the base SimpleMemoryAllocator behavior.
  TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment) override;
  uint8_t* AllocateFromTail(size_t size, size_t alignment) override;

 private:
  // Running totals maintained by the overrides above.
  size_t requested_head_bytes_;
  size_t requested_tail_bytes_;
  size_t used_bytes_;
  size_t alloc_count_;

  TF_LITE_REMOVE_VIRTUAL_DELETE
};
61 |
62 | } // namespace tflite
63 |
64 | #endif // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
65 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/core/api/op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
17 |
#include <memory>
#include <vector>
19 |
20 | #include "tensorflow/lite/c/common.h"
21 | #include "tensorflow/lite/core/api/error_reporter.h"
22 | #include "tensorflow/lite/schema/schema_generated.h"
23 |
24 | namespace tflite {
25 |
26 | /// Abstract interface that returns TfLiteRegistrations given op codes or custom
27 | /// op names. This is the mechanism that ops being referenced in the flatbuffer
28 | /// model are mapped to executable function pointers (TfLiteRegistrations).
29 | class OpResolver {
30 | public:
31 | /// Finds the op registration for a builtin operator by enum code.
32 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
33 | int version) const = 0;
34 | /// Finds the op registration of a custom operator by op name.
35 | virtual const TfLiteRegistration* FindOp(const char* op,
36 | int version) const = 0;
37 |
38 | // Returns optional delegates for resolving and handling ops in the flatbuffer
39 | // model. This may be used in addition to the standard TfLiteRegistration
40 | // lookup for graph resolution.
41 | using TfLiteDelegatePtrVector =
42 | std::vector>;
43 | virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
44 | return TfLiteDelegatePtrVector();
45 | }
46 |
47 | virtual ~OpResolver() {}
48 | };
49 |
50 | // Handles the logic for converting between an OperatorCode structure extracted
51 | // from a flatbuffer and information about a registered operator
52 | // implementation.
53 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
54 | const OpResolver& op_resolver,
55 | ErrorReporter* error_reporter,
56 | const TfLiteRegistration** registration);
57 |
58 | } // namespace tflite
59 |
60 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
61 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/schema/schema_utils.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #include "tensorflow/lite/schema/schema_utils.h"
16 |
#include <algorithm>
18 |
19 | #include "tensorflow/lite/kernels/internal/compatibility.h"
20 |
21 | namespace tflite {
22 |
23 | // The following GetBuiltinCode methods are the utility methods for reading
24 | // builtin operatore code, ensuring compatibility issues between v3 and v3a
25 | // schema. Always the maximum value of the two fields always will be the correct
26 | // value as follows:
27 | //
28 | // - Supporting schema version v3 models
29 | //
30 | // The `builtin_code` field is not available in the v3 models. Flatbuffer
31 | // library will feed zero value, which is the default value in the v3a schema.
32 | // The actual builtin operatore code value will exist in the
33 | // `deprecated_builtin_code` field. At the same time, it implies that
34 | // `deprecated_builtin_code` >= `builtin_code` and the maximum value of the two
35 | // fields will be same with `deprecated_builtin_code'.
36 | //
37 | // - Supporting builtin operator codes beyonds 127
38 | //
39 | // New builtin operators, whose operator code is larger than 127, can not be
40 | // assigned to the `deprecated_builtin_code` field. In such cases, the
41 | // value of the `builtin_code` field should be used for the builtin operator
42 | // code. In the case, the maximum value of the two fields will be the value of
43 | // the `builtin_code` as the right value.
44 |
45 | BuiltinOperator GetBuiltinCode(const OperatorCode* op_code) {
46 | // Caller should guarantee that the given argument value is not a nullptr.
47 | TFLITE_DCHECK(op_code != nullptr);
48 |
49 | return std::max(
50 | op_code->builtin_code(),
51 | static_cast(op_code->deprecated_builtin_code()));
52 | }
53 |
54 | BuiltinOperator GetBuiltinCode(const OperatorCodeT* op_code) {
55 | // Caller should guarantee that the given argument value is not a nullptr.
56 | TFLITE_DCHECK(op_code != nullptr);
57 |
58 | return std::max(op_code->builtin_code, static_cast(
59 | op_code->deprecated_builtin_code));
60 | }
61 |
62 | } // namespace tflite
63 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/arg_min_max.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
17 |
18 | #include "tensorflow/lite/kernels/internal/types.h"
19 |
20 | namespace tflite {
21 |
22 | namespace reference_ops {
23 |
24 | template
25 | void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
26 | const T3* input2_data, const RuntimeShape& output_shape,
27 | T2* output_data, const Cmp& cmp) {
28 | TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0);
29 | TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1,
30 | output_shape.DimensionsCount());
31 | int axis = input2_data[0];
32 | if (axis < 0) {
33 | axis += input1_shape.DimensionsCount();
34 | }
35 | const int axis_size = input1_shape.Dims(axis);
36 |
37 | int outer_size = 1;
38 | for (int i = 0; i < axis; ++i) {
39 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i));
40 | outer_size *= input1_shape.Dims(i);
41 | }
42 |
43 | int inner_size = 1;
44 | const int dims_count = input1_shape.DimensionsCount();
45 | for (int i = axis + 1; i < dims_count; ++i) {
46 | TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1));
47 | inner_size *= input1_shape.Dims(i);
48 | }
49 | for (int outer = 0; outer < outer_size; ++outer) {
50 | for (int inner = 0; inner < inner_size; ++inner) {
51 | auto min_max_value = input1_data[outer * axis_size * inner_size + inner];
52 | T2 min_max_index = 0;
53 | for (int i = 1; i < axis_size; ++i) {
54 | const auto& curr_value =
55 | input1_data[(outer * axis_size + i) * inner_size + inner];
56 | if (cmp(curr_value, min_max_value)) {
57 | min_max_value = curr_value;
58 | min_max_index = static_cast(i);
59 | }
60 | }
61 | output_data[outer * inner_size + inner] = min_max_index;
62 | }
63 | }
64 | }
65 | } // namespace reference_ops
66 | } // namespace tflite
67 |
68 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
69 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_relu_q15.c
22 | * Description: Q15 version of ReLU
23 | *
24 | * $Date: February 27, 2020
25 | * $Revision: V.1.0.1
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_math.h"
32 | #include "arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Acti
40 | * @{
41 | */
42 |
43 | /**
44 | * @brief Q15 RELU function
45 | * @param[in,out] data pointer to input
46 | * @param[in] size number of elements
47 | *
48 | * @details
49 | *
50 | * Optimized relu with QSUB instructions.
51 | *
52 | */
53 |
void arm_relu_q15(q15_t *data, uint16_t size)
{

#if defined(ARM_MATH_DSP)
    /* Run the following code for M cores with DSP extension */

    /* Process two q15 values per iteration via 32-bit SIMD loads/stores. */
    uint16_t i = size >> 1;
    q15_t *input = data;
    q15_t *output = data;
    q31_t in;
    q31_t buf;
    q31_t mask;

    while (i)
    {
        in = read_q15x2_ia(&input);

        /* extract the sign bit (MSB) of each q15 half-word and rotate it
           down to the lowest bit of the respective lane */
        buf = __ROR(in & 0x80008000, 15);

        /* per-lane saturating subtract: a lane of mask becomes 0xFFFF when
           its MSB was 1 (negative value), 0x0000 otherwise */
        mask = __QSUB16(0x00000000, buf);

        /* negative lanes are forced to zero, non-negative lanes unchanged */
        write_q15x2_ia(&output, in & (~mask));
        i--;
    }

    /* handle the trailing element when size is odd */
    if (size & 0x1)
    {
        if (*input < 0)
        {
            *input = 0;
        }
        /* NOTE(review): this increment is dead - `input` is not read again */
        input++;
    }
#else
    /* Run the following code as reference implementation for M cores without DSP extension */
    uint16_t i;

    for (i = 0; i < size; i++)
    {
        if (data[i] < 0)
            data[i] = 0;
    }

#endif /* ARM_MATH_DSP */
}
101 |
102 | /**
103 | * @} end of Acti group
104 | */
105 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/rp2/debug_log.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Reference implementation of the DebugLog() function that's required for a
17 | // platform to support the TensorFlow Lite for Microcontrollers library. This is
18 | // the only function that's absolutely required to be available on a target
19 | // device, since it's used for communicating test results back to the host so
20 | // that we can verify the implementation is working correctly.
21 | // It's designed to be as easy as possible to supply an implementation though.
22 | // On platforms that have a POSIX stack or C library, it can be written as a
23 | // single call to `fprintf(stderr, "%s", s)` to output a string to the error
24 | // stream of the console, but if there's no OS or C library available, there's
25 | // almost always an equivalent way to write out a string to some serial
26 | // interface that can be used instead. For example on Arm M-series MCUs, calling
27 | // the `bkpt #0xAB` assembler instruction will output the string in r1 to
28 | // whatever debug serial connection is available. If you're running mbed, you
29 | // can do the same by creating `Serial pc(USBTX, USBRX)` and then calling
30 | // `pc.printf("%s", s)`.
31 | // To add an equivalent function for your own platform, create your own
// implementation file, and place it in a subfolder named after the OS
33 | // you're targeting. For example, see the Cortex M bare metal version in
34 | // tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on
35 | // tensorflow/lite/micro/mbed/debug_log.cc.
36 |
37 | #include "tensorflow/lite/micro/debug_log.h"
38 |
39 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
#include <stdio.h>
41 |
42 | #include "pico/stdlib.h"
43 | #endif
44 |
45 | extern "C" void DebugLog(const char* s) {
46 | #ifndef TF_LITE_STRIP_ERROR_STRINGS
47 | static bool has_uart_been_set_up = false;
48 | if (!has_uart_been_set_up) {
49 | setup_default_uart();
50 | has_uart_been_set_up = true;
51 | }
52 | // Reusing TF_LITE_STRIP_ERROR_STRINGS to disable DebugLog completely to get
53 | // maximum reduction in binary size. This is because we have DebugLog calls
54 | // via TF_LITE_CHECK that are not stubbed out by TF_LITE_REPORT_ERROR.
55 | printf("%s", s);
56 | #endif
57 | }
58 |
--------------------------------------------------------------------------------
/src/converter/create_project.py:
--------------------------------------------------------------------------------
1 | '''Copyright [2020] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
2 | Daniel Konegen + Marcus Rueb
3 | Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
4 | Copyright [2022] Hahn-Schickard-Gesellschaft fuer angewandte Forschung e.V.,
5 | Daniel Konegen + Marcus Rueb
6 | SPDX-License-Identifier: GPL-3.0
7 | ============================================================================'''
8 |
9 | import os
10 | import ntpath
11 |
12 | from src.converter.convert_keras_to_cc import *
13 | from src.converter.write_files_uc import *
14 |
15 |
def convert_and_write(keras_model_dir, project_name, output_path,
                      optimizations, data_loader_path, quant_dtype,
                      separator, decimal, csv_target_label, model_memory,
                      target):
    """Convert a Keras model and generate the complete project directory.

    The Keras model is converted to a TFLite (and, for MCU targets, C++)
    model, the project directory is created, and every file needed to
    compile the project is written.

    Args:
        keras_model_dir: Path of the Keras model
        project_name: Name of the project which should be generated
        output_path: Directory where the project should be generated
        optimizations: Selected optimization algorithms
        data_loader_path: Path of the folder or file with the training data
        quant_dtype: Data type to quantize to
        separator: Separator for reading a CSV file
        decimal: Decimal sign for reading a CSV file
        csv_target_label: Target label from the CSV file
        model_memory: Preallocated amount of memory for input, output,
            and intermediate arrays in kilobytes
        target: Target to execute the neural network
    """
    print("convert_and_write function called")
    file_name = ntpath.basename(keras_model_dir)
    model_name, _ = os.path.splitext(file_name)

    project_dir = create_project_dir(project_name, output_path, target)

    model_input_shape, model_output_neurons = convert_model_to_tflite(
        keras_model_dir, project_dir, model_name, optimizations,
        data_loader_path, quant_dtype, separator, decimal, csv_target_label)

    if "MCU" in target:
        convert_model_to_cpp(model_name, project_dir)

        # The number of input neurons is the product of all input
        # dimensions except the leading batch dimension.
        model_input_neurons = 1
        for dim_idx in range(1, len(model_input_shape)):
            model_input_neurons *= model_input_shape[dim_idx]

        main_functions(project_dir, model_name, model_input_neurons,
                       model_output_neurons, quant_dtype, model_memory)
        tensorflow_library(project_dir)

    if 'Pruning' in optimizations:
        # Keep only the pruned copy of the model in the project directory.
        pruned_keras_model(keras_model_dir, project_dir, model_name)
        os.remove(keras_model_dir)
62 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_nn_activations_q7.c
22 | * Description: Q7 neural network activation function using direct table look-up
23 | *
24 | * $Date: 17. January 2018
25 | * $Revision: V.1.0.0
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_math.h"
32 | #include "arm_common_tables.h"
33 | #include "arm_nnfunctions.h"
34 |
35 | /**
36 | * @ingroup groupNN
37 | */
38 |
39 | /**
40 | * @addtogroup Acti
41 | * @{
42 | */
43 |
44 | /**
45 | * @brief Q7 neural network activation function using direct table look-up
46 | * @param[in,out] data pointer to input
47 | * @param[in] size number of elements
48 | * @param[in] int_width bit-width of the integer part, assume to be smaller than 3
49 | * @param[in] type type of activation functions
50 | *
51 | * @details
52 | *
53 | * This is the direct table look-up approach.
54 | *
55 | * Assume here the integer part of the fixed-point is <= 3.
56 | * More than 3 just not making much sense, makes no difference with
57 | * saturation followed by any of these activation functions.
58 | */
59 |
60 | void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width, arm_nn_activation_type type)
61 | {
62 | uint16_t i = size;
63 | q7_t *pIn = data;
64 | q7_t *pOut = data;
65 | q7_t in;
66 | q7_t out;
67 | uint16_t shift_size = 3 - int_width;
68 | const q7_t *lookup_table;
69 | switch (type)
70 | {
71 | case ARM_SIGMOID:
72 | lookup_table = sigmoidTable_q7;
73 | break;
74 | case ARM_TANH:
75 | default:
76 | lookup_table = tanhTable_q7;
77 | break;
78 | }
79 | while (i)
80 | {
81 | in = *pIn++;
82 | out = lookup_table[(uint8_t) (in >> shift_size)];
83 | *pOut++ = out;
84 | i--;
85 | }
86 | }
87 |
88 | /**
89 | * @} end of Acti group
90 | */
91 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/DSP/Include/dsp/bayes_functions.h:
--------------------------------------------------------------------------------
1 | /******************************************************************************
2 | * @file bayes_functions.h
3 | * @brief Public header file for CMSIS DSP Library
4 | * @version V1.9.0
5 | * @date 20. July 2020
6 | ******************************************************************************/
7 | /*
8 | * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9 | *
10 | * SPDX-License-Identifier: Apache-2.0
11 | *
12 | * Licensed under the Apache License, Version 2.0 (the License); you may
13 | * not use this file except in compliance with the License.
14 | * You may obtain a copy of the License at
15 | *
16 | * www.apache.org/licenses/LICENSE-2.0
17 | *
18 | * Unless required by applicable law or agreed to in writing, software
19 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21 | * See the License for the specific language governing permissions and
22 | * limitations under the License.
23 | */
24 |
25 |
26 | #ifndef _BAYES_FUNCTIONS_H_
27 | #define _BAYES_FUNCTIONS_H_
28 |
29 | #include "arm_math_types.h"
30 | #include "arm_math_memory.h"
31 |
32 | #include "dsp/none.h"
33 | #include "dsp/utils.h"
34 |
35 | #include "dsp/statistics_functions.h"
36 |
37 | /**
38 | * @defgroup groupBayes Bayesian estimators
39 | *
40 | * Implement the naive gaussian Bayes estimator.
41 | * The training must be done from scikit-learn.
42 | *
43 | * The parameters can be easily
44 | * generated from the scikit-learn object. Some examples are given in
45 | * DSP/Testing/PatternGeneration/Bayes.py
46 | */
47 |
48 | #ifdef __cplusplus
49 | extern "C"
50 | {
51 | #endif
52 |
53 | /**
54 | * @brief Instance structure for Naive Gaussian Bayesian estimator.
55 | */
typedef struct
{
  uint32_t vectorDimension;     /**< Dimension of vector space */
  uint32_t numberOfClasses;     /**< Number of different classes */
  const float32_t *theta;       /**< Mean values for the Gaussians */
  const float32_t *sigma;       /**< Variances for the Gaussians */
  const float32_t *classPriors; /**< Class prior probabilities */
  float32_t epsilon;            /**< Additive value to variances */
} arm_gaussian_naive_bayes_instance_f32;
/* NOTE(review): theta/sigma presumably hold numberOfClasses x
   vectorDimension entries each and classPriors numberOfClasses entries -
   confirm against the scikit-learn export described above. */
65 |
66 | /**
67 | * @brief Naive Gaussian Bayesian Estimator
68 | *
69 | * @param[in] S points to a naive bayes instance structure
70 | * @param[in] in points to the elements of the input vector.
71 | * @param[in] pBuffer points to a buffer of length numberOfClasses
72 | * @return The predicted class
73 | *
74 | */
75 |
76 |
77 | uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S,
78 | const float32_t * in,
79 | float32_t *pBuffer);
80 |
81 |
82 | #ifdef __cplusplus
83 | }
84 | #endif
85 |
86 | #endif /* ifndef _BAYES_FUNCTIONS_H_ */
87 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
3 | *
4 | * SPDX-License-Identifier: Apache-2.0
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the License); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | /* ----------------------------------------------------------------------
20 | * Project: CMSIS NN Library
21 | * Title: arm_relu_q7.c
22 | * Description: Q7 version of ReLU
23 | *
24 | * $Date: May 29, 2020
25 | * $Revision: V.1.0.2
26 | *
27 | * Target Processor: Cortex-M cores
28 | *
29 | * -------------------------------------------------------------------- */
30 |
31 | #include "arm_math.h"
32 | #include "arm_nnfunctions.h"
33 |
34 | /**
35 | * @ingroup groupNN
36 | */
37 |
38 | /**
39 | * @addtogroup Acti
40 | * @{
41 | */
42 |
43 | /**
44 | * @brief Q7 RELU function
45 | * @param[in,out] data pointer to input
46 | * @param[in] size number of elements
47 | *
48 | * @details
49 | *
50 | * Optimized relu with QSUB instructions.
51 | *
52 | */
53 |
void arm_relu_q7(q7_t *data, uint16_t size)
{

#if defined(ARM_MATH_DSP)
    /* Run the following code for M cores with DSP extension */

    /* Process four q7 values per iteration via 32-bit SIMD loads/stores. */
    uint16_t i = size >> 2;
    q7_t *input = data;
    q7_t *output = data;
    q31_t in;
    q31_t buf;
    q31_t mask;

    while (i)
    {
        in = read_q7x4_ia(&input);

        /* extract the sign bit (MSB) of each byte and rotate it down to
           the lowest bit of the respective lane */
        buf = (int32_t)__ROR((uint32_t)in & 0x80808080, 7);

        /* per-byte saturating subtract: if MSB=1, mask will be 0xFF,
           0x0 otherwise */
        mask = __QSUB8(0x00000000, buf);

        /* negative bytes are forced to zero, non-negative bytes unchanged */
        write_q7x4_ia(&output, in & (~mask));

        i--;
    }

    /* handle the 0..3 trailing elements that did not fill a full word */
    i = size & 0x3;
    while (i)
    {
        if (*input < 0)
        {
            *input = 0;
        }
        input++;
        i--;
    }

#else
    /* Run the following code as reference implementation for cores without DSP extension */

    uint16_t i;

    for (i = 0; i < size; i++)
    {
        if (data[i] < 0)
            data[i] = 0;
    }

#endif
}
106 |
107 | /**
108 | * @} end of Acti group
109 | */
110 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/op_macros.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
16 | #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
17 |
18 | // If we're on a platform without standard IO functions, fall back to a
19 | // non-portable function.
20 | #ifdef TF_LITE_MCU_DEBUG_LOG
21 |
22 | #include "tensorflow/lite/micro/debug_log.h"
23 |
// Route debug output through the platform-provided DebugLog() hook.
#define DEBUG_LOG(x) \
  do { \
    DebugLog(x); \
  } while (0)

// Logs "HALTED" and spins forever: the abort primitive on MCU targets,
// where there is no OS to return to.
inline void InfiniteLoop() {
  DEBUG_LOG("HALTED\n");
  while (1) {
  }
}

#define TFLITE_ABORT InfiniteLoop();
36 |
37 | #else // TF_LITE_MCU_DEBUG_LOG
38 |
#include <cstdio>
#include <cstdlib>
41 |
// With a C library available, debug output goes to stderr.
#define DEBUG_LOG(x) \
  do { \
    fprintf(stderr, "%s", (x)); \
  } while (0)

// Report Error for unsupported type by op 'op_name' and returns kTfLiteError.
#define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name) \
  do { \
    TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \
                       __FILE__, __LINE__, TfLiteTypeGetName(type), \
                       (op_name)); \
    return kTfLiteError; \
  } while (0)

#define TFLITE_ABORT abort()
57 |
58 | #endif // TF_LITE_MCU_DEBUG_LOG
59 |
// In release (NDEBUG) or Arduino builds a failed assertion is a no-op;
// otherwise it aborts via TFLITE_ABORT.
#if defined(NDEBUG) || defined(ARDUINO)
#define TFLITE_ASSERT_FALSE (static_cast<void>(0))
#else
#define TFLITE_ASSERT_FALSE TFLITE_ABORT
#endif

// Logs `msg` followed by "FATAL" and aborts.
#define TF_LITE_FATAL(msg)  \
  do {                      \
    DEBUG_LOG(msg);         \
    DEBUG_LOG("\nFATAL\n"); \
    TFLITE_ABORT;           \
  } while (0)

// Fatal-aborts with the stringified expression when `x` is false.
#define TF_LITE_ASSERT(x)        \
  do {                           \
    if (!(x)) TF_LITE_FATAL(#x); \
  } while (0)

// Fatal-aborts when the two expressions compare unequal.
#define TF_LITE_ASSERT_EQ(x, y)                            \
  do {                                                     \
    if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
  } while (0)
82 |
83 | #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
84 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_profiler.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
17 | #define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
18 |
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/core/api/profiler.h"
21 | #include "tensorflow/lite/micro/compatibility.h"
22 |
23 | namespace tflite {
24 |
25 | // MicroProfiler creates a common way to gain fine-grained insight into runtime
// performance. Bottleneck operators can be identified along with slow code
27 | // sections. This can be used in conjunction with running the relevant micro
28 | // benchmark to evaluate end-to-end performance.
29 | //
30 | // Usage example:
31 | // MicroProfiler profiler(error_reporter);
32 | // {
33 | // ScopedProfile scoped_profile(profiler, tag);
34 | // work_to_profile();
35 | // }
36 | //
37 | // This will call the following methods in order:
38 | // int event_handle = profiler->BeginEvent(op_name, EventType::DEFAULT, 0)
39 | // work_to_profile();
40 | // profiler->EndEvent(event_handle)
class MicroProfiler : public tflite::Profiler {
 public:
  explicit MicroProfiler(tflite::ErrorReporter* reporter);
  ~MicroProfiler() override = default;

  // AddEvent is unused for Tf Micro.
  void AddEvent(const char* tag, EventType event_type, uint64_t start,
                uint64_t end, int64_t event_metadata1,
                int64_t event_metadata2) override{};

  // BeginEvent followed by code followed by EndEvent will profile the code
  // enclosed. Multiple concurrent events are unsupported, so the return value
  // is always 0. Event_metadata1 and event_metadata2 are unused. The tag
  // pointer must be valid until EndEvent is called.
  uint32_t BeginEvent(const char* tag, EventType event_type,
                      int64_t event_metadata1,
                      int64_t event_metadata2) override;

  // Event_handle is ignored since TF Micro does not support concurrent events.
  void EndEvent(uint32_t event_handle) override;

 private:
  // Destination for the profiling output.
  tflite::ErrorReporter* reporter_;
  // NOTE(review): presumably the tick count captured by BeginEvent and the
  // tag of the event currently in flight - confirm against micro_profiler.cc.
  int32_t start_time_;
  const char* event_tag_;
  TF_LITE_REMOVE_VIRTUAL_DELETE
};
68 |
69 | } // namespace tflite
70 |
71 | #endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
72 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/kernels/internal/reference/maximum_minimum.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
17 |
18 | #include "tensorflow/lite/kernels/internal/common.h"
19 | #include "tensorflow/lite/kernels/internal/types.h"
20 |
21 | namespace tflite {
22 | namespace reference_ops {
23 |
24 | template
25 | void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape,
26 | const T* input1_data,
27 | const RuntimeShape& unextended_input2_shape,
28 | const T* input2_data,
29 | const RuntimeShape& unextended_output_shape,
30 | T* output_data, Op op) {
31 | // Uses element-wise calculation if broadcast is not required.
32 | if (unextended_input1_shape == unextended_input2_shape) {
33 | const int flat_size =
34 | MatchingElementsSize(unextended_input1_shape, unextended_input2_shape,
35 | unextended_output_shape);
36 | for (int i = 0; i < flat_size; ++i) {
37 | output_data[i] = op(input1_data[i], input2_data[i]);
38 | }
39 | } else {
40 | TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
41 | TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
42 | TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
43 |
44 | NdArrayDesc desc1;
45 | NdArrayDesc desc2;
46 | NdArrayDesc output_desc;
47 | NdArrayDescsForElementwiseBroadcast(
48 | unextended_input1_shape, unextended_input2_shape, &desc1, &desc2);
49 | CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
50 | &output_desc);
51 |
52 | auto maxmin_func = [&](int indexes[N]) {
53 | output_data[SubscriptToIndex(output_desc, indexes)] =
54 | op(input1_data[SubscriptToIndex(desc1, indexes)],
55 | input2_data[SubscriptToIndex(desc2, indexes)]);
56 | };
57 | NDOpsHelper(output_desc, maxmin_func);
58 | }
59 | }
60 |
61 | } // namespace reference_ops
62 | } // namespace tflite
63 |
64 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
65 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/micro_utils.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/micro/micro_utils.h"
17 |
#include <cmath>
#include <cstdint>
#include <limits>
21 |
22 | #include "tensorflow/lite/c/common.h"
23 | #include "tensorflow/lite/kernels/op_macros.h"
24 |
25 | namespace tflite {
26 |
27 | int ElementCount(const TfLiteIntArray& dims) {
28 | int result = 1;
29 | for (int i = 0; i < dims.size; ++i) {
30 | result *= dims.data[i];
31 | }
32 | return result;
33 | }
34 |
35 | void SignedSymmetricPerChannelQuantize(const float* values,
36 | TfLiteIntArray* dims,
37 | int quantized_dimension,
38 | int8_t* quantized_values,
39 | float* scaling_factors) {
40 | int input_size = ElementCount(*dims);
41 | int channel_count = dims->data[quantized_dimension];
42 | int per_channel_size = input_size / channel_count;
43 |
44 | int stride;
45 | int channel_stride;
46 | if (quantized_dimension == 0) {
47 | stride = 1;
48 | channel_stride = per_channel_size;
49 | } else if (quantized_dimension == 3) {
50 | stride = channel_count;
51 | channel_stride = 1;
52 | } else {
53 | TF_LITE_FATAL("quantized dimension must be 0 or 3");
54 | }
55 |
56 | // Calculate scales for each channel.
57 | for (int channel = 0; channel < channel_count; channel++) {
58 | float min = 0;
59 | float max = 0;
60 |
61 | for (int i = 0; i < per_channel_size; i++) {
62 | int idx = channel * channel_stride + i * stride;
63 | min = fminf(min, values[idx]);
64 | max = fmaxf(max, values[idx]);
65 | }
66 | scaling_factors[channel] =
67 | fmaxf(fabs(min), fabs(max)) / std::numeric_limits::max();
68 | for (int i = 0; i < per_channel_size; i++) {
69 | int idx = channel * channel_stride + i * stride;
70 | const int32_t quantized_value =
71 | static_cast(roundf(values[idx] / scaling_factors[channel]));
72 | // Clamp: just in case some odd numeric offset.
73 | quantized_values[idx] =
74 | fminf(std::numeric_limits::max(),
75 | fmaxf(std::numeric_limits::min() + 1, quantized_value));
76 | }
77 | }
78 | }
79 |
80 | } // namespace tflite
81 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/recording_micro_interpreter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_
17 | #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_
18 |
19 | #include "tensorflow/lite/micro/micro_interpreter.h"
20 | #include "tensorflow/lite/micro/recording_micro_allocator.h"
21 |
22 | namespace tflite {
23 |
24 | // Utility subclass that enables internal recordings of the MicroInterpreter.
25 | // This class should be used to audit and analyze memory arena usage for a given
26 | // model and interpreter.
27 | //
28 | // After construction and the first Invoke() or AllocateTensors() call - the
29 | // memory usage is recorded and available through the GetMicroAllocator()
// function. See RecordingMicroAllocator for more details on what is currently
31 | // recorded from arena allocations.
32 | //
33 | // It is recommended for users to increase the tensor arena size by at least 1kb
34 | // to ensure enough additional memory is available for internal recordings.
35 | class RecordingMicroInterpreter : public MicroInterpreter {
36 | public:
37 | RecordingMicroInterpreter(const Model* model,
38 | const MicroOpResolver& op_resolver,
39 | uint8_t* tensor_arena, size_t tensor_arena_size,
40 | ErrorReporter* error_reporter)
41 | : MicroInterpreter(model, op_resolver,
42 | RecordingMicroAllocator::Create(
43 | tensor_arena, tensor_arena_size, error_reporter),
44 | error_reporter),
45 | recording_micro_allocator_(
46 | static_cast(allocator())) {}
47 |
48 | RecordingMicroInterpreter(const Model* model,
49 | const MicroOpResolver& op_resolver,
50 | RecordingMicroAllocator* allocator,
51 | ErrorReporter* error_reporter)
52 | : MicroInterpreter(model, op_resolver, allocator, error_reporter),
53 | recording_micro_allocator_(*allocator) {}
54 |
55 | const RecordingMicroAllocator& GetMicroAllocator() const {
56 | return recording_micro_allocator_;
57 | }
58 |
59 | private:
60 | const RecordingMicroAllocator& recording_micro_allocator_;
61 | };
62 |
63 | } // namespace tflite
64 |
65 | #endif // TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_
66 |
--------------------------------------------------------------------------------
/src/converter/tensorflow_library/tensorflow/lite/micro/kernels/ceil.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/lite/kernels/internal/reference/ceil.h"
17 |
18 | #include "tensorflow/lite/c/common.h"
19 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
20 | #include "tensorflow/lite/kernels/kernel_util.h"
21 | #include "tensorflow/lite/micro/kernels/kernel_util.h"
22 |
23 | namespace tflite {
24 | namespace ops {
25 | namespace micro {
26 | namespace ceil {
27 |
// Indices of the CEIL op's single input and output in the node's tensor lists.
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
30 |
// Validates the CEIL node at planning time: exactly one input and one output,
// both float32, with identical byte size and identical shape. Returns
// kTfLiteError (via the TF_LITE_ENSURE macros) on the first violated check.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
  TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes);
  TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size);
  // Output shape must match the input shape dimension by dimension.
  for (int i = 0; i < output->dims->size; ++i) {
    TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]);
  }
  return kTfLiteOk;
}
47 |
48 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
49 | const TfLiteEvalTensor* input =
50 | tflite::micro::GetEvalInput(context, node, kInputTensor);
51 | TfLiteEvalTensor* output =
52 | tflite::micro::GetEvalOutput(context, node, kOutputTensor);
53 |
54 | reference_ops::Ceil(tflite::micro::GetTensorShape(input),
55 | tflite::micro::GetTensorData(input),
56 | tflite::micro::GetTensorShape(output),
57 | tflite::micro::GetTensorData(output));
58 |
59 | return kTfLiteOk;
60 | }
61 | } // namespace ceil
62 |
63 | TfLiteRegistration Register_CEIL() {
64 | return {/*init=*/nullptr,
65 | /*free=*/nullptr,
66 | /*prepare=*/ceil::Prepare,
67 | /*invoke=*/ceil::Eval,
68 | /*profiling_string=*/nullptr,
69 | /*builtin_code=*/0,
70 | /*custom_name=*/nullptr,
71 | /*version=*/0};
72 | }
73 |
74 | } // namespace micro
75 | } // namespace ops
76 | } // namespace tflite
77 |
--------------------------------------------------------------------------------