├── .clang-format
├── .daq_pm
│   └── configs
│       ├── all-27
│       ├── all-28
│       ├── benchmark
│       ├── benchmark-27
│       ├── benchmark-29
│       ├── ex_model_builder
│       ├── infer
│       ├── jni
│       ├── onnx2daq
│       ├── onnx2daqquant
│       ├── onnx_infer
│       └── x86-all
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── LICENSE
├── README.md
├── benchmark.py
├── binaries
│   ├── CMakeLists.txt
│   ├── argh.h
│   ├── dnn_benchmark.cpp
│   ├── dnn_retrieve_result.cpp
│   ├── ex_model_builder.cpp
│   └── get_devices.cpp
├── ci
│   ├── adb_push_and_run.sh
│   ├── android_aar
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── build.gradle
│   │   ├── dnnlibrary
│   │   │   ├── .gitignore
│   │   │   ├── build.gradle
│   │   │   ├── proguard-rules.pro
│   │   │   └── src
│   │   │       └── main
│   │   │           ├── AndroidManifest.xml
│   │   │           └── java
│   │   │               └── me
│   │   │                   └── daquexian
│   │   │                       └── dnnlibrary
│   │   │                           ├── Model.java
│   │   │                           └── ModelBuilder.java
│   │   ├── gradle.properties
│   │   ├── gradle
│   │   │   └── wrapper
│   │   │       ├── gradle-wrapper.jar
│   │   │       └── gradle-wrapper.properties
│   │   ├── gradlew
│   │   ├── gradlew.bat
│   │   └── settings.gradle
│   ├── appimage
│   │   ├── onnx2daq.desktop
│   │   └── onnx2daq.png
│   ├── build_aar.sh
│   ├── build_appimage.sh
│   ├── build_dnnlibrary.sh
│   ├── build_onnx2daq.sh
│   ├── dnnlibrary_build_and_test.yml
│   ├── download_and_test_models.sh
│   ├── download_protoc.sh
│   ├── get_cores.sh
│   ├── onnx2daq_build.yml
│   ├── onnxruntime_test.yml
│   ├── start_android_emulator.sh
│   ├── template_onnx2daq_build_python.yml
│   ├── template_onnx2daq_build_python_all_version.yml
│   ├── template_onnx2daq_github_release.yml
│   ├── template_onnx2daq_publish_artifacts.yml
│   ├── template_onnx2daq_upload_to_pypi.yml
│   └── validate_onnx.py
├── cmake
│   ├── DNNLibraryConfig.cmake.in
│   ├── ONNX2daqConfig.cmake.in
│   ├── common.cmake
│   ├── flatbuffers.cmake
│   ├── glog.cmake
│   ├── onnx.cmake
│   ├── protobuf.cmake
│   ├── system.cmake
│   └── utils.cmake
├── common
│   ├── Shaper.cpp
│   ├── daq.fbs
│   ├── helper.h
│   ├── internal_vars.cpp
│   ├── internal_vars.h
│   └── log_helper.h
├── dnnlibrary
│   ├── CMakeLists.txt
│   ├── DaqReader.cpp
│   ├── JavaWrapper.cpp
│   ├── Model.cpp
│   ├── ModelBuilder.cpp
│   ├── ModelBuilderImpl.cpp
│   ├── NeuralNetworksWrapper.cpp
│   ├── OnnxReader.cpp
│   ├── android_log_helper.h
│   ├── flatbuffers_helper.h
│   ├── jni_handle.h
│   ├── nnapi_helper.h
│   └── nnapi_implementation.cc
├── generate_code.py
├── images
│   ├── DNNLibrary-huaweihonorv10.png
│   ├── DNNLibrary-oneplus6t.png
│   ├── DNNLibrary-rk3399.png
│   ├── screenshot.png
│   ├── screenshot_camera_mnist.png
│   ├── screenshot_image_mnist.png
│   ├── screenshot_image_resnet.png
│   ├── screenshot_quant8.png
│   └── screenshot_raw_nnapi.png
├── include
│   ├── common
│   │   ├── Shaper.h
│   │   ├── StrKeyMap.h
│   │   ├── daq_generated.h
│   │   ├── data_types.h
│   │   ├── expected.hpp
│   │   └── optional.h
│   ├── dnnlibrary
│   │   ├── DaqReader.h
│   │   ├── Device.h
│   │   ├── Model.h
│   │   ├── ModelBuilder.h
│   │   ├── NeuralNetworksTypes.h
│   │   ├── NeuralNetworksWrapper.h
│   │   ├── OnnxReader.h
│   │   └── nnapi_implementation.h
│   └── tools
│       └── onnx2daq
│           └── OnnxConverter.h
├── ops.yml
├── quant.py
└── tools
    ├── CMakeLists.txt
    ├── getsupportednodes
    │   ├── CMakeLists.txt
    │   └── getsupportednodes.cpp
    └── onnx2daq
        ├── CMakeLists.txt
        ├── NodeAttrHelper.cpp
        ├── NodeAttrHelper.h
        ├── OnnxConverter.cpp
        ├── OnnxConverterImpl.cpp
        ├── onnx2daq.cpp
        ├── python
        │   ├── onnx2daq
        │   │   ├── __init__.py
        │   │   ├── __main__.py
        │   │   └── convert.py
        │   └── setup.py
        └── pywrapper.cpp
/.clang-format:
--------------------------------------------------------------------------------
1 | BasedOnStyle: Google
2 | IndentWidth: 4
3 | AllowShortBlocksOnASingleLine: false
4 | AllowShortLoopsOnASingleLine: false
5 | AllowShortFunctionsOnASingleLine: false
6 | AlignEscapedNewlines: Left
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/all-27:
--------------------------------------------------------------------------------
1 | # Configuration for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | build_dir build-all-v27
5 | cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-27 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -GNinja -DDNN_READ_ONNX=ON -DDNN_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc
6 |
--------------------------------------------------------------------------------
/.daq_pm/configs/all-28:
--------------------------------------------------------------------------------
1 | # Configuration for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | build_dir build-all-v28
5 | cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -GNinja -DDNN_READ_ONNX=ON -DDNN_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc
6 |
--------------------------------------------------------------------------------
/.daq_pm/configs/benchmark:
--------------------------------------------------------------------------------
1 | # Configuration for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | target dnn_benchmark
5 | build_dir build
6 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/benchmark-27:
--------------------------------------------------------------------------------
1 | # Configuration for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | target dnn_benchmark
5 | build_dir build-v27
6 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-27 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/benchmark-29:
--------------------------------------------------------------------------------
1 | # Configuration for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | target dnn_benchmark
5 | build_dir build-29
6 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/android-ndk-r20-beta1/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-29 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/ex_model_builder:
--------------------------------------------------------------------------------
1 | name DNNLibrary
2 | type cpp
3 | target ex_model_builder
4 | build_dir build
5 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
6 | binary ~/adb_push_and_run.sh binaries/ex_model_builder
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/infer:
--------------------------------------------------------------------------------
1 | name DNNLibrary
2 | type cpp
3 | target dnn_retrieve_result
4 | build_dir build
5 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
6 | program_arguments ~/adb_push_and_run.sh binaries/dnn_retrieve_result
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/jni:
--------------------------------------------------------------------------------
1 | name DNNLibrary
2 | type cpp
3 | target daq-jni
4 | build_dir build_jni
5 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DDNN_BUILD_JNI=ON
6 |
--------------------------------------------------------------------------------
/.daq_pm/configs/onnx2daq:
--------------------------------------------------------------------------------
1 | # It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DDNN_BUILD_PYTHON=ON
5 | build_dir build_onnx2daq
6 |
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/onnx2daqquant:
--------------------------------------------------------------------------------
1 | # It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
5 | build_dir build_onnx2daq
6 | binary ./tools/onnx2daq/onnx2daq ~/models/mobilenetv2-1.0/quant-mobilenetv2-1.0.onnx temp.daq ../table.txt
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/onnx_infer:
--------------------------------------------------------------------------------
1 | name DNNLibrary
2 | type cpp
3 | target dnn_retrieve_result
4 | build_dir build_onnxinfer
5 | cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DDNN_READ_ONNX=ON -DDNN_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc
6 | program_arguments ~/adb_push_and_run.sh binaries/dnn_retrieve_result
7 |
--------------------------------------------------------------------------------
/.daq_pm/configs/x86-all:
--------------------------------------------------------------------------------
1 | # It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim)
2 | name DNNLibrary
3 | type cpp
4 | cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DDNN_READ_ONNX=ON
5 | build_dir build_x86all
6 |
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cmake-build-*/
2 | build/
3 | compile_commands.json
4 | .idea/
5 | *.swp
6 | *.swo
7 | .ccls-cache/
8 | .daq_pm/status/
9 | build*/
10 | table*.txt
11 | *.npy
12 | val.py
13 |
14 | # For onnx2daq python package
15 | .setuptools-cmake-build/
16 | dist/
17 | onnx2daq.egg-info/
18 | __pycache__/
19 | .eggs/
20 |
21 | .clangd/
22 | venv/
23 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "third_party/protobuf"]
2 | path = third_party/protobuf
3 | url = https://github.com/google/protobuf/
4 | [submodule "third_party/glog"]
5 | path = third_party/glog
6 | url = https://github.com/google/glog
7 | [submodule "third_party/onnx"]
8 | path = third_party/onnx
9 | url = https://github.com/onnx/onnx
10 | [submodule "third_party/pybind11"]
11 | path = third_party/pybind11
12 | url = https://github.com/pybind/pybind11
13 | [submodule "third_party/flatbuffers"]
14 | path = third_party/flatbuffers
15 | url = https://github.com/google/flatbuffers
16 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5.2)
2 | project(DNNLibrary)
3 |
4 | option(DNN_BUILD_BIN "Build binaries" ON)
5 | option(DNN_BUILD_JNI "Build Java Wrapper" OFF)
6 | option(DNN_READ_ONNX "Read ONNX model directly instead of converting to .daq" OFF)
7 | option(DNN_SYSTEM_PROTOBUF "Use system protobuf when building onnx2daq" OFF)
8 | option(DNN_BUILD_PYTHON "Build Python wrapper for onnx2daq" ON)
9 | option(DNN_USE_MSVC_STATIC_RUNTIME "Link onnx2daq to msvc static runtime" ON)
10 | option(DNN_CMAKE_INSTALL "Export targets in cmake (disable it for ONNXRuntime)" ON)
11 |
12 | if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Android")
13 | if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0" AND CMAKE_VERSION VERSION_LESS "3.15.2")
14 | message(FATAL_ERROR "Android NDK is not compatible with CMake 3.15.0 and 3.15.1 (https://gitlab.kitware.com/cmake/cmake/issues/19515), please install another cmake version.")
15 | endif()
16 | endif()
17 |
18 | include(cmake/system.cmake)
19 | include(cmake/common.cmake)
20 |
21 | include(cmake/utils.cmake)
22 | dnn_add_msvc_runtime_flag()
23 |
24 | include(cmake/glog.cmake)
25 | configure_glog()
26 |
27 | include(cmake/flatbuffers.cmake)
28 | configure_flatbuffers()
29 |
30 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Android")
31 | add_compile_options(-Os)
32 | set (CMAKE_CXX_STANDARD 17)
33 | if (${DNN_READ_ONNX})
34 | include(cmake/onnx.cmake)
35 | configure_onnx()
36 | set(ONNX2DAQ_ONLY_LIB ON)
37 | add_subdirectory(tools)
38 | else()
39 | add_compile_options(-fno-rtti)
40 | endif()
41 | add_subdirectory(dnnlibrary)
42 | add_subdirectory(binaries)
43 | else()
44 | set (CMAKE_CXX_STANDARD 11)
45 | if (MSVC)
46 | set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS 1)
47 | endif()
48 | if (${DNN_BUILD_PYTHON})
49 | set(CMAKE_POSITION_INDEPENDENT_CODE ON)
50 | add_subdirectory(third_party/pybind11)
51 | endif()
52 |
53 | if (${DNN_SYSTEM_PROTOBUF})
54 | find_package(Protobuf)
55 | endif()
56 | if (NOT Protobuf_FOUND)
57 | set(DNN_SYSTEM_PROTOBUF OFF CACHE BOOL "Use system protobuf when building onnx2daq" FORCE)
58 | endif()
59 |
60 | include(cmake/onnx.cmake)
61 | configure_onnx()
62 | add_subdirectory(tools)
63 | endif()
64 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [2019] [JD.com Inc. JD AI]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DNNLibrary
2 |
3 | [](https://dev.azure.com/daquexian/DNNLibrary/_build/latest?definitionId=8&branchName=master)
4 | [ ![Download](https://api.bintray.com/packages/daquexian566/maven/dnnlibrary/images/download.svg) ](https://bintray.com/daquexian566/maven/dnnlibrary/_latestVersion)
5 | [](https://github.com/JDAI-CV/DNNLibrary/pulls)
6 |
7 | *Run ONNX models on your Android phone using the new NNAPI!*
8 |
9 | Android 8.1 introduces the Neural Networks API (NNAPI). It's very exciting to run a model in the "native" way supported by the Android system. :)
10 |
11 | DNNLibrary is a wrapper of NNAPI ("DNNLibrary" is for "**d**aquexian's **NN**API library"). It lets you easily make use of the new NNAPI introduced in Android 8.1. You can convert your ONNX model into a `daq` model and run it directly.
12 |
13 | For the Android app example, please check out [dnnlibrary-example](https://github.com/daquexian/dnnlibrary-example).
14 |
15 | Telegram Group: [link](https://t.me/joinchat/DjMsYRBe0UtG6OZsYes5KQ), QQ Group (Chinese): 948989771, answer: 哈哈哈哈
16 |
17 | ## Screenshot
18 |
19 | This screenshot shows MobileNet v2, both the float version and the 8-bit quantized version:
20 |
21 | ![Screenshot of MobileNet v2, float and 8-bit quantized](images/screenshot_quant8.png)
22 |
23 | ## Preparation
24 |
25 | Please make sure the Android version on your phone is 8.1+, or you may want to use an 8.1+ emulator.
26 |
27 | ## Introduction
28 |
29 | Android 8.1 introduces NNAPI. However, NNAPI is not designed to be used by normal Android developers directly, so I wrapped it into a library.
30 |
31 | With DNNLibrary it's extremely easy to deploy your ONNX model on an Android 8.1+ phone. For example, the following is the Java code to deploy MobileNet v2 in your app (please check out [dnnlibrary-example](https://github.com/daquexian/dnnlibrary-example) for details):
32 |
33 | ```Java
34 | ModelBuilder modelBuilder = new ModelBuilder();
35 | Model model = modelBuilder.readFile(getAssets(), "mobilenetv2.daq")
36 | // the following line will allow fp16 on supported devices, bringing speed boost. It is only available on Android P, see https://www.anandtech.com/show/13503/the-mate-20-mate-20-pro-review/4 for a detailed benchmark
37 | // .allowFp16(true)
38 | .setOutput("mobilenetv20_output_pred_fwd") // The output name is from the onnx model
39 | .compile(ModelBuilder.PREFERENCE_FAST_SINGLE_ANSWER);
40 |
41 | float[] result = model.predict(inputData);
42 | ```
43 |
44 | Only five lines! And the `daq` model file is generated from the pretrained ONNX model using `onnx2daq`.
45 |
46 | ## Convert the model
47 |
48 | ### If you are a Linux user
49 |
50 | We provide a precompiled AppImage of onnx2daq, our model conversion tool. [AppImage](https://appimage.org/) is a program format that runs on almost all Linux systems. Just download onnx2daq.AppImage from [releases](https://github.com/JDAI-CV/DNNLibrary/releases), and make it executable by
51 |
52 | ```bash
53 | chmod +x onnx2daq.AppImage
54 | ```
55 |
56 | then use it directly. The usage is described in the "Usage of onnx2daq" section below.
57 |
58 | ### If you are a Windows or Mac user
59 |
60 | You need to build onnx2daq from source.
61 |
62 | Clone this repo and submodules:
63 |
64 | ```bash
65 | git clone --recursive https://github.com/JDAI-CV/DNNLibrary
66 | ```
67 |
68 | After cloning the repo and its submodules, run
69 | ```bash
70 | mkdir build
71 | cd build
72 | cmake ..
73 | cmake --build .
74 | ```
75 |
76 | Now `onnx2daq` is in the `tools/onnx2daq` directory.
77 |
78 | ### Usage of onnx2daq
79 |
80 | ```bash
81 | path_of_onnx2daq onnx_model output_filename
82 | ```
83 |
84 | For example, if you are a Linux user and have a model named "mobilenetv2.onnx" in your current directory,
85 | ```bash
86 | ./onnx2daq.AppImage mobilenetv2.onnx mobilenetv2.daq
87 | ```
88 |
89 | For 8-bit quantization, please check out [our wiki](https://github.com/JDAI-CV/DNNLibrary/wiki/Quantization)
90 |
91 | ## Usage
92 |
93 | ### If you are an Android app developer and want it to work out of the box
94 |
95 | Welcome! The library has been published to jcenter.
96 |
97 | Just add
98 |
99 | ```
100 | implementation 'me.daquexian:dnnlibrary:replace_me_with_the_latest_version'
101 | ```
102 |
103 | in your app's `build.gradle`'s `dependencies` section.
104 |
105 | The latest version can be found in the following badge:
106 |
107 | [ ![Download](https://api.bintray.com/packages/daquexian566/maven/dnnlibrary/images/download.svg) ](https://bintray.com/daquexian566/maven/dnnlibrary/_latestVersion)
108 |
109 | ### If you are a C++ developer and don't care about the Android app
110 |
111 | We use CMake as the build system, so you can build it like most C++ projects. The only difference is that you need the Android NDK; **r17b or higher is necessary**:
112 |
113 | ```bash
114 | mkdir build && cd build
115 | cmake -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=path_of_android_ndk/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=replace_me_with_android-28_or_android-27 -DANDROID_ABI=arm64-v8a ..
116 | cmake --build .
117 | ```
118 |
119 | then you will get binary files.
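
If you want to call the C++ API directly instead of going through the Java wrapper, the flow mirrors the Java example above: read a `daq` (or ONNX) model into a `ModelBuilder`, compile it, then call `Predict`. The snippet below is only a minimal sketch adapted from `binaries/dnn_retrieve_result.cpp` in this repo; the model file name is a placeholder, and it assumes a single float input and a single float output.

```cpp
#include <vector>

#include <dnnlibrary/DaqReader.h>
#include <dnnlibrary/ModelBuilder.h>

int main() {
    dnn::ModelBuilder builder;
    dnn::DaqReader daq_reader;
    // The last argument selects mmap instead of a memory buffer.
    daq_reader.ReadDaq("mobilenetv2.daq", builder, false);
    auto model = builder.Compile(dnn::ModelBuilder::PREFERENCE_FAST_SINGLE_ANSWER);

    // Query the input/output sizes from the compiled model.
    std::vector<float> input(model->GetSize(model->GetInputs()[0]), 1.0f);
    std::vector<float> output(model->GetSize(model->GetOutputs()[0]));
    model->SetOutputBuffer(0, output.data());
    model->Predict(std::vector<std::vector<float>>{input});
    return 0;
}
```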
120 |
121 | ## But TensorFlow Lite also supports NNAPI...
122 |
123 | Yes, but its support for NNAPI is far from perfect. For example, dilated convolutions (which are widely used in segmentation) are [not supported](https://github.com/tensorflow/tensorflow/blob/da7b71f67147ff4795c5c0168d1f225ba2b4b522/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc#L463), and PReLU is not supported either.
124 |
125 | What's more, only TensorFlow models can easily be converted to the TensorFlow Lite format. Since NNAPI is independent of any framework, we support ONNX, a framework-independent model format.
126 |
127 | _ | TF Lite | DNNLibrary
128 | --- |:---:|:---:
129 | Supported Model Format | TensorFlow | ONNX
130 | Dilated Convolution | ❌ | ✔️
131 | Ease of Use | ❌ (Bazel build system, not friendly to Android developers) | ✔️
132 | Quantization | ✔️ | ✔️ (since 0.6.10)
133 |
134 | However, we are also far from maturity compared to TF Lite. At least we are another choice if you want to enjoy the power of NNAPI. :)
135 |
136 | ## Benchmark
137 |
138 | We benchmarked DNNLibrary against two popular frameworks, [NCNN](https://github.com/Tencent/ncnn) and [MNN](https://github.com/alibaba/MNN). DNNLibrary shows promising results on three devices. (Note: GoogLeNet fails to convert with MNN, so the corresponding latency is blank.)
139 |
140 | ![Benchmark on Huawei Honor V10](images/DNNLibrary-huaweihonorv10.png)
141 | ![Benchmark on OnePlus 6T](images/DNNLibrary-oneplus6t.png)
142 | ![Benchmark on RK3399](images/DNNLibrary-rk3399.png)
143 |
144 | More benchmarks are welcome!
145 |
146 | ## About caffe model support
147 |
148 | The old DNNLibrary supported Caffe models via [dnntools](https://github.com/daquexian/dnntools). However, Caffe models are not supported directly now, and the models generated by `dnntools` are not usable either. Please use a conversion tool like [MMdnn](https://github.com/Microsoft/MMdnn) to convert your Caffe model to an ONNX model, then convert it to `daq` using `onnx2daq`.
149 |
--------------------------------------------------------------------------------
/benchmark.py:
--------------------------------------------------------------------------------
1 | import onnx
2 | from onnx import numpy_helper
3 | import os
4 | import glob
5 | import numpy as np
6 | import tempfile
7 |
8 | def run(onnx, onnx2daq, dnn_benchmark, output_name, number_running, table_file):
9 | quant = 1 if len(table_file) != 0 else 0
10 | daq = "temp.daq"
11 | os.system("{} {} {} {}".format(onnx2daq, onnx, daq, table_file))
12 | print("Converted to daq")
13 |
14 | os.system("adb push {} /data/local/tmp/".format(daq))
15 | os.system("adb push {} /data/local/tmp/dnn_benchmark".format(dnn_benchmark))
16 | os.system('adb shell "LD_LIBRARY_PATH=/data/local/tmp/ /data/local/tmp/dnn_benchmark /data/local/tmp/{} {} {} {}"'.format(os.path.basename(daq), output_name, number_running, quant))
17 | os.system("adb shell rm /data/local/tmp/dnn_benchmark")
18 | os.system("adb shell rm /data/local/tmp/{}".format(os.path.basename(daq)))
19 | os.system("rm {}".format(daq))
20 |
21 | if __name__ == '__main__':
22 | import argparse
23 | parser = argparse.ArgumentParser(description='Test onnx model on nnapi')
24 | parser.add_argument('onnx', type=str, help='onnx model file')
25 | parser.add_argument('onnx2daq', type=str, help='onnx2daq binary file')
26 | parser.add_argument('dnn_benchmark', type=str, help='dnn_benchmark binary file')
27 | parser.add_argument('output', type=str, help='Output name of the model')
28 | parser.add_argument('--number_running', type=int, help='The number of running', default=50)
29 | parser.add_argument('--table_file', type=str, help='table file for 8-bit quantization', default='')
30 | args = parser.parse_args()
31 |
32 | actual = run(args.onnx, args.onnx2daq, args.dnn_benchmark, args.output, args.number_running, args.table_file)
33 |
--------------------------------------------------------------------------------
/binaries/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | if (DNN_BUILD_BIN)
2 | add_executable(dnn_retrieve_result
3 | dnn_retrieve_result.cpp)
4 | target_link_libraries(dnn_retrieve_result
5 | dnnlibrary)
6 | target_include_directories(dnn_retrieve_result
7 | PRIVATE
8 | ${PROJECT_SOURCE_DIR}
9 | )
10 |
11 | if ((NOT DNN_READ_ONNX) OR DNN_SYSTEM_PROTOBUF)
12 | treat_warnings_as_errors(dnn_retrieve_result)
13 | endif()
14 |
15 | add_executable(dnn_benchmark
16 | dnn_benchmark.cpp)
17 | target_link_libraries(dnn_benchmark
18 | dnnlibrary)
19 | target_include_directories(dnn_benchmark
20 | PRIVATE
21 | ${PROJECT_SOURCE_DIR}
22 | )
23 |
24 | if ((NOT DNN_READ_ONNX) OR DNN_SYSTEM_PROTOBUF)
25 | treat_warnings_as_errors(dnn_benchmark)
26 | endif()
27 |
28 | add_executable(ex_model_builder
29 | ex_model_builder.cpp)
30 | target_link_libraries(ex_model_builder
31 | dnnlibrary)
32 | target_include_directories(ex_model_builder
33 | PRIVATE
34 | ${PROJECT_SOURCE_DIR}
35 | )
36 |
37 | if ((NOT DNN_READ_ONNX) OR DNN_SYSTEM_PROTOBUF)
38 | treat_warnings_as_errors(ex_model_builder)
39 | endif()
40 |
41 | add_executable(get_devices
42 | get_devices.cpp)
43 | target_link_libraries(get_devices
44 | dnnlibrary)
45 | target_include_directories(get_devices
46 | PRIVATE
47 | ${PROJECT_SOURCE_DIR}
48 | )
49 |
50 | endif()
51 |
--------------------------------------------------------------------------------
/binaries/dnn_benchmark.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Created by daquexian on 29/01/19.
3 | //
4 |
5 | #include <chrono>
6 | #include <cstdlib>
7 | #include <iostream>
8 | #include <memory>
9 | #include <stdexcept>
10 | #include <string>
11 | #include <vector>
12 |
13 | #include <common/helper.h>
14 | #include <dnnlibrary/DaqReader.h>
15 | #include <dnnlibrary/ModelBuilder.h>
16 | #ifdef DNN_READ_ONNX
17 | #include <dnnlibrary/OnnxReader.h>
18 | #endif
19 | #include <glog/logging.h>
20 |
21 | using std::cout;
22 | using std::endl;
23 | using std::string;
24 | using Clock = std::chrono::high_resolution_clock;
25 | using dnn::DaqReader;
26 | using dnn::Model;
27 | using dnn::ModelBuilder;
28 | #ifdef DNN_READ_ONNX
29 | using dnn::OnnxReader;
30 | #endif
31 |
32 | bool hasEnding(std::string const &fullString, std::string const &ending) {
33 | if (fullString.length() >= ending.length()) {
34 | return (0 == fullString.compare(fullString.length() - ending.length(),
35 | ending.length(), ending));
36 | } else {
37 | return false;
38 | }
39 | }
40 |
41 | auto GetModel(css &daqName, const bool allow_fp16,
42 | const int compile_preference) {
43 | std::unique_ptr<Model> model;
44 | ModelBuilder builder;
45 | if (hasEnding(daqName, ".daq")) {
46 | DaqReader daq_reader;
47 | // Set the last argument to true to use mmap. It may be more efficient
48 | // than memory buffer.
49 | daq_reader.ReadDaq(daqName, builder, false);
50 | #ifdef DNN_READ_ONNX
51 | } else if (hasEnding(daqName, ".onnx")) {
52 | OnnxReader onnx_reader;
53 | // Set the last argument to true to use mmap. It may be more efficient
54 | // than memory buffer.
55 | onnx_reader.ReadOnnx(daqName, builder);
56 | #endif
57 | } else {
58 | throw std::invalid_argument(
59 | "Wrong model name " + daqName +
60 | ". It must end with .daq or .onnx (.onnx is only "
61 | "supported when DNN_READ_ONNX is ON)");
62 | }
63 | model = builder.AllowFp16(allow_fp16).Compile(compile_preference);
64 | return model;
65 | }
66 |
67 | auto PrefCodeToStr(const int &preference_code) {
68 | if (preference_code == ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER) {
69 | return "fast single";
70 | }
71 | if (preference_code == ANEURALNETWORKS_PREFER_SUSTAINED_SPEED) {
72 | return "sustained speed";
73 | }
74 | if (preference_code == ANEURALNETWORKS_PREFER_LOW_POWER) {
75 | return "low power";
76 | }
77 | return "Unknown preference code";
78 | }
79 |
80 | // Usage: ./dnn_benchmark daq_name number_running quant
81 | int main(int argc, char **argv) {
82 | google::InitGoogleLogging(argv[0]);
83 | FLAGS_logtostderr = true;
84 | FLAGS_logbuflevel = -1;
85 | FLAGS_v = 0;
86 | if (argc != 4) {
87 | return -1;
88 | }
89 | css daq_name = argv[1];
90 | const int number_running = std::atoi(argv[2]);
91 | const bool quant = std::atoi(argv[3]) != 0;
92 |
93 | size_t input_len, output_len;
94 | {
95 | auto model = GetModel(daq_name, false,
96 | ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
97 | input_len = model->GetSize(model->GetInputs()[0]);
98 | output_len = model->GetSize(model->GetOutputs()[0]);
99 | }
100 | #define WARM_UP \
101 | { \
102 | auto model = GetModel(daq_name, false, \
103 | ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER); \
104 | for (int i = 0; i < 10; i++) { \
105 | model->SetOutputBuffer(0, output); \
106 | model->Predict(std::vector{data}); \
107 | } \
108 | }
109 |
110 | #define BENCHMARK(fp16_candidates, preference_candidates) \
111 | for (const auto allow_fp16 : fp16_candidates) { \
112 | for (const auto compile_preference : preference_candidates) { \
113 | auto model = GetModel(daq_name, allow_fp16, compile_preference); \
114 | const auto t1 = Clock::now(); \
115 | for (int i = 0; i < number_running; i++) { \
116 | model->SetOutputBuffer(0, output); \
117 | model->Predict(std::vector{data}); \
118 | } \
119 | const auto t2 = Clock::now(); \
120 | const auto total_time = \
121 | std::chrono::duration_cast(t2 - t1) \
122 | .count(); \
123 | const auto single_time = 1. * total_time / number_running; \
124 | LOG(INFO) << "AllowFp16: " << allow_fp16 \
125 | << ", compile preference: " \
126 | << PrefCodeToStr(compile_preference) \
127 | << ", time: " << total_time << "/" << number_running \
128 | << " = " << single_time; \
129 | } \
130 | }
131 |
132 | const std::vector preference_candidates{
133 | ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER,
134 | ANEURALNETWORKS_PREFER_SUSTAINED_SPEED,
135 | ANEURALNETWORKS_PREFER_LOW_POWER};
136 | if (quant) {
137 | uint8_t data[input_len];
138 | float output[output_len];
139 | WARM_UP;
140 | const std::vector fp16_candidates{false};
141 | BENCHMARK(fp16_candidates, preference_candidates);
142 | } else {
143 | float data[input_len];
144 | FORZ(i, input_len) {
145 | data[i] = i;
146 | }
147 | float output[output_len];
148 |
149 | WARM_UP;
150 |
151 | const std::vector fp16_candidates =
152 | GetAndroidSdkVersion() >= __ANDROID_API_P__
153 | ? std::vector{false, true}
154 | : std::vector{false};
155 | BENCHMARK(fp16_candidates, preference_candidates);
156 | }
157 | }
158 |
--------------------------------------------------------------------------------
/binaries/dnn_retrieve_result.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Created by daquexian on 5/21/18.
3 | //
4 |
5 | #include <chrono>
6 | #include <fstream>
7 | #include <iostream>
8 | #include <memory>
9 | #include <stdexcept>
10 | #include <string>
11 | #include <vector>
12 |
13 | #include <common/helper.h>
14 | #include <dnnlibrary/DaqReader.h>
15 | #include <dnnlibrary/ModelBuilder.h>
16 | #ifdef DNN_READ_ONNX
17 | #include <dnnlibrary/OnnxReader.h>
18 | #endif
19 | #include <glog/logging.h>
20 | #include "argh.h"
21 |
22 | using std::cout;
23 | using std::endl;
24 | using std::string;
25 | using Clock = std::chrono::high_resolution_clock;
26 | using dnn::DaqReader;
27 | using dnn::Model;
28 | using dnn::ModelBuilder;
29 | #ifdef DNN_READ_ONNX
30 | using dnn::OnnxReader;
31 | #endif
32 |
33 | bool hasEnding(std::string const &fullString, std::string const &ending) {
34 | if (fullString.length() >= ending.length()) {
35 | return (0 == fullString.compare(fullString.length() - ending.length(),
36 | ending.length(), ending));
37 | } else {
38 | return false;
39 | }
40 | }
41 |
42 | template <typename T>
43 | std::vector<T> NHWC2NCHW(const std::vector<T> &nhwc, const size_t n,
44 | const size_t h, const size_t w, const size_t c) {
45 | std::vector<T> nchw;
46 | nchw.resize(n * h * w * c);
47 | FORZ(i, n) {
48 | FORZ(j, h) {
49 | FORZ(k, w) {
50 | FORZ(l, c) {
51 | nchw[i * c * h * w + l * h * w + j * w + k] =
52 | nhwc[i * h * w * c + j * w * c + k * c + l];
53 | }
54 | }
55 | }
56 | }
57 | return nchw;
58 | }
59 |
60 | // Usage: ./dnn_retrieve_result daqName [--quant_input] [--quant_output]
61 | // [--nchw_result] [input1 ..]
62 | int main(int argc, char **argv) {
63 | argh::parser cmdl(argc, argv);
64 | google::InitGoogleLogging(argv[0]);
65 | FLAGS_log_dir = "/data/local/tmp/log";
66 | FLAGS_logbuflevel = -1;
67 | FLAGS_alsologtostderr = true;
68 | FLAGS_v = cmdl("v", 5);
69 | string daqName = cmdl[1];
70 | bool quant_input = cmdl["quant_input"];
71 | bool quant_output = cmdl["quant_output"];
72 | bool nchw_result = cmdl["nchw_result"];
73 | bool use_external_input = cmdl(2);
74 | PNT(use_external_input);
75 |
76 | std::unique_ptr<Model> model;
77 | ModelBuilder builder;
78 | if (hasEnding(daqName, ".daq")) {
79 | DaqReader daq_reader;
80 | // Set the last argument to true to use mmap. It may be more efficient
81 | // than memory buffer.
82 | daq_reader.ReadDaq(daqName, builder, false);
83 | #ifdef DNN_READ_ONNX
84 | } else if (hasEnding(daqName, ".onnx")) {
85 | OnnxReader onnx_reader;
86 | // Set the last argument to true to use mmap. It may be more efficient
87 | // than memory buffer.
88 | onnx_reader.ReadOnnx(daqName, builder);
89 | #endif
90 | } else {
91 | throw std::invalid_argument("Wrong model name " + daqName +
92 | ". It must end with .daq or .onnx (.onnx is only "
93 | "supported when DNN_READ_ONNX is ON)");
94 | }
95 | model = builder.Compile(ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);
96 | DNN_ASSERT(model->GetOutputs().size() == 1,
97 | "the number of outputs can only be 1 here");
98 | const auto outputLen = model->GetSize(model->GetOutputs()[0]);
99 | std::vector<std::vector<float>> inputs;
100 | for (size_t i = 2, n = 0; n < model->GetInputs().size(); i++, n++) {
101 | const auto &input_name = model->GetInputs()[n];
102 | const auto input_size = model->GetSize(input_name);
103 | std::vector<float> input_data;
104 | input_data.reserve(input_size);
105 | if (use_external_input) {
106 | std::ifstream ifs(cmdl[i]);
107 | float element;
108 | FORZ(_, model->GetSize(input_name)) {
109 | if (!(ifs >> element)) {
110 | throw std::invalid_argument("Read file error");
111 | }
112 | input_data.push_back(element);
113 | }
114 | } else {
115 | FORZ(j, input_size) {
116 | input_data.push_back(j);
117 | }
118 | }
119 | inputs.push_back(input_data);
120 | }
121 |
122 | std::vector<uint8_t> output_uint8(outputLen);
123 | std::vector<float> output_float(outputLen);
124 | PNT(quant_input, quant_output);
125 | if (quant_output) {
126 | model->SetOutputBuffer(0, output_uint8.data());
127 | } else {
128 | model->SetOutputBuffer(0, output_float.data());
129 | }
130 | if (quant_input) {
131 | std::vector<std::vector<uint8_t>> uint8_inputs;
132 | for (const auto &input : inputs) {
133 | std::vector<uint8_t> uint8_input(input.begin(), input.end());
134 | uint8_inputs.push_back(uint8_input);
135 | }
136 | model->Predict(uint8_inputs);
137 | } else {
138 | model->Predict(inputs);
139 | }
140 | const auto &output_shape = model->GetShape(model->GetOutputs()[0]);
141 | if (nchw_result && output_shape.size() == 4) {
142 | const size_t n = output_shape[0], h = output_shape[1],
143 | w = output_shape[2], c = output_shape[3];
144 | if (quant_output) {
145 | output_uint8 = NHWC2NCHW(output_uint8, n, h, w, c);
146 | } else {
147 | output_float = NHWC2NCHW(output_float, n, h, w, c);
148 | }
149 | }
150 | std::ofstream ofs("/data/local/tmp/result");
151 | if (quant_output) {
152 | FORZ(i, outputLen) {
153 | ofs << static_cast<int>(output_uint8[i]) << endl;
154 | }
155 | } else {
156 | FORZ(i, outputLen) {
157 | ofs << output_float[i] << endl;
158 | }
159 | }
160 | }
161 |
--------------------------------------------------------------------------------
/binaries/ex_model_builder.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * An example showing how to use the ModelBuilder API to build a model
3 | */
4 | #include <cstdint>
5 | #include <iostream>
6 | #include <vector>
7 |
8 | #include <dnnlibrary/ModelBuilder.h>
9 | #include <dnnlibrary/NeuralNetworksWrapper.h>
10 | #include <glog/logging.h>
11 |
12 | using namespace android::nn::wrapper;
13 | using dnn::ModelBuilder;
14 |
15 | int main() {
16 | ModelBuilder builder;
17 | builder.Prepare();
18 | const bool quant8 = true;
19 | uint8_t weight_buf[999]{100, 200, 150, 20, 166, 22};
20 | uint8_t bias_buf[999]{99, 13, 235, 131};
21 | if (quant8) {
22 | builder.AddInput("data",
23 | {Type::TENSOR_QUANT8_ASYMM, {1, 224, 224, 3}, 1, 0});
24 | builder.AddTensorFromBuffer(
25 | "weight", weight_buf,
26 | {Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.1, 150});
27 | builder.AddTensorFromBuffer("bias", bias_buf,
28 | {Type::TENSOR_INT32, {3}, 0.1, 0});
29 | builder.AddLayer_DEPTHWISE_CONV_2D(
30 | "data", "weight", "bias", 1, 1, 0, 0, 0, 0, 1, dnn::FuseCode::NONE,
31 | "conv_fwd",
32 | std::make_optional(
33 | {Type::TENSOR_QUANT8_ASYMM, {0.5}, 100}));
34 | builder.AddLayer_RELU("conv_fwd", "relu_fwd");
35 | builder.AddLayer_ADD("data", "relu_fwd", dnn::FuseCode::NONE, "output",
36 | std::make_optional(
37 | {Type::TENSOR_QUANT8_ASYMM, {0.05}, 100}));
38 | } else {
39 | builder.AddInput("data", {Type::TENSOR_FLOAT32, {1, 224, 224, 3}});
40 | builder.AddTensorFromBuffer("weight", weight_buf,
41 | {Type::TENSOR_FLOAT32, {3, 1, 1, 3}});
42 | builder.AddTensorFromBuffer("bias", bias_buf,
43 | {Type::TENSOR_FLOAT32, {3}});
44 | builder.AddLayer_CONV_2D("data", "weight", "bias", 1, 1, 0, 0, 0, 0,
45 | dnn::FuseCode::NONE, false, 1, 1, "output",
46 | dnn::nullopt);
47 | }
48 | auto model = builder.AddOutput("output").Compile(
49 | ModelBuilder::PREFERENCE_FAST_SINGLE_ANSWER);
50 | if (quant8) {
51 | uint8_t input[1 * 3 * 224 * 224]{29, 100, 66, 166, 188, 222};
52 | uint8_t output[1 * 3 * 224 * 224];
53 | model->SetOutputBuffer(0, output);
54 | model->Predict(std::vector{input});
55 | LOG(INFO) << static_cast<int>(output[0]);
56 | } else {
57 | float input[1 * 3 * 224 * 224]{29, 100, 66, 166, 188, 222};
58 | float output[1 * 3 * 224 * 224];
59 | model->SetOutputBuffer(0, output);
60 | model->Predict(std::vector{input});
61 | LOG(INFO) << output[0];
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/binaries/get_devices.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <string>
3 | #include <vector>
4 |
5 | #include <common/helper.h>
6 | #include <dnnlibrary/ModelBuilder.h>
7 |
8 | using namespace android::nn::wrapper;
9 | using dnn::ModelBuilder;
10 |
11 | int main() {
12 | ModelBuilder builder;
13 | builder.Prepare();
14 | const auto devices = builder.GetDevices();
15 | if (devices.has_value()) {
16 | for (const auto &device : devices.value()) {
17 | PNT(device.name, device.feature_level, device.type, device.version);
18 | }
19 | } else {
20 | std::cout << "Cannot get devices" << std::endl;
21 | }
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/ci/adb_push_and_run.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 |
3 | # echo "${@:2}"
4 | adb push $1 /data/local/tmp/`basename $1` && adb shell "data/local/tmp/`basename $1` ${@:2}"
5 |
--------------------------------------------------------------------------------
/ci/android_aar/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea/workspace.xml
5 | /.idea/libraries
6 | .DS_Store
7 | build/
8 |
9 | *.so
10 |
11 | /captures
12 | .externalNativeBuild
13 |
14 | .idea/
15 |
16 | *.swp
17 | *.swo
18 |
--------------------------------------------------------------------------------
/ci/android_aar/README.md:
--------------------------------------------------------------------------------
1 | # DNNLibrary
2 |
3 | This directory is for building aar.
4 |
5 | Run build_aar.sh in the parent directory.
6 |
--------------------------------------------------------------------------------
/ci/android_aar/build.gradle:
--------------------------------------------------------------------------------
1 | // Top-level build file where you can add configuration options common to all sub-projects/modules.
2 |
3 | buildscript {
4 |
5 | repositories {
6 | google()
7 | jcenter()
8 | }
9 | dependencies {
10 | classpath 'com.android.tools.build:gradle:3.1.4'
11 |
12 |
13 | // NOTE: Do not place your application dependencies here; they belong
14 | // in the individual module build.gradle files
15 | }
16 | }
17 |
18 | allprojects {
19 | repositories {
20 | google()
21 | jcenter()
22 | }
23 | }
24 |
25 | task clean(type: Delete) {
26 | delete rootProject.buildDir
27 | }
28 |
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/.gitignore:
--------------------------------------------------------------------------------
1 | /build
2 |
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'com.android.library'
2 | apply plugin: 'com.novoda.bintray-release' // must be applied after your artifact generating plugin (eg. java / com.android.library)
3 |
4 | static def generateVersionCode() {
5 | def result = "git rev-list HEAD --count".execute().text.trim() //unix
6 | if(result.empty) result = "PowerShell -Command git rev-list HEAD --count".execute().text.trim() //windows
7 | if(result.empty) throw new RuntimeException("Could not generate versioncode on this platform? Cmd output: ${result.text}")
8 | return result.toInteger()
9 | }
10 |
11 | buildscript {
12 | repositories {
13 | jcenter()
14 | }
15 | dependencies {
16 | classpath 'com.novoda:bintray-release:0.8.0'
17 | }
18 | }
19 |
20 | android {
21 | compileSdkVersion 28
22 | buildToolsVersion "28.0.3"
23 | defaultConfig {
24 | minSdkVersion 27
25 | targetSdkVersion 28
26 | versionCode generateVersionCode()
27 | versionName "v0.6.15"
28 |
29 | testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
30 |
31 | ndk {
32 | abiFilters 'arm64-v8a'
33 | }
34 | }
35 |
36 | buildTypes {
37 | release {
38 | minifyEnabled false
39 | proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
40 | }
41 | }
42 | }
43 |
44 | dependencies {
45 | implementation fileTree(dir: 'libs', include: ['*.jar'])
46 |
47 | testImplementation 'junit:junit:4.12'
48 | androidTestImplementation 'com.android.support.test:runner:1.0.1'
49 | androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.1'
50 | }
51 |
52 | publish {
53 | userOrg = 'daquexian566'
54 | groupId = 'me.daquexian'
55 | artifactId = 'dnnlibrary'
56 | publishVersion = '0.6.15'
57 | desc = 'Daquexian\'s NNAPI Library. Run neural network using the new NNAPI on Android !'
58 | website = 'https://github.com/daquexian/DNNLibrary'
59 | }
60 |
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # You can control the set of applied configuration files using the
3 | # proguardFiles setting in build.gradle.
4 | #
5 | # For more details, see
6 | # http://developer.android.com/guide/developing/tools/proguard.html
7 |
8 | # If your project uses WebView with JS, uncomment the following
9 | # and specify the fully qualified class name to the JavaScript interface
10 | # class:
11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12 | # public *;
13 | #}
14 |
15 | # Uncomment this to preserve the line number information for
16 | # debugging stack traces.
17 | #-keepattributes SourceFile,LineNumberTable
18 |
19 | # If you keep the line number information, uncomment this to
20 | # hide the original source file name.
21 | #-renamesourcefileattribute SourceFile
22 |
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 | <manifest xmlns:android="http://schemas.android.com/apk/res/android"
2 |     package="me.daquexian.dnnlibrary" />
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/src/main/java/me/daquexian/dnnlibrary/Model.java:
--------------------------------------------------------------------------------
1 | package me.daquexian.dnnlibrary;
2 |
3 | /**
4 | * Created by daquexian on 2018.08.27.
5 | * Java wrapper for Model
6 | */
7 |
8 | public class Model {
9 |
10 | static {
11 | System.loadLibrary( "daq-jni");
12 | }
13 |
14 | public float[] predict(float[] input) {
15 | return predict_float_float(input);
16 | }
17 |
18 | public float[] predict(byte[] input) {
19 | return predict_quant8_float(input);
20 | }
21 |
22 | public byte[] predictQuant8(float[] input) {
23 | return predict_float_quant8(input);
24 | }
25 |
26 | public byte[] predictQuant8(byte[] input) {
27 | return predict_quant8_quant8(input);
28 | }
29 |
30 | private long nativeHandle;
31 | private native float[] predict_float_float(float[] input);
32 | private native float[] predict_quant8_float(byte[] input);
33 | private native byte[] predict_float_quant8(float[] input);
34 | private native byte[] predict_quant8_quant8(byte[] input);
35 | public native void dispose();
36 | public void finalize() {
37 | dispose();
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/ci/android_aar/dnnlibrary/src/main/java/me/daquexian/dnnlibrary/ModelBuilder.java:
--------------------------------------------------------------------------------
1 | package me.daquexian.dnnlibrary;
2 |
3 | import android.content.res.AssetManager;
4 |
5 | /**
6 | * Created by daquexian on 2017/11/12.
7 | * Java wrapper for ModelBuilder
8 | */
9 |
10 | public class ModelBuilder {
11 |
12 | static {
13 | System.loadLibrary( "daq-jni");
14 | }
15 |
16 | public static final int PREFERENCE_LOW_POWER = 0;
17 | public static final int PREFERENCE_FAST_SINGLE_ANSWER = 1;
18 | public static final int PREFERENCE_SUSTAINED_SPEED = 2;
19 | private long nativeHandle;
20 | public ModelBuilder() {
21 | initHandle();
22 | }
23 | public native ModelBuilder readFile(AssetManager assetManager, String filename);
24 | public native ModelBuilder setOutput(String blobName);
25 | public native ModelBuilder allowFp16(boolean allowed);
26 | public native Model compile(int preference);
27 | public native void dispose();
28 | private native void initHandle();
29 | public void finalize() {
30 | dispose();
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/ci/android_aar/gradle.properties:
--------------------------------------------------------------------------------
1 | ## Project-wide Gradle settings.
2 | #
3 | # For more details on how to configure your build environment visit
4 | # http://www.gradle.org/docs/current/userguide/build_environment.html
5 | #
6 | # Specifies the JVM arguments used for the daemon process.
7 | # The setting is particularly useful for tweaking memory settings.
8 | # Default value: -Xmx1024m -XX:MaxPermSize=256m
9 | # org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8
10 | #
11 | # When configured, Gradle will run in incubating parallel mode.
12 | # This option should only be used with decoupled projects. More details, visit
13 | # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
14 | # org.gradle.parallel=true
15 | #Thu May 10 20:44:55 HKT 2018
16 | org.gradle.jvmargs=-Xmx1536m
17 |
--------------------------------------------------------------------------------
/ci/android_aar/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JDAI-CV/DNNLibrary/e17f11e966b2cce7d747799b76bb9843813d4b01/ci/android_aar/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/ci/android_aar/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Tue May 22 12:33:34 HKT 2018
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.4-all.zip
7 |
8 |
--------------------------------------------------------------------------------
/ci/android_aar/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 | DEFAULT_JVM_OPTS=""
11 |
12 | APP_NAME="Gradle"
13 | APP_BASE_NAME=`basename "$0"`
14 |
15 | # Use the maximum available, or set MAX_FD != -1 to use that value.
16 | MAX_FD="maximum"
17 |
18 | warn ( ) {
19 | echo "$*"
20 | }
21 |
22 | die ( ) {
23 | echo
24 | echo "$*"
25 | echo
26 | exit 1
27 | }
28 |
29 | # OS specific support (must be 'true' or 'false').
30 | cygwin=false
31 | msys=false
32 | darwin=false
33 | case "`uname`" in
34 | CYGWIN* )
35 | cygwin=true
36 | ;;
37 | Darwin* )
38 | darwin=true
39 | ;;
40 | MINGW* )
41 | msys=true
42 | ;;
43 | esac
44 |
45 | # Attempt to set APP_HOME
46 | # Resolve links: $0 may be a link
47 | PRG="$0"
48 | # Need this for relative symlinks.
49 | while [ -h "$PRG" ] ; do
50 | ls=`ls -ld "$PRG"`
51 | link=`expr "$ls" : '.*-> \(.*\)$'`
52 | if expr "$link" : '/.*' > /dev/null; then
53 | PRG="$link"
54 | else
55 | PRG=`dirname "$PRG"`"/$link"
56 | fi
57 | done
58 | SAVED="`pwd`"
59 | cd "`dirname \"$PRG\"`/" >/dev/null
60 | APP_HOME="`pwd -P`"
61 | cd "$SAVED" >/dev/null
62 |
63 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
64 |
65 | # Determine the Java command to use to start the JVM.
66 | if [ -n "$JAVA_HOME" ] ; then
67 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
68 | # IBM's JDK on AIX uses strange locations for the executables
69 | JAVACMD="$JAVA_HOME/jre/sh/java"
70 | else
71 | JAVACMD="$JAVA_HOME/bin/java"
72 | fi
73 | if [ ! -x "$JAVACMD" ] ; then
74 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
75 |
76 | Please set the JAVA_HOME variable in your environment to match the
77 | location of your Java installation."
78 | fi
79 | else
80 | JAVACMD="java"
81 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
82 |
83 | Please set the JAVA_HOME variable in your environment to match the
84 | location of your Java installation."
85 | fi
86 |
87 | # Increase the maximum file descriptors if we can.
88 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
89 | MAX_FD_LIMIT=`ulimit -H -n`
90 | if [ $? -eq 0 ] ; then
91 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
92 | MAX_FD="$MAX_FD_LIMIT"
93 | fi
94 | ulimit -n $MAX_FD
95 | if [ $? -ne 0 ] ; then
96 | warn "Could not set maximum file descriptor limit: $MAX_FD"
97 | fi
98 | else
99 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
100 | fi
101 | fi
102 |
103 | # For Darwin, add options to specify how the application appears in the dock
104 | if $darwin; then
105 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
106 | fi
107 |
108 | # For Cygwin, switch paths to Windows format before running java
109 | if $cygwin ; then
110 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
111 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
112 | JAVACMD=`cygpath --unix "$JAVACMD"`
113 |
114 | # We build the pattern for arguments to be converted via cygpath
115 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
116 | SEP=""
117 | for dir in $ROOTDIRSRAW ; do
118 | ROOTDIRS="$ROOTDIRS$SEP$dir"
119 | SEP="|"
120 | done
121 | OURCYGPATTERN="(^($ROOTDIRS))"
122 | # Add a user-defined pattern to the cygpath arguments
123 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
124 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
125 | fi
126 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
127 | i=0
128 | for arg in "$@" ; do
129 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
130 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
131 |
132 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
133 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
134 | else
135 | eval `echo args$i`="\"$arg\""
136 | fi
137 | i=$((i+1))
138 | done
139 | case $i in
140 | (0) set -- ;;
141 | (1) set -- "$args0" ;;
142 | (2) set -- "$args0" "$args1" ;;
143 | (3) set -- "$args0" "$args1" "$args2" ;;
144 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
145 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
146 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
147 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
148 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
149 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
150 | esac
151 | fi
152 |
153 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
154 | function splitJvmOpts() {
155 | JVM_OPTS=("$@")
156 | }
157 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
158 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
159 |
160 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
161 |
--------------------------------------------------------------------------------
/ci/android_aar/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/ci/android_aar/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':dnnlibrary'
2 |
--------------------------------------------------------------------------------
/ci/appimage/onnx2daq.desktop:
--------------------------------------------------------------------------------
1 | [Desktop Entry]
2 | Name=onnx2daq
3 | Exec=onnx2daq
4 | Icon=onnx2daq
5 | Type=Application
6 | Terminal=true
7 | Categories=Development;
8 |
--------------------------------------------------------------------------------
/ci/appimage/onnx2daq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JDAI-CV/DNNLibrary/e17f11e966b2cce7d747799b76bb9843813d4b01/ci/appimage/onnx2daq.png
--------------------------------------------------------------------------------
/ci/build_aar.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 |
3 | set -e
4 |
5 | nproc=$(ci/get_cores.sh)
6 | if [[ "$OSTYPE" == "darwin"* ]]; then
7 | echo "The system is Mac OS X, alias sed to gsed"
8 | export PATH="/usr/local/opt/gnu-sed/libexec/gnubin:$PATH"
9 | echo "Output of sed -v:"
10 | sed --version
11 | fi
12 |
13 | MY_ANDROID_HOME="${ANDROID_HOME:-$HOME/Android/Sdk}"
14 | MY_ANDROID_NDK_HOME="${ANDROID_NDK_HOME:-$MY_ANDROID_HOME/ndk-bundle}"
15 | JNI_BUILD_DIR=build_jni_tmp
16 | rm -rf ${JNI_BUILD_DIR} && mkdir ${JNI_BUILD_DIR} && pushd ${JNI_BUILD_DIR}
17 | cmake -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=${MY_ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-27 -DANDROID_ABI=arm64-v8a -DDNN_BUILD_JNI=ON -DDNN_BUILD_BIN=OFF ..
18 | cmake --build . --target daq-jni -- -j$nproc
19 | popd
20 | mkdir -p ci/android_aar/dnnlibrary/src/main/jniLibs/arm64-v8a
21 | cp ${JNI_BUILD_DIR}/dnnlibrary/libdaq-jni.so ci/android_aar/dnnlibrary/src/main/jniLibs/arm64-v8a/
22 |
23 | # Increase version code and update version name
24 |
25 | echo "Build source branch: $BUILD_SOURCEBRANCH"
26 |
27 | if (($# == 0)); then
28 | if [[ $BUILD_SOURCEBRANCH == refs/tags/v* ]]; then
29 | ver=`echo $BUILD_SOURCEBRANCH | cut -c 12-`
30 | else
31 | echo "HEAD is not tagged as a version, skip deploy aar"
32 | exit 0
33 | fi
34 | elif (( $# == 1 )); then
35 | ver=$1
36 | fi
37 | echo "ver=$ver"
38 |
39 | sed -i -E "s/versionName .+/versionName \"v$ver\"/" ci/android_aar/dnnlibrary/build.gradle
40 | sed -i -E "s/publishVersion = .+/publishVersion = \'$ver\'/" ci/android_aar/dnnlibrary/build.gradle
41 |
42 | cat ci/android_aar/dnnlibrary/build.gradle
43 |
44 | pushd ci/android_aar
45 | ANDROID_HOME=$MY_ANDROID_HOME ./gradlew clean build
46 |
47 | # Publishing is only for myself
48 | if [[ -z $BINTRAY_KEY ]]; then
49 | echo "BINTRAY_KEY is not set, skip bintray upload"
50 | else
51 | echo "Publishing.."
52 | ANDROID_HOME=$MY_ANDROID_HOME ./gradlew bintrayUpload -PbintrayUser=daquexian566 -PbintrayKey=$BINTRAY_KEY -PdryRun=false
53 | fi
54 | popd
55 |
--------------------------------------------------------------------------------
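The script above is meant to be run from the repository root. On CI it derives the AAR version from the tag in BUILD_SOURCEBRANCH; locally a version can be passed explicitly (the version number below is illustrative):

ci/build_aar.sh 0.6.10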
/ci/build_appimage.sh:
--------------------------------------------------------------------------------
1 | wget https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage
2 | wget https://github.com/linuxdeploy/linuxdeploy-plugin-appimage/releases/download/continuous/linuxdeploy-plugin-appimage-x86_64.AppImage
3 |
4 | chmod +x linuxdeploy-*.AppImage
5 | mkdir -p ci/appimage/appdir/usr/bin
6 | cp build_onnx2daq/tools/onnx2daq/onnx2daq ci/appimage/appdir/usr/bin/
7 | ./linuxdeploy-x86_64.AppImage --appdir ci/appimage/appdir -d ci/appimage/onnx2daq.desktop -i ci/appimage/onnx2daq.png --output appimage
8 | mv `ls onnx2daq-*.AppImage` onnx2daq.AppImage
9 |
--------------------------------------------------------------------------------
/ci/build_dnnlibrary.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -e
3 |
4 | echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'ndk-bundle'
5 | nproc=$(ci/get_cores.sh)
6 |
7 | mkdir build_dnnlibrary && pushd build_dnnlibrary
8 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-27 -DANDROID_ABI=x86_64 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -DDNN_READ_ONNX=ON -DDNN_CUSTOM_PROTOC_EXECUTABLE=$BUILD_SOURCESDIRECTORY/protoc/bin/protoc ..
9 | cmake --build . -- -j$nproc
10 | popd
11 |
--------------------------------------------------------------------------------
/ci/build_onnx2daq.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -e
3 |
4 | nproc=$(ci/get_cores.sh)
5 |
6 | mkdir build_onnx2daq && cd build_onnx2daq
7 | cmake ..
8 | cmake --build . -- -j$nproc
9 | cd -
10 |
--------------------------------------------------------------------------------
/ci/dnnlibrary_build_and_test.yml:
--------------------------------------------------------------------------------
1 | trigger:
2 | branches:
3 | include:
4 | - master
5 | tags:
6 | include:
7 | - v*
8 | paths:
9 | include:
10 | - '*'
11 | exclude:
12 | - README.md
13 | - docs/*
14 | pr:
15 | branches:
16 | include:
17 | - '*'
18 | paths:
19 | include:
20 | - '*'
21 | exclude:
22 | - README.md
23 | - docs/*
24 |
25 | pool:
26 | vmImage: 'macOS-10.14'
27 | steps:
28 | - checkout: self
29 | submodules: true
30 | - bash: brew install watch gnu-sed
31 | displayName: Install watch and gnu-sed
32 | # cmake 3.15 breaks Android NDK https://gitlab.kitware.com/cmake/cmake/issues/19515
33 | - script: pip install cmake==3.14.4 && alias cmake=/usr/local/bin/cmake && cmake --version
34 | displayName: Install cmake 3.14.4
35 | - bash: ci/download_protoc.sh
36 | displayName: Download protoc
37 | - bash: ci/build_dnnlibrary.sh
38 | displayName: Build
39 | - bash: ci/start_android_emulator.sh
40 | displayName: Start Android Emulator
41 | - bash: pip3 install --user onnx
42 | displayName: Install ONNX
43 | - bash: ci/download_and_test_models.sh
44 | displayName: Download And Test Models
45 | - bash: ci/build_aar.sh
46 | env:
47 | BINTRAY_KEY: $(bintrayKey)
48 | displayName: Build and Publish AAR package
49 |
--------------------------------------------------------------------------------
/ci/download_and_test_models.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 |
3 | set -e
4 |
5 | wget -q "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/squeezenet1.1.tar.gz" -O squeezenet1.1.tar.gz
6 | tar xf squeezenet1.1.tar.gz
7 | python3 ci/validate_onnx.py squeezenet1.1 build_dnnlibrary/binaries/dnn_retrieve_result
8 |
9 | wget -q "https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.tar.gz" -O mobilenetv2-1.0.tar.gz
10 | tar xf mobilenetv2-1.0.tar.gz
11 | python3 ci/validate_onnx.py mobilenetv2-1.0 build_dnnlibrary/binaries/dnn_retrieve_result
12 |
13 | wget -q "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.tar.gz" -O resnet18v2.tar.gz
14 | tar xf resnet18v2.tar.gz
15 | python3 ci/validate_onnx.py resnet18v2 build_dnnlibrary/binaries/dnn_retrieve_result
16 |
17 | wget -q "https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_googlenet.tar.gz" -O bvlc_googlenet.tar.gz
18 | tar xf bvlc_googlenet.tar.gz
19 | python3 ci/validate_onnx.py bvlc_googlenet build_dnnlibrary/binaries/dnn_retrieve_result
20 |
21 |
--------------------------------------------------------------------------------
/ci/download_protoc.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -e
3 |
4 | wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protoc-3.7.1-osx-x86_64.zip -O protoc.zip
5 | unzip protoc.zip -d protoc
6 |
--------------------------------------------------------------------------------
/ci/get_cores.sh:
--------------------------------------------------------------------------------
1 | if [[ "$OSTYPE" == "drawin*" ]]; then
2 | echo $(sysctl -n hw.physicalcpu)
3 | else
4 | echo $(nproc)
5 | fi
6 |
--------------------------------------------------------------------------------
/ci/onnx2daq_build.yml:
--------------------------------------------------------------------------------
1 | trigger:
2 | branches:
3 | include:
4 | - master
5 | tags:
6 | include:
7 | - v*
8 | paths:
9 | include:
10 | - '*'
11 | exclude:
12 | - README.md
13 | - docs/*
14 | # don't trigger the onnx2daq build when only dnnlibrary is edited
15 | - dnnlibrary/*
16 | pr:
17 | branches:
18 | include:
19 | - '*'
20 | paths:
21 | include:
22 | - '*'
23 | exclude:
24 | - README.md
25 | - docs/*
26 | # don't trigger the onnx2daq build when only dnnlibrary is edited
27 | - dnnlibrary/*
28 |
29 | jobs:
30 | - job: LinuxAppImage
31 | pool:
32 | vmImage: 'ubuntu-16.04'
33 | steps:
34 | - checkout: self
35 | submodules: true
36 | - script: sudo apt install -y protobuf-compiler libprotobuf-dev
37 | displayName: Install protobuf
38 | - bash: ci/build_onnx2daq.sh
39 | displayName: Build
40 | - bash: ci/build_appimage.sh
41 | displayName: Build AppImage
42 | - task: CopyFiles@2
43 | inputs:
44 | contents: 'onnx2daq.AppImage'
45 | targetFolder: $(Build.ArtifactStagingDirectory)
46 | - template: template_onnx2daq_publish_artifacts.yml
47 | - template: template_onnx2daq_github_release.yml
48 | # Don't know why Windows CI fails
49 | # - job: Windows
50 | # pool:
51 | # vmImage: 'vs2017-win2016'
52 | # steps:
53 | # - checkout: self
54 | # submodules: true
55 | # - template: template_onnx2daq_build_python_all_version.yml
56 | # - task: CopyFiles@2
57 | # inputs:
58 | # sourceFolder: '.setuptools-cmake-build\tools\onnx2daq\Release\'
59 | # contents: 'onnx2daq.exe'
60 | # targetFolder: $(Build.ArtifactStagingDirectory)
61 | # - template: template_onnx2daq_publish_artifacts.yml
62 | # - template: template_onnx2daq_github_release.yml
63 | # - template: template_onnx2daq_upload_to_pypi.yml
64 | - job: macOS
65 | pool:
66 | vmImage: 'macOS-10.14'
67 | steps:
68 | - checkout: self
69 | submodules: true
70 | - template: template_onnx2daq_build_python_all_version.yml
71 | - script: 'cp .setuptools-cmake-build/tools/onnx2daq/onnx2daq .setuptools-cmake-build/tools/onnx2daq/onnx2daq-macos'
72 | displayName: 'Rename onnx2daq'
73 | - task: CopyFiles@2
74 | inputs:
75 | sourceFolder: '.setuptools-cmake-build/tools/onnx2daq'
76 | contents: 'onnx2daq-macos'
77 | targetFolder: $(Build.ArtifactStagingDirectory)
78 | - template: template_onnx2daq_publish_artifacts.yml
79 | - template: template_onnx2daq_github_release.yml
80 | - template: template_onnx2daq_upload_to_pypi.yml
81 |
--------------------------------------------------------------------------------
/ci/onnxruntime_test.yml:
--------------------------------------------------------------------------------
1 | trigger:
2 | branches:
3 | include:
4 | - master
5 | tags:
6 | include:
7 | - v*
8 | paths:
9 | include:
10 | - '*'
11 | exclude:
12 | - README.md
13 | - docs/*
14 | pr:
15 | branches:
16 | include:
17 | - '*'
18 | paths:
19 | include:
20 | - '*'
21 | exclude:
22 | - README.md
23 | - docs/*
24 |
25 | pool:
26 | vmImage: 'macOS-10.14'
27 | steps:
28 | - checkout: self
29 | submodules: true
30 | - script: git clone --recursive --branch android https://github.com/daquexian/onnxruntime $(Agent.HomeDirectory)/onnxruntime
31 | displayName: Clone ONNX Runtime
32 | - script: rm -rf $(Agent.HomeDirectory)/onnxruntime/cmake/external/DNNLibrary && cp -r $(Build.SourcesDirectory) $(Agent.HomeDirectory)/onnxruntime/cmake/external/DNNLibrary
33 | displayName: Copy latest DNNLibrary
34 | - script: pip install cmake==3.13.2.post1 && alias cmake=/usr/local/bin/cmake && cmake --version && brew install coreutils
35 | displayName: Install cmake 3.13 and coreutils
36 | - script: echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'ndk-bundle'
37 | displayName: Install Android NDK
38 | - script: tools/ci_build/build.py --android --build_dir build --android_ndk $ANDROID_HOME/ndk-bundle --android_abi=x86_64 --skip_submodule_sync --parallel --use_dnnlibrary
39 | workingDirectory: $(Agent.HomeDirectory)/onnxruntime
40 | displayName: Build and Test on Android Emulator
41 |
--------------------------------------------------------------------------------
/ci/start_android_emulator.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -e
3 |
4 | export TERM=xterm
5 |
6 | echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'system-images;android-28;google_apis;x86_64'
7 |
8 | echo "no" | $ANDROID_HOME/tools/bin/avdmanager create avd -n android_emulator -k 'system-images;android-28;google_apis;x86_64' --force
9 |
10 | echo "Starting emulator"
11 |
12 | # Start emulator in background
13 | nohup $ANDROID_HOME/emulator/emulator -avd android_emulator -no-snapshot -no-audio &
14 |
15 | # start the adb server in advance, so that the output of watch only changes when the device comes online
16 | $ANDROID_HOME/platform-tools/adb start-server
17 |
18 | watch -g -n 1 '$ANDROID_HOME/platform-tools/adb devices | grep -c device$'
19 |
20 | echo "Emulator is online"
21 |
22 | $ANDROID_HOME/platform-tools/adb devices
23 |
24 | echo "Emulator started"
25 |
--------------------------------------------------------------------------------
/ci/template_onnx2daq_build_python.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - script: python -m pip install --user --upgrade setuptools wheel twine
3 | displayName: Install setuptools, wheel and twine
4 | - script: python setup.py bdist_wheel
5 | workingDirectory: tools/onnx2daq/python/
6 | displayName: Build onnx2daq python package
7 |
--------------------------------------------------------------------------------
/ci/template_onnx2daq_build_python_all_version.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: UsePythonVersion@0
3 | inputs:
4 | versionSpec: '3.7'
5 | addToPath: true
6 | architecture: 'x64'
7 | - template: template_onnx2daq_build_python.yml
8 | - task: UsePythonVersion@0
9 | inputs:
10 | versionSpec: '3.6'
11 | addToPath: true
12 | architecture: 'x64'
13 | - template: template_onnx2daq_build_python.yml
14 | - task: UsePythonVersion@0
15 | inputs:
16 | versionSpec: '3.5'
17 | addToPath: true
18 | architecture: 'x64'
19 | - template: template_onnx2daq_build_python.yml
20 |
21 |
--------------------------------------------------------------------------------
/ci/template_onnx2daq_github_release.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: GitHubRelease@0
3 | condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v'))
4 | inputs:
5 | gitHubConnection: 'daquexian'
6 | repositoryName: '$(Build.Repository.Name)'
7 | action: 'edit'
8 | tag: '$(Build.SourceBranchName)'
9 | target: '$(Build.SourceVersion)'
10 | assets: '$(Build.ArtifactStagingDirectory)/*'
11 | assetUploadMode: 'replace'
12 |
13 |
--------------------------------------------------------------------------------
/ci/template_onnx2daq_publish_artifacts.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: PublishBuildArtifacts@1
3 | inputs:
4 | pathtoPublish: $(Build.ArtifactStagingDirectory)
5 | artifactName: onnx2daq
6 |
7 |
--------------------------------------------------------------------------------
/ci/template_onnx2daq_upload_to_pypi.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - script: python -m twine upload dist/* --verbose
3 | condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v'))
4 | displayName: Upload wheel to PyPI
5 | workingDirectory: tools/onnx2daq/python/
6 | env:
7 | TWINE_USERNAME: $(twineUsername)
8 | TWINE_PASSWORD: $(twinePassword)
9 |
10 |
--------------------------------------------------------------------------------
/ci/validate_onnx.py:
--------------------------------------------------------------------------------
1 | import onnx
2 | from onnx import numpy_helper
3 | import os
4 | import glob
5 | import numpy as np
6 | import tempfile
7 |
8 |
9 | def convert(onnx2daq, onnx_model, daq, table_file=''):
10 |     # Run the onnx2daq binary to convert the ONNX model to a .daq file
11 |     os.system("{} {} {} {}".format(onnx2daq, onnx_model, daq, table_file))
12 | print("Converted to daq")
13 |
14 |
15 | def finish(model):
16 | os.system("adb shell rm /data/local/tmp/{}".format(os.path.basename(model)))
17 | if model[-4:] == '.daq':
18 | os.system("rm {}".format(model))
19 |
20 |
21 | def run(input_arrs, daq, dnn_retrieve_result, quant_input=False, quant_output=False):
22 | input_txts = []
23 | for i, input_arr in enumerate(input_arrs):
24 | if len(input_arr.shape) == 4:
25 | nchw_shape = input_arr.shape
26 | nhwc_shape = (nchw_shape[0], nchw_shape[2], nchw_shape[3], nchw_shape[1])
27 | input_arr = np.moveaxis(input_arr, 1, -1)
28 | assert input_arr.shape == nhwc_shape
29 | input_txt = 'input{}.txt'.format(i)
30 | np.savetxt(input_txt, input_arr.flatten(), delimiter='\n')
31 | input_txts.append(input_txt)
32 | input_txts_arg = " ".join(input_txts)
33 | input_txts_in_android_arg = " ".join(map(lambda x: "/data/local/tmp/" + x, input_txts))
34 |
35 | txt = os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
36 | os.system("adb push {} /data/local/tmp/".format(input_txts_arg))
37 | os.system("adb push {} /data/local/tmp/dnn_retrieve_result".format(dnn_retrieve_result))
38 | os.system('adb shell "LD_LIBRARY_PATH=/data/local/tmp/ /data/local/tmp/dnn_retrieve_result --nchw_result /data/local/tmp/{} {} {} {}"'.format(os.path.basename(daq), "--quant_input" if quant_input else "", "--quant_output" if quant_output else "", input_txts_in_android_arg))
39 | os.system("adb shell rm {}".format(input_txts_in_android_arg))
40 | os.system("adb shell rm /data/local/tmp/dnn_retrieve_result")
41 | os.system("adb pull /data/local/tmp/result {}".format(txt))
42 | os.system("adb shell rm /data/local/tmp/result")
43 | os.system("rm {}".format(input_txts_arg))
44 | actual = np.loadtxt(txt)
45 | assert not np.any(np.isnan(actual))
46 |
47 | return actual
48 |
49 | if __name__ == '__main__':
50 | import argparse
51 | parser = argparse.ArgumentParser(description='Test onnx model on nnapi')
52 | parser.add_argument('dir', type=str, help='onnx model file')
53 | parser.add_argument('dnn_retrieve_result', type=str, help='dnn_retrieve_result binary file')
54 | parser.add_argument('--onnx2daq', type=str, help='onnx2daq binary file')
55 | parser.add_argument('--table_file', type=str, help='table file for 8-bit quantization', default='')
56 | parser.add_argument('--quant_input', help='whether the input is quant8', action='store_true')
57 | parser.add_argument('--quant_output', help='whether the output is quant8', action='store_true')
58 |
59 | args = parser.parse_args()
60 |
61 | onnx_model = glob.glob(os.path.join(args.dir, '*.onnx'))
62 | assert len(onnx_model) == 1
63 | onnx_model = onnx_model[0]
64 | data_dirs = glob.glob(os.path.join(args.dir, 'test_data_set_*'))
65 |
66 | for data_dir in data_dirs:
67 | # Load inputs
68 | inputs = []
69 | inputs_num = len(glob.glob(os.path.join(data_dir, 'input_*.pb')))
70 | for i in range(inputs_num):
71 | input_file = os.path.join(data_dir, 'input_{}.pb'.format(i))
72 | tensor = onnx.TensorProto()
73 | with open(input_file, 'rb') as f:
74 | tensor.ParseFromString(f.read())
75 | np_arr = numpy_helper.to_array(tensor)
76 | if args.quant_input:
77 | np_arr = np_arr.astype(np.uint8)
78 | inputs.append(np_arr)
79 |
80 | # Load reference outputs
81 | ref_outputs = []
82 | ref_outputs_num = len(glob.glob(os.path.join(data_dir, 'output_*.pb')))
83 | for i in range(ref_outputs_num):
84 | output_file = os.path.join(data_dir, 'output_{}.pb'.format(i))
85 | tensor = onnx.TensorProto()
86 | with open(output_file, 'rb') as f:
87 | tensor.ParseFromString(f.read())
88 | ref_outputs.append(numpy_helper.to_array(tensor))
89 |
90 | if args.onnx2daq is None:
91 | model = onnx_model
92 | else:
93 | model = "temp.daq"
94 | convert(args.onnx2daq, onnx_model, model, args.table_file)
95 | os.system("adb push {} /data/local/tmp/".format(model))
96 | actual = run(inputs, model, args.dnn_retrieve_result, args.quant_input, args.quant_output)
97 | expected = ref_outputs[i].flatten()
98 |
99 | print('====================')
100 | try:
101 | print("Max relative diff: {}".format(np.max(np.abs(expected - actual) / expected)))
102 | np.testing.assert_array_almost_equal(expected, actual, decimal=3)
103 | print('{} passed'.format(os.path.basename(data_dir)))
104 | except (AssertionError, ValueError) as e:
105 | print('{} failed'.format(os.path.basename(data_dir)))
106 | print(str(e))
107 | print(expected)
108 | print('-----')
109 | print(actual)
110 | print(np.argmax(actual))
111 |
112 | finish(model)
113 |
--------------------------------------------------------------------------------
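The CI script download_and_test_models.sh above calls this validator without the optional flags; a hedged example that additionally converts the model with a locally built onnx2daq (the paths are assumptions based on the other CI scripts):

python3 ci/validate_onnx.py mobilenetv2-1.0 \
    build_dnnlibrary/binaries/dnn_retrieve_result \
    --onnx2daq build_onnx2daq/tools/onnx2daq/onnx2daq

When --onnx2daq is omitted, the .onnx file itself is pushed to the device and read directly by dnn_retrieve_result.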
/cmake/DNNLibraryConfig.cmake.in:
--------------------------------------------------------------------------------
1 | get_filename_component(DNNLibrary_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
2 |
3 | if (@DNN_READ_ONNX@)
4 | if (NOT TARGET protobuf::libprotobuf-lite)
5 | find_package(Protobuf REQUIRED)
6 | endif()
7 | if (NOT TARGET onnx)
8 | find_package(ONNX REQUIRED)
9 | endif()
10 | endif()
11 |
12 | if (NOT TARGET glog::glog)
13 | find_package(glog REQUIRED)
14 | endif()
15 |
16 | if (NOT TARGET dnnlibrary::dnnlibrary)
17 | include("${DNNLibrary_CMAKE_DIR}/DNNLibraryTargets.cmake")
18 | endif()
19 |
--------------------------------------------------------------------------------
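A downstream project would consume the installed package roughly as follows (a sketch; the consumer target name is an assumption):

find_package(DNNLibrary REQUIRED)
add_executable(my_app main.cpp)
target_link_libraries(my_app dnnlibrary::dnnlibrary)

Protobuf and ONNX are only pulled in when the library was built with DNN_READ_ONNX enabled, as the config file above shows.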
/cmake/ONNX2daqConfig.cmake.in:
--------------------------------------------------------------------------------
1 | get_filename_component(ONNX2daq_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
2 |
3 | if (NOT TARGET protobuf::libprotobuf-lite)
4 | find_package(Protobuf REQUIRED)
5 | endif()
6 |
7 | if (NOT TARGET onnx)
8 | find_package(ONNX REQUIRED)
9 | endif()
10 |
11 | if (NOT TARGET glog::glog)
12 | find_package(glog REQUIRED)
13 | endif()
14 |
15 | if (NOT TARGET dnnlibrary::onnx2daq)
16 | include("${ONNX2daq_CMAKE_DIR}/ONNX2daqTargets.cmake")
17 | endif()
18 |
--------------------------------------------------------------------------------
/cmake/common.cmake:
--------------------------------------------------------------------------------
1 | function(treat_warnings_as_errors target)
2 | if(MSVC)
3 | target_compile_options(${target} PRIVATE "/W4" "/WX")
4 | else()
5 | target_compile_options(${target} PRIVATE "-Wall" "-Wextra" "-Werror")
6 | endif()
7 | endfunction()
8 |
--------------------------------------------------------------------------------
/cmake/flatbuffers.cmake:
--------------------------------------------------------------------------------
1 | function(configure_flatbuffers)
2 | option(FLATBUFFERS_BUILD_TESTS "Enable the build of tests and samples." OFF)
3 | option(FLATBUFFERS_BUILD_FLATHASH "Enable the build of flathash" OFF)
4 | option(FLATBUFFERS_BUILD_FLATC "Enable the build of the flatbuffers compiler"
5 | OFF)
6 | option(FLATBUFFERS_BUILD_FLATLIB "Enable the build of the flatbuffers library"
7 | ON)
8 | add_subdirectory(third_party/flatbuffers)
9 | endfunction()
10 |
11 |
--------------------------------------------------------------------------------
/cmake/glog.cmake:
--------------------------------------------------------------------------------
1 | function(configure_glog)
2 | if (TARGET glog::glog)
3 | return()
4 | endif()
5 | message(STATUS "Configureing glog...")
6 |
7 | set(TEMP_BUILD_TESTING ${BUILD_TESTING})
8 | set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
9 | set(TEMP_WITH_GFLAGS ${WITH_GFLAGS})
10 | set(WITH_GFLAGS OFF CACHE BOOL "" FORCE)
11 |
12 | add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/glog)
13 |
14 | set(BUILD_TESTING ${TEMP_BUILD_TESTING} CACHE BOOL "" FORCE)
15 | set(WITH_GFLAGS ${TEMP_WITH_GFLAGS} CACHE BOOL "" FORCE)
16 | endfunction()
17 |
--------------------------------------------------------------------------------
/cmake/onnx.cmake:
--------------------------------------------------------------------------------
1 | function(configure_onnx)
2 | if (TARGET onnx)
3 | return()
4 | endif()
5 |
6 | if (NOT ${DNN_SYSTEM_PROTOBUF})
7 | include(${PROJECT_SOURCE_DIR}/cmake/protobuf.cmake)
8 | configure_protobuf()
9 | endif()
10 |
11 | message(STATUS "Configuring onnx...")
12 | set(DAQ_ONNX_NAMESPACE onnx_daq)
13 | if (MSVC)
14 | set(ONNX_CMAKELISTS ${PROJECT_SOURCE_DIR}/third_party/onnx/CMakeLists.txt)
15 | file(READ ${ONNX_CMAKELISTS} content)
16 | string(
17 | REPLACE
18 | "/WX"
19 | ""
20 | content
21 | "${content}"
22 | )
23 | file(WRITE ${ONNX_CMAKELISTS} "${content}")
24 | endif()
25 | set(ONNX_BUILD_MAIN_LIB ON)
26 | set(ONNX_NAMESPACE ${DAQ_ONNX_NAMESPACE} CACHE STRING "onnx namespace")
27 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Android" AND NOT EXISTS ${DNN_CUSTOM_PROTOC_EXECUTABLE})
28 | message(FATAL_ERROR "DNN_CUSTOM_PROTOC_EXECUTABLE is not set or does not exist.")
29 | endif()
30 | set(ONNX_CUSTOM_PROTOC_EXECUTABLE ${DNN_CUSTOM_PROTOC_EXECUTABLE})
31 | option(ONNX_USE_LITE_PROTO "" ON)
32 | add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/onnx)
33 | target_compile_definitions(onnx_proto PRIVATE ONNX_BUILD_MAIN_LIB)
34 | # Since https://github.com/onnx/onnx/pull/1318 is merged, we don't need to set it manually
35 | # target_compile_definitions(onnx
36 | # PUBLIC
37 | # -DONNX_NAMESPACE=${DAQ_ONNX_NAMESPACE})
38 | endfunction()
39 |
--------------------------------------------------------------------------------
/cmake/protobuf.cmake:
--------------------------------------------------------------------------------
1 | function(configure_protobuf)
2 | if (NOT TARGET libprotobuf-lite)
3 | message(STATUS "Configuring protobuf...")
4 | option(protobuf_BUILD_TESTS "" OFF)
5 | option(protobuf_BUILD_EXAMPLES "" OFF)
6 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Android")
7 | option(protobuf_BUILD_PROTOC_BINARIES "" OFF)
8 | else()
9 | option(protobuf_BUILD_PROTOC_BINARIES "" ON)
10 | endif()
11 | add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/protobuf/cmake)
12 | if (protobuf_BUILD_PROTOC_BINARIES)
13 | add_executable(protobuf::protoc ALIAS protoc)
14 | endif()
15 | endif()
16 | add_library(protobuf::libprotobuf-lite ALIAS libprotobuf-lite)
17 | endfunction()
18 |
--------------------------------------------------------------------------------
/cmake/system.cmake:
--------------------------------------------------------------------------------
1 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Android")
2 | set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
3 | set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
4 | set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
5 | set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
6 | list(APPEND CMAKE_FIND_ROOT_PATH ${ANDROID_NDK}) # workaround for r19 (https://github.com/android-ndk/ndk/issues/890)
7 | endif()
8 |
--------------------------------------------------------------------------------
/cmake/utils.cmake:
--------------------------------------------------------------------------------
1 | # Copyright 2019 JD.com Inc. JD AI
2 |
3 | # Add MSVC RunTime Flag
4 | function(dnn_add_msvc_runtime_flag)
5 | if (MSVC)
6 | if(${DNN_USE_MSVC_STATIC_RUNTIME})
7 | if(${CMAKE_BUILD_TYPE} MATCHES "Debug")
8 | add_compile_options(/MTd)
9 | else()
10 | add_compile_options(/MT)
11 | endif()
12 | else()
13 | if(${CMAKE_BUILD_TYPE} MATCHES "Debug")
14 | add_compile_options(/MDd)
15 | else()
16 | add_compile_options(/MD)
17 | endif()
18 | endif()
19 | endif()
20 | endfunction()
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/common/Shaper.cpp:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | #include
4 | #include
5 |
6 | using std::string;
7 | using std::vector;
8 |
9 | Shaper::len_t Shaper::total(const Shape &shape) {
10 | return Product(shape);
11 | }
12 |
13 | // /**
14 | // * strides: [stride_y, stride_x]
15 | // * paddings: [top, left, bottom, right]
16 | // */
17 | // void Shaper::Conv(const std::string &input_name, const std::string
18 | // &weight_name,
19 | // const std::vector paddings,
20 | // const std::vector strides,
21 | // const std::string &output_name) {
22 | // Shaper::Conv(input_name, strides[1], strides[0], 1, 1, paddings[1],
23 | // paddings[3], paddings[0], paddings[2], weight_name,
24 | // output_name);
25 | // }
26 | //
27 | // void Shaper::Conv(const std::string &input_name,
28 | // const std::vector paddings,
29 | // const std::vector strides,
30 | // const std::vector dilations,
31 | // const std::string &weight_name,
32 | // const std::string &output_name) {
33 | // Shaper::Conv(input_name, strides[1], strides[0], dilations[1],
34 | // dilations[0],
35 | // paddings[1], paddings[3], paddings[0], paddings[2],
36 | // weight_name, output_name);
37 | // }
38 | void Shaper::Conv(const std::string &input, const std::string &weight,
39 | int32_t padding_left, int32_t padding_right,
40 | int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
41 | int32_t stride_y, const bool nchw, const int32_t dilation_x,
42 | const int32_t dilation_y, const std::string &output) {
43 | DNN_ASSERT_EQ(nchw, false);
44 | Conv(input, stride_x, stride_y, dilation_x, dilation_y, padding_left,
45 | padding_right, padding_top, padding_bottom, weight, output);
46 | }
47 |
48 | void Shaper::Conv(const std::string &input_name, int32_t strideX,
49 | int32_t strideY, int32_t dilationX, int32_t dilationY,
50 | int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
51 | int32_t paddingBottom, const std::string &weight_name,
52 | const std::string &output_name) {
53 | Shape weightDimen =
54 | shape_map_.at(weight_name); // num_output, height, width, num_input
55 | // NHWC
56 | Shape inputDimen = shape_map_.at(input_name);
57 | Shape outputDimen{inputDimen[0],
58 | (inputDimen[1] - ((weightDimen[1] - 1) * dilationY + 1) +
59 | paddingTop + paddingBottom) /
60 | strideY +
61 | 1,
62 | (inputDimen[2] - ((weightDimen[2] - 1) * dilationX + 1) +
63 | paddingLeft + paddingRight) /
64 | strideX +
65 | 1,
66 | weightDimen[0]};
67 | shape_map_[output_name] = outputDimen;
68 | }
69 |
70 | // void Shaper::DepthwiseConv(const std::string &input_name,
71 | // const std::vector paddings,
72 | // const std::vector strides,
73 | // const std::vector dilations,
74 | // const std::string &weight_name,
75 | // const std::string &output_name) {
76 | // Shaper::DepthwiseConv(input_name, strides[1], strides[0], dilations[1],
77 | // dilations[0], paddings[1], paddings[3],
78 | // paddings[0], paddings[2], weight_name,
79 | // output_name);
80 | // }
81 | //
82 | void Shaper::DepthwiseConv(const std::string &input_name,
83 | const std::string &weight_name, int32_t padding_left,
84 | int32_t padding_right, int32_t padding_top,
85 | int32_t padding_bottom, int32_t stride_x,
86 | int32_t stride_y, const std::string &output) {
87 | DepthwiseConv(input_name, stride_x, stride_y, 1, 1, padding_left,
88 | padding_right, padding_top, padding_bottom, weight_name,
89 | output);
90 | }
91 |
92 | void Shaper::DepthwiseConv(const std::string &input_name, int32_t strideX,
93 | int32_t strideY, int32_t dilationX,
94 | int32_t dilationY, int32_t paddingLeft,
95 | int32_t paddingRight, int32_t paddingTop,
96 | int32_t paddingBottom,
97 | const std::string &weight_name,
98 | const std::string &output_name) {
99 | Shape weightDimen =
100 | shape_map_.at(weight_name); // 1, height, width, num_output
101 | // NHWC
102 | Shape inputDimen = shape_map_.at(input_name);
103 | Shape outputDimen{inputDimen[0],
104 | (inputDimen[1] - ((weightDimen[1] - 1) * dilationY + 1) +
105 | paddingTop + paddingBottom) /
106 | strideY +
107 | 1,
108 | (inputDimen[2] - ((weightDimen[2] - 1) * dilationX + 1) +
109 | paddingLeft + paddingRight) /
110 | strideX +
111 | 1,
112 | weightDimen[3]};
113 | shape_map_[output_name] = outputDimen;
114 | }
115 |
116 | void Shaper::DepthwiseConv(const std::string &input_name,
117 | const std::string &weight_name,
118 | const std::vector<int32_t> paddings,
119 | const std::vector<int32_t> strides,
120 | const std::string &output_name) {
121 | DepthwiseConv(input_name, weight_name, paddings[1], paddings[3],
122 | paddings[0], paddings[2], strides[1], strides[0],
123 | output_name);
124 | }
125 |
126 | void Shaper::StridedSlice(const std::string &input_name,
127 | const std::vector &starts,
128 | const std::vector &ends,
129 | const std::vector &strides,
130 | int32_t beginMask, int32_t endMask,
131 | int32_t shrinkAxisMask,
132 | const std::string &output_name) {
133 | // NHWC
134 | vector<uint32_t> inputDimen = shape_map_.at(input_name);
135 | vector<uint32_t> outputDimen;
136 | for (size_t i = 0; i < inputDimen.size(); i++) {
137 | if (shrinkAxisMask & (1 << i)) {
138 | continue;
139 | }
140 | int32_t start = starts[i], end = ends[i], stride = strides[i];
141 | if (beginMask & (1 << i)) {
142 | start = 0;
143 | }
144 | if (endMask & (1 << i)) {
145 | end = inputDimen[i];
146 | }
147 | outputDimen.emplace_back((end - start) / stride);
148 | }
149 | shape_map_[output_name] = outputDimen;
150 | }
151 |
152 | void Shaper::Pool(const std::string &input_name, int32_t padding_left,
153 | int32_t padding_right, int32_t padding_top,
154 | int32_t padding_bottom, int32_t stride_x, int32_t stride_y,
155 | int32_t width, int32_t height,
156 | const std::string &output_name) {
157 | auto inputDimen = shape_map_.at(input_name);
158 |
159 | Shape outputDimen;
160 | if (height == -1 && width == -1) {
161 | outputDimen = {inputDimen[0], 1, 1, inputDimen[3]};
162 | } else {
163 | outputDimen = {
164 | inputDimen[0],
165 | (inputDimen[1] - height + padding_top + padding_bottom) / stride_y +
166 | 1,
167 | (inputDimen[2] - width + padding_left + padding_right) / stride_x +
168 | 1,
169 | inputDimen[3]};
170 | }
171 | shape_map_[output_name] = outputDimen;
172 | }
173 |
174 | /**
175 | * kernel_shape: [height, width]
176 | * strides: [stride_y, stride_x]
177 | * pads: [top, left, bottom, right]
178 | */
179 | void Shaper::Pool(const std::string &input_name,
180 | const std::vector<int32_t> kernel_shape,
181 | const std::vector<int32_t> pads,
182 | const std::vector<int32_t> strides,
183 | const std::string &output_name) {
184 | Shaper::Pool(input_name, pads[1], pads[3], pads[0], pads[2], strides[1],
185 | strides[0], kernel_shape[1], kernel_shape[0], output_name);
186 | }
187 |
188 | void Shaper::Softmax(const std::string &input_name,
189 | const std::string &output_name) {
190 | shape_map_[output_name] = shape_map_.at(input_name);
191 | }
192 |
193 | void Shaper::ReLU(const std::string &input_name,
194 | const std::string &output_name) {
195 | shape_map_[output_name] = shape_map_.at(input_name);
196 | }
197 |
198 | void Shaper::Concat(const std::vector<std::string> &input_names, uint32_t axis,
199 | const std::string &output_name) {
200 | vector<Shape> dimens;
201 | for (const auto &input : input_names) {
202 | auto &dimen = shape_map_.at(input);
203 | if (!dimens.empty()) {
204 | for (size_t i = 0; i < dimens[0].size(); i++) {
205 | if (i == axis) continue;
206 | if (dimen[i] != dimens[0][i]) {
207 | throw std::invalid_argument("Wrong input for concat");
208 | }
209 | }
210 | }
211 | dimens.push_back(shape_map_.at(input));
212 | }
213 |
214 | auto outputDimen = dimens[0];
215 | for (size_t i = 1; i < dimens.size(); i++) {
216 | outputDimen[axis] += dimens[i][axis];
217 | }
218 | shape_map_[output_name] = outputDimen;
219 | }
220 |
221 | void Shaper::LRN(const std::string &input_name,
222 | const std::string &output_name) {
223 | shape_map_[output_name] = shape_map_.at(input_name);
224 | }
225 |
226 | void Shaper::FC(const std::string &input_name, const std::string &weight_name,
227 | const std::string &output_name) {
228 | Shape weightDimen = shape_map_.at(weight_name); // num_units, input_size
229 | auto input_dimen = shape_map_.at(input_name);
230 | Shape outputDimen{input_dimen[0], weightDimen[0]};
231 | shape_map_[output_name] = outputDimen;
232 | }
233 |
234 | void Shaper::Eltwise(const std::string &input1_name,
235 | const std::string &input2_name,
236 | const std::string &output_name) {
237 | auto shape1 = shape_map_.at(input1_name);
238 | auto shape2 = shape_map_.at(input2_name);
239 | // TODO: broadcasting
240 | auto output_shape = shape1.size() >= shape2.size() ? shape1 : shape2;
241 | shape_map_[output_name] = output_shape;
242 | }
243 |
244 | void Shaper::Eltwise(const std::string &input1_name,
245 | const std::string &output_name) {
246 | shape_map_[output_name] = shape_map_.at(input1_name);
247 | }
248 |
249 | void Shaper::BatchToSpace(const std::string &input_name,
250 | const std::vector &block_sizes,
251 | const std::string &output_name) {
252 | auto input_dimen = shape_map_.at(input_name);
253 | auto output_dimen = {input_dimen[0] / Product(block_sizes),
254 | input_dimen[1] * block_sizes[0],
255 | input_dimen[2] * block_sizes[1], input_dimen[3]};
256 | shape_map_[output_name] = output_dimen;
257 | }
258 |
259 | void Shaper::SpaceToBatch(const std::string &input_name,
260 | const std::vector &block_sizes,
261 | const std::vector &pads,
262 | const std::string &output_name) {
263 | auto input_dimen = shape_map_.at(input_name);
264 | auto output_dimen = {input_dimen[0] * Product(block_sizes),
265 | (input_dimen[1] + pads[0] + pads[1]) / block_sizes[0],
266 | (input_dimen[2] + pads[2] + pads[3]) / block_sizes[1],
267 | input_dimen[3]};
268 | shape_map_[output_name] = output_dimen;
269 | }
270 |
271 | void Shaper::Affine(const std::string &input_name,
272 | const std::string &output_name) {
273 | shape_map_[output_name] = shape_map_.at(input_name);
274 | }
275 | void Shaper::Affine(const std::string &input_name, const std::string &a,
276 | const std::string &b, const std::string &output_name) {
277 | (void)a;
278 | (void)b;
279 | Shaper::Affine(input_name, output_name);
280 | }
281 |
282 | void Shaper::Identity(const std::string &input_name,
283 | const std::string &output_name) {
284 | shape_map_[output_name] = shape_map_.at(input_name);
285 | }
286 |
287 | void Shaper::AddShape(const std::string &name, const Shape &shape) {
288 | shape_map_[name] = shape;
289 | }
290 |
291 | size_t Shaper::GetSize(const std::string &name) {
292 | return static_cast<size_t>(Product(shape_map_.at(name)));
293 | }
294 |
295 | void Shaper::Clear() {
296 | shape_map_.clear();
297 | }
298 |
299 | std::ostream &operator<<(std::ostream &os, const Shaper &shaper) {
300 | for (const auto &p : shaper.shape_map_) {
301 | os << (p.first + ": ") << p.second << std::endl;
302 | }
303 | return os;
304 | }
305 |
--------------------------------------------------------------------------------
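The output shape computed by Shaper::Conv above is the usual convolution formula; as a quick worked check with illustrative numbers (not taken from the repository): with input height 224, kernel height 3, dilation 1, padding top/bottom 1 and stride 2, the output height is (224 - ((3 - 1) * 1 + 1) + 1 + 1) / 2 + 1 = 223 / 2 + 1 = 112 using integer division, and the width is computed the same way.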
/common/daq.fbs:
--------------------------------------------------------------------------------
1 | namespace DNN;
2 |
3 | /// Int8 is deprecated, but int8_data in table Tensor is still used, since a Tensor only stores values and does not care about the quantization method
4 | enum DataType:byte { Float32, Int8, Int32, Float16, Bool8,
5 | QUANT8_ASYMM, QUANT8_SYMM, QUANT8_SYMM_PER_CHANNEL,
6 | QUANT16_ASYMM, QUANT16_SYMM}
7 | enum FuseCode:byte { None, Relu, Relu1, Relu6 }
8 | enum LayerType:byte {
9 | // Auto generated layer types start
10 | CONV_2D,
11 | AVERAGE_POOL_2D,
12 | MAX_POOL_2D,
13 | RELU,
14 | SOFTMAX,
15 | FULLY_CONNECTED,
16 | ADD,
17 | CONCATENATION,
18 | DEPTHWISE_CONV_2D,
19 | BATCH_TO_SPACE_ND,
20 | SPACE_TO_BATCH_ND,
21 | STRIDED_SLICE,
22 | MUL,
23 | DEQUANTIZE,
24 | LOCAL_RESPONSE_NORMALIZATION,
25 | TANH,
26 | FLOOR,
27 | LOGISTIC,
28 | PRELU,
29 | POW,
30 | NEG,
31 | MINIMUM,
32 | MAXIMUM,
33 | LOG,
34 | ABS,
35 | EXP,
36 | SUB,
37 | // Auto generated layer types end
38 | }
39 |
40 | table Tensor {
41 | data_type:DataType;
42 | int8_data: [uint8];
43 | float32_data: [float32];
44 | shape: [uint32];
45 | name: string;
46 | /// since flatbuffers doesn't have a float16 data type, use uint16 instead
47 | float16_data: [uint16];
48 | bool8_data: [uint8];
49 | int32_data: [int32];
50 | }
51 |
52 | /// For weights, and for features
53 | table QuantInfo {
54 | name: string;
55 | data_type: DataType;
56 | /// a float32 array of scales; the length is 1 for non-per-channel quantization and channelDim for per-channel quantization
57 | scales: [float32];
58 | zero_point: int32;
59 | }
60 |
61 | table Input {
62 | shape:[uint32];
63 | name:string;
64 | }
65 |
66 | // Auto generated tables start
67 | table CONV_2D_Input {
68 | input: string;
69 | weight: string;
70 | bias: string;
71 | padding_left: int;
72 | padding_right: int;
73 | padding_top: int;
74 | padding_bottom: int;
75 | stride_x: int;
76 | stride_y: int;
77 | fuse_code: FuseCode;
78 | nchw: bool;
79 | dilation_x: int;
80 | dilation_y: int;
81 | }
82 |
83 | table CONV_2D_Output {
84 | output: string;
85 | }
86 |
87 | table CONV_2D {
88 | input: CONV_2D_Input;
89 | output: CONV_2D_Output;
90 | }
91 |
92 | table AVERAGE_POOL_2D_Input {
93 | input: string;
94 | padding_left: int;
95 | padding_right: int;
96 | padding_top: int;
97 | padding_bottom: int;
98 | stride_x: int;
99 | stride_y: int;
100 | kernel_width: int;
101 | kernel_height: int;
102 | fuse_code: FuseCode;
103 | }
104 |
105 | table AVERAGE_POOL_2D_Output {
106 | output: string;
107 | }
108 |
109 | table AVERAGE_POOL_2D {
110 | input: AVERAGE_POOL_2D_Input;
111 | output: AVERAGE_POOL_2D_Output;
112 | }
113 |
114 | table MAX_POOL_2D_Input {
115 | input: string;
116 | padding_left: int;
117 | padding_right: int;
118 | padding_top: int;
119 | padding_bottom: int;
120 | stride_x: int;
121 | stride_y: int;
122 | kernel_width: int;
123 | kernel_height: int;
124 | fuse_code: FuseCode;
125 | }
126 |
127 | table MAX_POOL_2D_Output {
128 | output: string;
129 | }
130 |
131 | table MAX_POOL_2D {
132 | input: MAX_POOL_2D_Input;
133 | output: MAX_POOL_2D_Output;
134 | }
135 |
136 | table RELU_Input {
137 | input: string;
138 | }
139 |
140 | table RELU_Output {
141 | output: string;
142 | }
143 |
144 | table RELU {
145 | input: RELU_Input;
146 | output: RELU_Output;
147 | }
148 |
149 | table SOFTMAX_Input {
150 | input: string;
151 | beta: float;
152 | }
153 |
154 | table SOFTMAX_Output {
155 | output: string;
156 | }
157 |
158 | table SOFTMAX {
159 | input: SOFTMAX_Input;
160 | output: SOFTMAX_Output;
161 | }
162 |
163 | table FULLY_CONNECTED_Input {
164 | input: string;
165 | weight: string;
166 | bias: string;
167 | fuse_code: FuseCode;
168 | }
169 |
170 | table FULLY_CONNECTED_Output {
171 | output: string;
172 | }
173 |
174 | table FULLY_CONNECTED {
175 | input: FULLY_CONNECTED_Input;
176 | output: FULLY_CONNECTED_Output;
177 | }
178 |
179 | table ADD_Input {
180 | input1: string;
181 | input2: string;
182 | fuse_code: FuseCode;
183 | }
184 |
185 | table ADD_Output {
186 | output: string;
187 | }
188 |
189 | table ADD {
190 | input: ADD_Input;
191 | output: ADD_Output;
192 | }
193 |
194 | table CONCATENATION_Input {
195 | inputs: [string];
196 | axis: int;
197 | }
198 |
199 | table CONCATENATION_Output {
200 | output: string;
201 | }
202 |
203 | table CONCATENATION {
204 | input: CONCATENATION_Input;
205 | output: CONCATENATION_Output;
206 | }
207 |
208 | table DEPTHWISE_CONV_2D_Input {
209 | input: string;
210 | weight: string;
211 | bias: string;
212 | padding_left: int;
213 | padding_right: int;
214 | padding_top: int;
215 | padding_bottom: int;
216 | stride_x: int;
217 | stride_y: int;
218 | depth_multiplier: int;
219 | fuse_code: FuseCode;
220 | }
221 |
222 | table DEPTHWISE_CONV_2D_Output {
223 | output: string;
224 | }
225 |
226 | table DEPTHWISE_CONV_2D {
227 | input: DEPTHWISE_CONV_2D_Input;
228 | output: DEPTHWISE_CONV_2D_Output;
229 | }
230 |
231 | table BATCH_TO_SPACE_ND_Input {
232 | input: string;
233 | block_sizes: [int];
234 | }
235 |
236 | table BATCH_TO_SPACE_ND_Output {
237 | output: string;
238 | }
239 |
240 | table BATCH_TO_SPACE_ND {
241 | input: BATCH_TO_SPACE_ND_Input;
242 | output: BATCH_TO_SPACE_ND_Output;
243 | }
244 |
245 | table SPACE_TO_BATCH_ND_Input {
246 | input: string;
247 | block_sizes: [int];
248 | pads: [int];
249 | }
250 |
251 | table SPACE_TO_BATCH_ND_Output {
252 | output: string;
253 | }
254 |
255 | table SPACE_TO_BATCH_ND {
256 | input: SPACE_TO_BATCH_ND_Input;
257 | output: SPACE_TO_BATCH_ND_Output;
258 | }
259 |
260 | table STRIDED_SLICE_Input {
261 | input: string;
262 | starts: [int];
263 | ends: [int];
264 | strides: [int];
265 | begin_mask: int;
266 | end_mask: int;
267 | shrink_axis_mask: int;
268 | }
269 |
270 | table STRIDED_SLICE_Output {
271 | output: string;
272 | }
273 |
274 | table STRIDED_SLICE {
275 | input: STRIDED_SLICE_Input;
276 | output: STRIDED_SLICE_Output;
277 | }
278 |
279 | table MUL_Input {
280 | input1: string;
281 | input2: string;
282 | fuse_code: FuseCode;
283 | }
284 |
285 | table MUL_Output {
286 | output: string;
287 | }
288 |
289 | table MUL {
290 | input: MUL_Input;
291 | output: MUL_Output;
292 | }
293 |
294 | table DEQUANTIZE_Input {
295 | input: string;
296 | }
297 |
298 | table DEQUANTIZE_Output {
299 | output: string;
300 | }
301 |
302 | table DEQUANTIZE {
303 | input: DEQUANTIZE_Input;
304 | output: DEQUANTIZE_Output;
305 | }
306 |
307 | table LOCAL_RESPONSE_NORMALIZATION_Input {
308 | input: string;
309 | radius: int;
310 | bias: float;
311 | alpha: float;
312 | beta: float;
313 | }
314 |
315 | table LOCAL_RESPONSE_NORMALIZATION_Output {
316 | output: string;
317 | }
318 |
319 | table LOCAL_RESPONSE_NORMALIZATION {
320 | input: LOCAL_RESPONSE_NORMALIZATION_Input;
321 | output: LOCAL_RESPONSE_NORMALIZATION_Output;
322 | }
323 |
324 | table TANH_Input {
325 | input: string;
326 | }
327 |
328 | table TANH_Output {
329 | output: string;
330 | }
331 |
332 | table TANH {
333 | input: TANH_Input;
334 | output: TANH_Output;
335 | }
336 |
337 | table FLOOR_Input {
338 | input: string;
339 | }
340 |
341 | table FLOOR_Output {
342 | output: string;
343 | }
344 |
345 | table FLOOR {
346 | input: FLOOR_Input;
347 | output: FLOOR_Output;
348 | }
349 |
350 | table LOGISTIC_Input {
351 | input: string;
352 | }
353 |
354 | table LOGISTIC_Output {
355 | output: string;
356 | }
357 |
358 | table LOGISTIC {
359 | input: LOGISTIC_Input;
360 | output: LOGISTIC_Output;
361 | }
362 |
363 | table PRELU_Input {
364 | input: string;
365 | alpha: string;
366 | }
367 |
368 | table PRELU_Output {
369 | output: string;
370 | }
371 |
372 | table PRELU {
373 | input: PRELU_Input;
374 | output: PRELU_Output;
375 | }
376 |
377 | table POW_Input {
378 | input: string;
379 | exp: string;
380 | }
381 |
382 | table POW_Output {
383 | output: string;
384 | }
385 |
386 | table POW {
387 | input: POW_Input;
388 | output: POW_Output;
389 | }
390 |
391 | table NEG_Input {
392 | input: string;
393 | }
394 |
395 | table NEG_Output {
396 | output: string;
397 | }
398 |
399 | table NEG {
400 | input: NEG_Input;
401 | output: NEG_Output;
402 | }
403 |
404 | table MINIMUM_Input {
405 | input1: string;
406 | input2: string;
407 | }
408 |
409 | table MINIMUM_Output {
410 | output: string;
411 | }
412 |
413 | table MINIMUM {
414 | input: MINIMUM_Input;
415 | output: MINIMUM_Output;
416 | }
417 |
418 | table MAXIMUM_Input {
419 | input1: string;
420 | input2: string;
421 | }
422 |
423 | table MAXIMUM_Output {
424 | output: string;
425 | }
426 |
427 | table MAXIMUM {
428 | input: MAXIMUM_Input;
429 | output: MAXIMUM_Output;
430 | }
431 |
432 | table LOG_Input {
433 | input: string;
434 | }
435 |
436 | table LOG_Output {
437 | output: string;
438 | }
439 |
440 | table LOG {
441 | input: LOG_Input;
442 | output: LOG_Output;
443 | }
444 |
445 | table ABS_Input {
446 | input: string;
447 | }
448 |
449 | table ABS_Output {
450 | output: string;
451 | }
452 |
453 | table ABS {
454 | input: ABS_Input;
455 | output: ABS_Output;
456 | }
457 |
458 | table EXP_Input {
459 | input: string;
460 | }
461 |
462 | table EXP_Output {
463 | output: string;
464 | }
465 |
466 | table EXP {
467 | input: EXP_Input;
468 | output: EXP_Output;
469 | }
470 |
471 | table SUB_Input {
472 | input1: string;
473 | input2: string;
474 | fuse_code: FuseCode;
475 | }
476 |
477 | table SUB_Output {
478 | output: string;
479 | }
480 |
481 | table SUB {
482 | input: SUB_Input;
483 | output: SUB_Output;
484 | }
485 |
486 | // Auto generated tables end
487 |
488 |
489 | table Layer {
490 | type:LayerType;
491 | // Auto generated fields start
492 | CONV_2D_param:CONV_2D;
493 | AVERAGE_POOL_2D_param:AVERAGE_POOL_2D;
494 | MAX_POOL_2D_param:MAX_POOL_2D;
495 | RELU_param:RELU;
496 | SOFTMAX_param:SOFTMAX;
497 | FULLY_CONNECTED_param:FULLY_CONNECTED;
498 | ADD_param:ADD;
499 | CONCATENATION_param:CONCATENATION;
500 | DEPTHWISE_CONV_2D_param:DEPTHWISE_CONV_2D;
501 | BATCH_TO_SPACE_ND_param:BATCH_TO_SPACE_ND;
502 | SPACE_TO_BATCH_ND_param:SPACE_TO_BATCH_ND;
503 | STRIDED_SLICE_param:STRIDED_SLICE;
504 | MUL_param:MUL;
505 | DEQUANTIZE_param:DEQUANTIZE;
506 | LOCAL_RESPONSE_NORMALIZATION_param:LOCAL_RESPONSE_NORMALIZATION;
507 | TANH_param:TANH;
508 | FLOOR_param:FLOOR;
509 | LOGISTIC_param:LOGISTIC;
510 | PRELU_param:PRELU;
511 | POW_param:POW;
512 | NEG_param:NEG;
513 | MINIMUM_param:MINIMUM;
514 | MAXIMUM_param:MAXIMUM;
515 | LOG_param:LOG;
516 | ABS_param:ABS;
517 | EXP_param:EXP;
518 | SUB_param:SUB;
519 | // Auto generated fields end
520 | }
521 |
522 | table Model {
523 | layers:[Layer];
524 | initializers:[Tensor];
525 | inputs:[Input];
526 | quant_infos:[QuantInfo];
527 | outputs:[string];
528 | version:uint32;
529 | }
530 |
531 | root_type Model;
532 |
--------------------------------------------------------------------------------
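For orientation, here is a minimal C++ sketch of how a buffer serialized against this schema could be walked with the flatc-generated API. The include path and the `dnn` namespace are assumptions (check the `namespace` declaration near the top of daq.fbs and the generated `common/daq_generated.h`); accessor names follow standard FlatBuffers codegen, and `DumpModel` is a hypothetical caller, not part of the library.

#include <iostream>

#include <common/daq_generated.h>  // header generated by flatc from daq.fbs (assumed path)

void DumpModel(const void *buf) {
    const dnn::Model *model = dnn::GetModel(buf);
    std::cout << "model version: " << model->version() << std::endl;
    if (model->layers() == nullptr) return;
    for (const dnn::Layer *layer : *model->layers()) {
        // Only the *_param field matching the layer's type is set; the others are null.
        if (const auto *dw = layer->DEPTHWISE_CONV_2D_param()) {
            std::cout << "DEPTHWISE_CONV_2D " << dw->input()->input()->str()
                      << " -> " << dw->output()->output()->str()
                      << ", depth_multiplier = " << dw->input()->depth_multiplier()
                      << std::endl;
        }
    }
}

Because tensors are referenced by name, resolving a layer's weights means looking the name up in `initializers()`; the `quant_infos` vector plays the same role for quantization parameters.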
/common/helper.h:
--------------------------------------------------------------------------------
1 | #ifndef DNNLIBRARY_HELPER_H
2 | #define DNNLIBRARY_HELPER_H
3 |
4 | #include <numeric>
5 | #include <vector>
6 |
7 | #include <sstream>
8 | #include <glog/logging.h>
9 |
10 | template <typename T>
11 | T Product(const std::vector<T> &v) {
12 |     return static_cast<T>(
13 |         accumulate(v.begin(), v.end(), 1, std::multiplies<T>()));
14 | }
15 |
16 | using css = const std::string;
17 |
18 | // Make a FOREACH macro
19 | #define FE_1(WHAT, X) WHAT(X)
20 | #define FE_2(WHAT, X, ...) WHAT(X) FE_1(WHAT, __VA_ARGS__)
21 | #define FE_3(WHAT, X, ...) WHAT(X) FE_2(WHAT, __VA_ARGS__)
22 | #define FE_4(WHAT, X, ...) WHAT(X) FE_3(WHAT, __VA_ARGS__)
23 | #define FE_5(WHAT, X, ...) WHAT(X) FE_4(WHAT, __VA_ARGS__)
24 | #define FE_6(WHAT, X, ...) WHAT(X) FE_5(WHAT, __VA_ARGS__)
25 | #define FE_7(WHAT, X, ...) WHAT(X) FE_6(WHAT, __VA_ARGS__)
26 | #define FE_8(WHAT, X, ...) WHAT(X) FE_7(WHAT, __VA_ARGS__)
27 | #define FE_9(WHAT, X, ...) WHAT(X) FE_8(WHAT, __VA_ARGS__)
28 | #define FE_10(WHAT, X, ...) WHAT(X) FE_9(WHAT, __VA_ARGS__)
29 | #define FE_11(WHAT, X, ...) WHAT(X) FE_10(WHAT, __VA_ARGS__)
30 | #define FE_12(WHAT, X, ...) WHAT(X) FE_11(WHAT, __VA_ARGS__)
31 | #define FE_13(WHAT, X, ...) WHAT(X) FE_12(WHAT, __VA_ARGS__)
32 | #define FE_14(WHAT, X, ...) WHAT(X) FE_13(WHAT, __VA_ARGS__)
33 | #define FE_15(WHAT, X, ...) WHAT(X) FE_14(WHAT, __VA_ARGS__)
34 | #define FE_16(WHAT, X, ...) WHAT(X) FE_15(WHAT, __VA_ARGS__)
35 | #define FE_17(WHAT, X, ...) WHAT(X) FE_16(WHAT, __VA_ARGS__)
36 | #define FE_18(WHAT, X, ...) WHAT(X) FE_17(WHAT, __VA_ARGS__)
37 | #define FE_19(WHAT, X, ...) WHAT(X) FE_18(WHAT, __VA_ARGS__)
38 | #define FE_20(WHAT, X, ...) WHAT(X) FE_19(WHAT, __VA_ARGS__)
39 | //... repeat as needed
40 |
41 | #define GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, \
42 | _15, _16, _17, _18, _19, _20, NAME, ...) \
43 | NAME
44 | #define FOR_EACH(action, ...) \
45 | GET_MACRO(__VA_ARGS__, FE_20, FE_19, FE_18, FE_17, FE_16, FE_15, FE_14, \
46 | FE_13, FE_12, FE_11, FE_10, FE_9, FE_8, FE_7, FE_6, FE_5, FE_4, \
47 | FE_3, FE_2, FE_1) \
48 | (action, __VA_ARGS__)
49 |
50 | #define FORZS(var, end, step) \
51 | for (auto var = decltype(end){0}; var < end; var += (step))
52 |
53 | #define FORZ(var, end) for (auto var = decltype(end){0}; var < end; var++)
54 |
55 | #define FOR(var, start, end) \
56 | for (auto var = decltype(end){start}; var < end; var++)
57 |
58 | #define STR(a) #a
59 | #define XSTR(a) STR(a)
60 |
61 | #define PNT_STR(s) << s << " "
62 | #define PNT_VAR(var) << XSTR(var) << " = " << (var) << ", "
63 | #define PNT_TO(stream, ...) stream FOR_EACH(PNT_VAR, __VA_ARGS__);
64 | #define PNT(...) PNT_TO(LOG(INFO), __VA_ARGS__)
65 |
66 | #define DNN_ASSERT(condition, ...) \
67 | if (!(condition)) { \
68 | std::stringstream ss; \
69 | ss << std::string(XSTR(condition)) \
70 | << std::string(" is not satisfied! ") \
71 | FOR_EACH(PNT_STR, __VA_ARGS__); \
72 | LOG(INFO) << ss.str(); \
73 | throw std::runtime_error(ss.str()); \
74 | }
75 |
76 | #define DNN_ASSERT_EQ(actual, expected) \
77 | DNN_ASSERT((actual) == (expected), XSTR(actual), "=", actual, \
78 | ", the expected value is", XSTR(expected), "(which is", \
79 | expected, ")")
80 |
81 | #endif /* DNNLIBRARY_HELPER_H */
82 |
--------------------------------------------------------------------------------
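A small usage sketch of the loop and assertion macros above. It assumes glog is initialized by the caller and that the header is reachable as `<common/helper.h>`; `CheckShape` is a hypothetical function, not part of the library.

#include <vector>

#include <common/helper.h>

void CheckShape(const std::vector<int> &shape, int expected_size) {
    // FORZ(i, n) loops i over [0, n)
    FORZ(i, shape.size()) {
        DNN_ASSERT(shape[i] > 0, "dimension", i, "should be positive");
    }
    // Logs and throws std::runtime_error if the product of the dims mismatches
    DNN_ASSERT_EQ(Product(shape), expected_size);
    PNT(expected_size);  // logs "expected_size = <value>, "
}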
/common/internal_vars.cpp:
--------------------------------------------------------------------------------
1 | #include <common/internal_vars.h>
2 |
3 | namespace dnn {
4 | const uint32_t CURRENT_MODEL_VERSION = 2;
5 | }
6 |
7 |
8 |
--------------------------------------------------------------------------------
/common/internal_vars.h:
--------------------------------------------------------------------------------
1 | #include <cstdint>
2 |
3 | namespace dnn {
4 | extern const uint32_t CURRENT_MODEL_VERSION;
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/common/log_helper.h:
--------------------------------------------------------------------------------
1 | #ifndef DNN_LOG_HELPER_H
2 | #define DNN_LOG_HELPER_H
3 |
4 | #include <ostream>
5 | #include <vector>
6 |
7 | #include <common/data_types.h>
8 |
9 | template <typename T>
10 | std::ostream& operator<<(std::ostream& output, std::vector<T> const& values) {
11 | output << "[";
12 | for (size_t i = 0; i < values.size(); i++) {
13 | output << values[i];
14 | if (i != values.size() - 1) {
15 | output << ", ";
16 | }
17 | }
18 | output << "]";
19 | return output;
20 | }
21 |
22 | inline std::ostream& operator<<(std::ostream& output, const dnn::FuseCode& value) {
23 | switch (value) {
24 | case dnn::FuseCode::NONE: {
25 | output << "FuseCode::NONE";
26 | break;
27 | }
28 | case dnn::FuseCode::RELU: {
29 | output << "FuseCode::RELU";
30 | break;
31 | }
32 | case dnn::FuseCode::RELU1: {
33 | output << "FuseCode::RELU1";
34 | break;
35 | }
36 | case dnn::FuseCode::RELU6: {
37 | output << "FuseCode::RELU6";
38 | break;
39 | }
40 | }
41 | return output;
42 | }
43 |
44 | #endif
45 |
--------------------------------------------------------------------------------
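With these overloads in scope, vectors and FuseCode values can be streamed directly; a short usage sketch (glog shown here, but any std::ostream works, and `LogExample` is a hypothetical caller):

#include <vector>

#include <common/log_helper.h>
#include <glog/logging.h>

void LogExample() {
    std::vector<int> pads{1, 1, 2, 2};
    LOG(INFO) << "pads = " << pads;                  // prints "pads = [1, 1, 2, 2]"
    LOG(INFO) << "fuse = " << dnn::FuseCode::RELU6;  // prints "fuse = FuseCode::RELU6"
}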
/dnnlibrary/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # dnn_protobuf_generate_cpp(ONNX_SRCS ONNX_HDRS onnx.proto3)
2 |
3 | set(dnnlibrary_src
4 | ${PROJECT_SOURCE_DIR}/include/dnnlibrary/ModelBuilder.h
5 | ${PROJECT_SOURCE_DIR}/include/dnnlibrary/Model.h
6 | ${PROJECT_SOURCE_DIR}/include/dnnlibrary/DaqReader.h
7 | ${PROJECT_SOURCE_DIR}/include/dnnlibrary/nnapi_implementation.h
8 | android_log_helper.h
9 | flatbuffers_helper.h
10 | ModelBuilder.cpp
11 | ModelBuilderImpl.cpp
12 | Model.cpp
13 | DaqReader.cpp
14 | NeuralNetworksWrapper.cpp
15 | nnapi_implementation.cc
16 | ${PROJECT_SOURCE_DIR}/include/common/Shaper.h
17 | ${PROJECT_SOURCE_DIR}/common/Shaper.cpp
18 | ${PROJECT_SOURCE_DIR}/include/common/StrKeyMap.h
19 | ${PROJECT_SOURCE_DIR}/common/internal_vars.cpp
20 | ${PROJECT_SOURCE_DIR}/common/internal_vars.h
21 | )
22 | if (DNN_READ_ONNX)
23 | list(APPEND dnnlibrary_src
24 | OnnxReader.cpp
25 | ${PROJECT_SOURCE_DIR}/include/dnnlibrary/OnnxReader.h)
26 | endif()
27 |
28 | add_library(dnnlibrary
29 | ${dnnlibrary_src}
30 | )
31 |
32 | target_include_directories(
33 | dnnlibrary
34 | PUBLIC
35 | $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
36 | $<INSTALL_INTERFACE:include>
37 | PRIVATE
38 | ${PROJECT_SOURCE_DIR}
39 | ${CMAKE_CURRENT_SOURCE_DIR}
40 | ${CMAKE_CURRENT_BINARY_DIR})
41 |
42 | find_library(
43 | android-lib
44 | android
45 | )
46 |
47 | find_library(
48 | log-lib
49 | log
50 | )
51 |
52 | find_library(
53 | neural-networks-lib
54 | neuralnetworks
55 | )
56 |
57 | target_link_libraries(
58 | dnnlibrary
59 | glog::glog
60 | flatbuffers
61 | ${android-lib}
62 | ${log-lib}
63 | ${neural-networks-lib}
64 | )
65 |
66 | if (DNN_READ_ONNX)
67 | target_link_libraries(
68 | dnnlibrary
69 | onnx2daq
70 | )
71 | target_compile_definitions(
72 | dnnlibrary
73 | PUBLIC
74 | DNN_READ_ONNX
75 | )
76 | else()
77 | # Reading ONNX needs protobuf, which produces warnings
78 | treat_warnings_as_errors(dnnlibrary)
79 | endif()
80 |
81 | if (DNN_CMAKE_INSTALL)
82 | include(GNUInstallDirs)
83 | install(TARGETS dnnlibrary
84 | EXPORT DNNLibraryTargets
85 | ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
86 | LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
87 | )
88 | install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/
89 | DESTINATION include)
90 | endif()
91 | add_library(dnnlibrary::dnnlibrary ALIAS dnnlibrary)
92 |
93 | if (DNN_BUILD_JNI)
94 | add_library(
95 | daq-jni
96 | SHARED
97 | JavaWrapper.cpp)
98 | target_link_libraries(daq-jni
99 | dnnlibrary)
100 | if (DNN_CMAKE_INSTALL)
101 | install(TARGETS daq-jni
102 | EXPORT DNNLibraryTargets
103 | ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
104 | endif()
105 | add_library(dnnlibrary::daq-jni ALIAS daq-jni)
106 | endif()
107 |
108 | if (DNN_CMAKE_INSTALL)
109 | install(EXPORT DNNLibraryTargets
110 | NAMESPACE dnnlibrary::
111 | DESTINATION share/cmake/DNNLibrary
112 | )
113 |
114 | configure_file("${PROJECT_SOURCE_DIR}/cmake/DNNLibraryConfig.cmake.in"
115 | "${PROJECT_BINARY_DIR}/cmake/DNNLibraryConfig.cmake"
116 | @ONLY)
117 |
118 | install(FILES "${PROJECT_BINARY_DIR}/cmake/DNNLibraryConfig.cmake" DESTINATION share/cmake/DNNLibrary)
119 | endif()
120 |
--------------------------------------------------------------------------------
/dnnlibrary/JavaWrapper.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Created by daquexian on 2017/11/12.
3 | //
4 |
5 | #include