├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── issue-report.md
│   │   └── others.md
│   └── workflows
│       ├── ci_android.yml
│       ├── ci_arm.yml
│       ├── ci_ubuntu.yml
│       └── ci_windows.yml
├── .gitignore
├── .gitmodules
├── 00_doc
│   ├── class_diagram.drawio
│   ├── class_diagram.png
│   ├── logo.png
│   ├── overview.drawio
│   └── overview.png
├── 01_script
│   ├── build_run_linux.sh
│   └── build_run_windows.ps1
├── LICENSE
├── NOTICE.md
├── README.md
├── inference_helper
│   ├── CMakeLists.txt
│   ├── inference_helper.cpp
│   ├── inference_helper.h
│   ├── inference_helper_armnn.cpp
│   ├── inference_helper_armnn.h
│   ├── inference_helper_libtorch.cpp
│   ├── inference_helper_libtorch.h
│   ├── inference_helper_log.h
│   ├── inference_helper_mnn.cpp
│   ├── inference_helper_mnn.h
│   ├── inference_helper_ncnn.cpp
│   ├── inference_helper_ncnn.h
│   ├── inference_helper_nnabla.cpp
│   ├── inference_helper_nnabla.h
│   ├── inference_helper_onnx_runtime.cpp
│   ├── inference_helper_onnx_runtime.h
│   ├── inference_helper_opencv.cpp
│   ├── inference_helper_opencv.h
│   ├── inference_helper_sample.cpp
│   ├── inference_helper_sample.h
│   ├── inference_helper_snpe.cpp
│   ├── inference_helper_snpe.h
│   ├── inference_helper_tensorflow.cpp
│   ├── inference_helper_tensorflow.h
│   ├── inference_helper_tensorflow_lite.cpp
│   ├── inference_helper_tensorflow_lite.h
│   ├── inference_helper_tensorrt.cpp
│   ├── inference_helper_tensorrt.h
│   ├── snpe
│   │   ├── CreateUserBuffer.cpp
│   │   ├── CreateUserBuffer.hpp
│   │   ├── NOTICE.txt
│   │   ├── Util.cpp
│   │   ├── Util.hpp
│   │   ├── udlExample.cpp
│   │   └── udlExample.hpp
│   └── tensorrt
│       ├── BatchStream.h
│       ├── EntropyCalibrator.h
│       ├── ErrorRecorder.h
│       ├── calibration
│       │   ├── batchPrepare.py
│       │   ├── sample_org
│       │   │   ├── 000000000139.jpg
│       │   │   ├── 000000000285.jpg
│       │   │   ├── 000000000632.jpg
│       │   │   ├── 000000000724.jpg
│       │   │   ├── 000000000776.jpg
│       │   │   ├── 000000000785.jpg
│       │   │   ├── 000000000802.jpg
│       │   │   ├── 000000000872.jpg
│       │   │   ├── 000000000885.jpg
│       │   │   ├── 000000001000.jpg
│       │   │   ├── 000000001268.jpg
│       │   │   ├── 000000001296.jpg
│       │   │   ├── 000000001353.jpg
│       │   │   ├── 000000001425.jpg
│       │   │   ├── 000000001490.jpg
│       │   │   ├── 000000001503.jpg
│       │   │   ├── 000000001532.jpg
│       │   │   ├── 000000001584.jpg
│       │   │   ├── 000000001675.jpg
│       │   │   └── 000000001761.jpg
│       │   └── sample_ppm
│       │       ├── .gitattributes
│       │       ├── 000000000139.ppm
│       │       ├── 000000000285.ppm
│       │       ├── 000000000632.ppm
│       │       ├── 000000000724.ppm
│       │       ├── 000000000776.ppm
│       │       ├── 000000000785.ppm
│       │       ├── 000000000802.ppm
│       │       ├── 000000000872.ppm
│       │       ├── 000000000885.ppm
│       │       ├── 000000001000.ppm
│       │       ├── 000000001268.ppm
│       │       ├── 000000001296.ppm
│       │       ├── 000000001353.ppm
│       │       ├── 000000001425.ppm
│       │       ├── 000000001490.ppm
│       │       ├── 000000001503.ppm
│       │       ├── 000000001532.ppm
│       │       ├── 000000001584.ppm
│       │       ├── 000000001675.ppm
│       │       ├── 000000001761.ppm
│       │       └── list.txt
│       ├── common.h
│       ├── logger.cpp
│       ├── logger.h
│       └── logging.h
└── third_party
    ├── cmakes
    │   ├── armnn.cmake
    │   ├── libtorch.cmake
    │   ├── mnn.cmake
    │   ├── ncnn.cmake
    │   ├── nnabla.cmake
    │   ├── onnx_runtime.cmake
    │   ├── sample.cmake
    │   ├── snpe.cmake
    │   ├── tensorflow.cmake
    │   ├── tflite.cmake
    │   ├── tflite_edgetpu.cmake
    │   ├── tflite_edgetpu_pipeline.cmake
    │   └── tflite_gpu.cmake
    └── download_prebuilt_libraries.sh
/.github/ISSUE_TEMPLATE/issue-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Issue report
3 | about: issue, bug, question
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Environment
11 | - Hardware: Device, CPU, GPU, etc.
12 | - Software: OS, Compiler, etc.
13 | (Please include version information)
14 |
15 | ## Issue Details
16 | A clear and concise description of what the issue is.
17 |
18 | ## How to Reproduce
19 | Steps to reproduce the behavior. Please include your cmake command.
20 |
21 | ## Error Log
22 | ```
23 | error log
24 | ```
25 |
26 | ## Additional Information
27 | Add any other context about the problem here.
28 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/others.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Others
3 | about: other topics
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/workflows/ci_android.yml:
--------------------------------------------------------------------------------
1 | name: CI Android
2 |
3 | on:
4 | push:
5 | tags: 'v*'
6 | branches: [ master ]
7 | pull_request:
8 | branches: [ master ]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-20.04
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Get project source code
18 | run: |
19 | git clone https://github.com/iwatake2222/InferenceHelper_Sample
20 | cd InferenceHelper_Sample
21 | rm -rf InferenceHelper
22 | mv * ../../.
23 | cd ../../
24 | sh ./InferenceHelper/third_party/download_prebuilt_libraries.sh 1
25 |
26 | - name: Install Requirements
27 | run: |
28 | cd ../
29 | sudo apt update
30 | sudo apt install -y g++ git cmake wget unzip vulkan-utils libvulkan1 libvulkan-dev
31 |
32 | ### Android NDK ###
33 | wget https://dl.google.com/android/repository/android-ndk-r23b-linux.zip
34 | unzip android-ndk-r23b-linux.zip
35 | export ANDROID_NDK_HOME=`pwd`/android-ndk-r23b
36 |
37 | ### Prepare OpenCV For Android, and don't use rtti to avoid build error in ncnn ###
38 | wget https://github.com/opencv/opencv/releases/download/4.5.4/opencv-4.5.4-android-sdk.zip
39 | unzip opencv-4.5.4-android-sdk.zip
40 | mv OpenCV-android-sdk/sdk ViewAndroid/.
41 | sed -i s/"#define HAVE_OPENCV_FLANN"//g ViewAndroid/sdk/native/jni/include/opencv2/opencv_modules.hpp
42 |
43 | ### Delete setting to specify Inference Helper Type ###
44 | sed -i "/INFERENCE_HELPER_ENABLE/d" ViewAndroid/app/src/main/cpp/CMakeLists.txt
45 |
46 |
47 | - name: Build
48 | run: |
49 | cd ../
50 | export ANDROID_NDK_HOME=`pwd`/android-ndk-r23b
51 |
52 | cd ViewAndroid/app/src/main/cpp
53 | mkdir -p build && cd build
54 | echo "[CI Building] INFERENCE_HELPER_ENABLE_OPENCV"
55 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_OPENCV=ON && make -j4 && rm -rf *
56 | echo "[CI Building] INFERENCE_HELPER_ENABLE_TFLITE"
57 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_TFLITE=ON && make -j4 && rm -rf *
58 | echo "[CI Building] INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK"
59 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK=ON && make -j4 && rm -rf *
60 | echo "[CI Building] INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU"
61 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU=ON && make -j4 && rm -rf *
62 | echo "[CI Building] INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_NNAPI"
63 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_NNAPI=ON && make -j4 && rm -rf *
64 | echo "[CI Building] INFERENCE_HELPER_ENABLE_NCNN"
65 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_NCNN=ON && make -j4 && rm -rf *
66 | echo "[CI Building] INFERENCE_HELPER_ENABLE_MNN"
67 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_MNN=ON && make -j4 && rm -rf *
68 | echo "[CI Building] INFERENCE_HELPER_ENABLE_ONNX_RUNTIME"
69 | cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 -DINFERENCE_HELPER_ENABLE_ONNX_RUNTIME=ON && make -j4 && rm -rf *
70 |
--------------------------------------------------------------------------------
/.github/workflows/ci_arm.yml:
--------------------------------------------------------------------------------
1 | name: CI Arm
2 |
3 | on:
4 | push:
5 | tags: 'v*'
6 | branches: [ master ]
7 | pull_request:
8 | branches: [ master ]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-18.04
14 | strategy:
15 | matrix:
16 | include:
17 | - arch: armv7
18 | distro: ubuntu18.04
19 | artifact_name: time_inference_armv7.txt
20 | - arch: aarch64
21 | distro: ubuntu20.04
22 | artifact_name: time_inference_aarch64.txt
23 |
24 | steps:
25 | - uses: actions/checkout@v2
26 | - name: Get project source code
27 | run: |
28 | git clone https://github.com/iwatake2222/InferenceHelper_Sample
29 | cd InferenceHelper_Sample
30 | rm -rf InferenceHelper
31 | mv * ../../.
32 | cd ../../
33 | sh ./InferenceHelper/third_party/download_prebuilt_libraries.sh 1
34 | sh ./download_resource.sh
35 |
36 | - name: Build and Run
37 | uses: uraimo/run-on-arch-action@v2.0.5
38 | with:
39 | arch: ${{ matrix.arch }}
40 | distro: ${{ matrix.distro }}
41 | githubToken: ${{ github.token }}
42 | shell: /bin/sh
43 | dockerRunArgs: |
44 | --volume ${PWD}/../:/InferenceHelper_Sample
45 | install: |
46 | apt-get update -q -y
47 | apt-get install -q -y git g++ cmake
48 | apt-get install -q -y libopencv-dev
49 | apt-get install -q -y vulkan-utils libvulkan1 libvulkan-dev
50 | run: |
51 | cd /InferenceHelper_Sample
52 | echo "inference time" > time_inference_linux.txt
53 | case "${{ matrix.arch }}" in
54 | armv7)
55 | sh ./01_script/build_run_linux.sh TFLITE
56 | # sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_XNNPACK
57 | sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_EDGETPU 1
58 | # sh ./01_script/build_run_linux.sh OPENCV
59 | # sh ./01_script/build_run_linux.sh NCNN
60 | # sh ./01_script/build_run_linux.sh MNN
61 | # sh ./01_script/build_run_linux.sh ONNX_RUNTIME
62 | # sh ./01_script/build_run_linux.sh LIBTORCH
63 | # sh ./01_script/build_run_linux.sh TENSORFLOW
64 | ;;
65 | aarch64)
66 | sh ./01_script/build_run_linux.sh TFLITE
67 | sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_XNNPACK
68 | sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_EDGETPU 1
69 | sh ./01_script/build_run_linux.sh OPENCV
70 | # sh ./01_script/build_run_linux.sh NCNN
71 | sh ./01_script/build_run_linux.sh MNN
72 | sh ./01_script/build_run_linux.sh ARMNN
73 | # sh ./01_script/build_run_linux.sh NNABLA
74 | sh ./01_script/build_run_linux.sh ONNX_RUNTIME
75 | # sh ./01_script/build_run_linux.sh LIBTORCH
76 | # sh ./01_script/build_run_linux.sh TENSORFLOW
77 | ;;
78 | esac
79 | mv ./time_inference_linux.txt "${{ matrix.artifact_name }}"
80 | - name: Move artifacts (because relative paths seem not to be allowed)
81 | run: mv ../"${{ matrix.artifact_name }}" "${{ matrix.artifact_name }}"
82 | - name: Upload Artifacts
83 | uses: actions/upload-artifact@v2
84 | with:
85 | name: ${{ matrix.artifact_name }}
86 | path: ${{ matrix.artifact_name }}
87 |
--------------------------------------------------------------------------------
/.github/workflows/ci_ubuntu.yml:
--------------------------------------------------------------------------------
1 | name: CI Ubuntu
2 |
3 | on:
4 | push:
5 | tags: 'v*'
6 | branches: [ master ]
7 | pull_request:
8 | branches: [ master ]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-20.04
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Get project source code
18 | run: |
19 | git clone https://github.com/iwatake2222/InferenceHelper_Sample
20 | cd InferenceHelper_Sample
21 | rm -rf InferenceHelper
22 | mv * ../../.
23 | cd ../../
24 | sh ./InferenceHelper/third_party/download_prebuilt_libraries.sh 1
25 | sh ./download_resource.sh
26 |
27 | - name: Install Requirements
28 | run: |
29 | sudo apt update
30 | sudo apt install -y g++ git cmake wget unzip vulkan-utils libvulkan1 libvulkan-dev
31 |
32 | # OpenCV for INFERENCE_HELPER_ENABLE_OPENCV
33 | sudo apt install -y libopencv-dev
34 |
35 | # Vulkan for INFERENCE_HELPER_ENABLE_NCNN
36 | wget https://sdk.lunarg.com/sdk/download/latest/linux/vulkan-sdk.tar.gz
37 | tar xzvf vulkan-sdk.tar.gz
38 | export VULKAN_SDK=$(pwd)/1.2.198.1/x86_64
39 |
40 | - name: Build and Run
41 | run: |
42 | export VULKAN_SDK=$(pwd)/1.2.198.1/x86_64
43 | cd ../
44 |
45 | # Build and Run
46 | echo "inference time" > time_inference_linux.txt
47 | sh ./01_script/build_run_linux.sh TFLITE
48 | sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_XNNPACK
49 | sh ./01_script/build_run_linux.sh TFLITE_DELEGATE_EDGETPU 1
50 | sh ./01_script/build_run_linux.sh OPENCV
51 | sh ./01_script/build_run_linux.sh NCNN
52 | sh ./01_script/build_run_linux.sh MNN
53 | sh ./01_script/build_run_linux.sh ARMNN
54 | # sh ./01_script/build_run_linux.sh NNABLA
55 | sh ./01_script/build_run_linux.sh ONNX_RUNTIME
56 | sh ./01_script/build_run_linux.sh LIBTORCH
57 | sh ./01_script/build_run_linux.sh TENSORFLOW
58 | mv time_inference_linux.txt InferenceHelper/.
59 |
60 | - name: Upload Artifacts
61 | uses: actions/upload-artifact@v2
62 | with:
63 | name: time_inference_linux.txt
64 | path: time_inference_linux.txt
65 |
--------------------------------------------------------------------------------
/.github/workflows/ci_windows.yml:
--------------------------------------------------------------------------------
1 | name: CI Windows
2 |
3 | on:
4 | push:
5 | tags: 'v*'
6 | branches: [ master ]
7 | pull_request:
8 | branches: [ master ]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: windows-2019
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Get project source code
18 | run: |
19 | git clone https://github.com/iwatake2222/InferenceHelper_Sample
20 | cd InferenceHelper_Sample
21 | Remove-Item -Recurse -Force InferenceHelper
22 | mv * ../../.
23 | cd ../../
24 | sh ./InferenceHelper/third_party/download_prebuilt_libraries.sh 1
25 | sh ./download_resource.sh
26 |
27 | - name: setup-msbuild
28 | uses: microsoft/setup-msbuild@v1.1
29 | with:
30 | vs-version: '[16.0,16.20)' # Make sure to use Visual Studio 2019
31 |
32 | - name: Install Requirements
33 | run: |
34 | # OpenCV for INFERENCE_HELPER_ENABLE_OPENCV
35 | choco install opencv -Version 4.5.4
36 |
37 | # Vulkan for INFERENCE_HELPER_ENABLE_NCNN
38 | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.2.189.0/windows/VulkanSDK-1.2.189.0-Installer.exe?Human=true -OutFile VulkanSDK.exe
39 | $installer = Start-Process -FilePath VulkanSDK.exe -Wait -PassThru -ArgumentList @("/S");
40 | $installer.WaitForExit();
41 |
42 | - name: Build and Run
43 | shell: powershell
44 | run: |
45 | # Setup OpenCV for INFERENCE_HELPER_ENABLE_OPENCV
46 | $env:OPENCV_DIR="C:/tools/opencv/build/x64/vc15/lib"
47 | $env:Path+=";C:/tools/opencv/build/x64/vc15/bin"
48 |
49 | # Setup Vulkan for INFERENCE_HELPER_ENABLE_NCNN
50 | $env:VULKAN_SDK="C:/VulkanSDK/1.2.189.0"
51 | $env:Path+=";C:/VulkanSDK/1.2.189.0/Bin"
52 |
53 | # Build and Run
54 | cd ../
55 | if($?) { echo "inference time" > time_inference_windows.txt }
56 | if($?) { ./01_script/build_run_windows.ps1 TFLITE }
57 | if($?) { ./01_script/build_run_windows.ps1 TFLITE_DELEGATE_XNNPACK }
58 | if($?) { ./01_script/build_run_windows.ps1 TFLITE_DELEGATE_EDGETPU -BUILD_ONLY }
59 | if($?) { ./01_script/build_run_windows.ps1 OPENCV }
60 | if($?) { ./01_script/build_run_windows.ps1 NCNN -BUILD_ONLY }
61 | if($?) { ./01_script/build_run_windows.ps1 MNN }
62 | if($?) { ./01_script/build_run_windows.ps1 NNABLA }
63 | if($?) { ./01_script/build_run_windows.ps1 ONNX_RUNTIME }
64 | if($?) { ./01_script/build_run_windows.ps1 LIBTORCH }
65 | if($?) { ./01_script/build_run_windows.ps1 TENSORFLOW }
66 | mv time_inference_windows.txt InferenceHelper/.
67 | exit $LASTEXITCODE
68 |
69 | - name: Upload Artifacts
70 | uses: actions/upload-artifact@v2
71 | with:
72 | name: time_inference_windows.txt
73 | path: time_inference_windows.txt
74 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | build/
3 | ThirdParty/
4 | third_party/
5 | resource/
6 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "ThirdParty/tensorflow"]
2 | path = third_party/tensorflow
3 | url = https://github.com/tensorflow/tensorflow
4 | [submodule "third_party/tensorflow_deps/abseil-cpp"]
5 | path = third_party/tensorflow_deps/abseil-cpp
6 | url = https://github.com/abseil/abseil-cpp
7 | [submodule "third_party/tensorflow_deps/flatbuffers"]
8 | path = third_party/tensorflow_deps/flatbuffers
9 | url = https://github.com/google/flatbuffers
10 |
--------------------------------------------------------------------------------
/00_doc/class_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iwatake2222/InferenceHelper/7c975cf9d32016aa6bfceabf6922fe6d808dc5eb/00_doc/class_diagram.png
--------------------------------------------------------------------------------
/00_doc/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iwatake2222/InferenceHelper/7c975cf9d32016aa6bfceabf6922fe6d808dc5eb/00_doc/logo.png
--------------------------------------------------------------------------------
/00_doc/overview.drawio:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/00_doc/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iwatake2222/InferenceHelper/7c975cf9d32016aa6bfceabf6922fe6d808dc5eb/00_doc/overview.png
--------------------------------------------------------------------------------
/01_script/build_run_linux.sh:
--------------------------------------------------------------------------------
1 | # docker create -v /mnt/c/iwatake/devel:/root/devel -v /etc/localtime:/etc/localtime:ro -it --name=ubuntu20 ubuntu:20.04
2 | # docker start ubuntu20
3 | # docker exec -it ubuntu20 bash
4 |
5 | # Check if sudo is needed/available (bare `sudo` exits with a small code when installed; a missing command returns 127)
6 | sudo
7 | if [ "$?" -le 10 ]
8 | then
9 | L_SUDO=sudo
10 | else
11 | L_SUDO=
12 | fi
13 |
14 | set -e
15 |
16 | FRAMEWORK_NAME=${1:-"MNN"}
17 | BUILD_ONLY=${2:-0}
18 | LOG_HEADER="[CI_LINUX_${FRAMEWORK_NAME}]"
19 | echo "${LOG_HEADER} Start"
20 |
21 | # ${L_SUDO} apt update
22 | # ${L_SUDO} apt install -y g++ git cmake wget unzip vulkan-utils libvulkan1 libvulkan-dev
23 |
24 |
25 | echo "${LOG_HEADER} Build Start"
26 | cd pj_cls_mobilenet_v2_wo_opencv
27 | rm -rf build
28 | mkdir build && cd build
29 | cmake .. -DINFERENCE_HELPER_ENABLE_${FRAMEWORK_NAME}=on
30 | make -j4
31 | echo "${LOG_HEADER} Build End"
32 |
33 | if [ ${BUILD_ONLY} -ne 0 ]; then
34 | exit 0
35 | fi
36 |
37 | echo "${LOG_HEADER} Run Start"
38 | ./main
39 | # if [ ${?} -ne 0 ]; then
40 | # echo "${LOG_HEADER} Run Error"
41 | # exit 1
42 | # fi
43 | echo "${LOG_HEADER} Run End"
44 |
45 |
46 | echo "$FRAMEWORK_NAME" >> ../../time_inference_linux.txt
47 | cat time_inference.txt >> ../../time_inference_linux.txt
48 |
49 | echo "${LOG_HEADER} End"
50 |
--------------------------------------------------------------------------------
/01_script/build_run_windows.ps1:
--------------------------------------------------------------------------------
1 | # Run on Visual Studio 2019 Developer PowerShell
2 | # You may need the following command before executing this script
3 | # Set-ExecutionPolicy Unrestricted -Scope Process
4 |
5 | Param(
6 | [string]$FRAMEWORK_NAME = "MNN",
7 | [switch]$BUILD_ONLY
8 | )
9 | $LOG_HEADER = "[CI_WINDOWS_${FRAMEWORK_NAME}]"
10 | echo "${LOG_HEADER} Start"
11 |
12 | echo "${LOG_HEADER} Build Start"
13 | if(Test-Path build) {
14 | del -R build
15 | }
16 | mkdir build
17 | cd build
18 | cmake -DINFERENCE_HELPER_ENABLE_"$FRAMEWORK_NAME"=on ../pj_cls_mobilenet_v2_wo_opencv
19 | MSBuild -m:4 ./main.sln /p:Configuration=Release
20 | if(!($?)) {
21 | echo "${LOG_HEADER} Build Error"
22 | cd ..
23 | exit -1
24 | }
25 | echo "${LOG_HEADER} Build End"
26 |
27 | if($BUILD_ONLY) {
28 | cd ..
29 | exit 0
30 | }
31 |
32 |
33 | echo "${LOG_HEADER} Run Start"
34 | ./Release/main.exe
35 | if(!($?)) {
36 | echo "${LOG_HEADER} Run Error"
37 | cd ..
38 | exit -1
39 | }
40 | echo "${LOG_HEADER} Run End"
41 |
42 | cd ..
43 | echo "$FRAMEWORK_NAME" >> time_inference_windows.txt
44 | cat build/time_inference.txt >> time_inference_windows.txt
45 |
46 | echo "${LOG_HEADER} End"
47 |
48 | exit 0
49 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_
16 | #define INFERENCE_HELPER_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <string>
21 | #include <vector>
22 | #include <array>
23 | #include <memory>
24 | #include <utility>
25 |
26 | class TensorInfo {
27 | public:
28 | enum {
29 | kTensorTypeNone,
30 | kTensorTypeUint8,
31 | kTensorTypeInt8,
32 | kTensorTypeFp32,
33 | kTensorTypeInt32,
34 | kTensorTypeInt64,
35 | };
36 |
37 | public:
38 | TensorInfo()
39 | : name("")
40 | , id(-1)
41 | , tensor_type(kTensorTypeNone)
42 | , is_nchw(true)
43 | {}
44 | ~TensorInfo() {}
45 |
46 | int32_t GetElementNum() const
47 | {
48 | int32_t element_num = 1;
49 | for (const auto& dim : tensor_dims) {
50 | element_num *= dim;
51 | }
52 | return element_num;
53 | }
54 |
55 | int32_t GetBatch() const
56 | {
57 | if (tensor_dims.size() <= 0) return -1;
58 | return tensor_dims[0];
59 | }
60 |
61 | int32_t GetChannel() const
62 | {
63 | if (is_nchw) {
64 | if (tensor_dims.size() <= 1) return -1;
65 | return tensor_dims[1];
66 | } else {
67 | if (tensor_dims.size() <= 3) return -1;
68 | return tensor_dims[3];
69 | }
70 | }
71 |
72 | int32_t GetHeight() const
73 | {
74 | if (is_nchw) {
75 | if (tensor_dims.size() <= 2) return -1;
76 | return tensor_dims[2];
77 | } else {
78 | if (tensor_dims.size() <= 1) return -1;
79 | return tensor_dims[1];
80 | }
81 | }
82 |
83 | int32_t GetWidth() const
84 | {
85 | if (is_nchw) {
86 | if (tensor_dims.size() <= 3) return -1;
87 | return tensor_dims[3];
88 | } else {
89 | if (tensor_dims.size() <= 2) return -1;
90 | return tensor_dims[2];
91 | }
92 | }
93 |
94 | public:
95 | std::string name; // [In] Set the name of the tensor
96 | int32_t id; // [Out] Do not modify (used in InferenceHelper)
97 | int32_t tensor_type; // [In] The type of the tensor (e.g. kTensorTypeFp32)
98 | std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimensions of the tensor. (If empty at initialization, the size is updated from model info.)
99 | // OutputTensorInfo: [Out] The dimensions of the tensor are set from model information
100 | bool is_nchw; // [In] NCHW or NHWC
101 | };
102 |
103 | class InputTensorInfo : public TensorInfo {
104 | public:
105 | enum {
106 | kDataTypeImage,
107 | kDataTypeBlobNhwc, // data which has already been preprocessed (color conversion, resize, normalization, etc.)
108 | kDataTypeBlobNchw,
109 | };
110 |
111 | public:
112 | InputTensorInfo()
113 | : data(nullptr)
114 | , data_type(kDataTypeImage)
115 | , image_info({ -1, -1, -1, -1, -1, -1, -1, true, false })
116 | , normalize({ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f })
117 | {}
118 |
119 | InputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
120 | : InputTensorInfo()
121 | {
122 | name = name_;
123 | tensor_type = tensor_type_;
124 | is_nchw = is_nchw_;
125 | }
126 |
127 | ~InputTensorInfo() {}
128 |
129 | public:
130 | void* data; // [In] Set the pointer to image/blob
131 | int32_t data_type; // [In] Set the type of data_ (e.g. kDataTypeImage)
132 |
133 | struct {
134 | int32_t width;
135 | int32_t height;
136 | int32_t channel;
137 | int32_t crop_x;
138 | int32_t crop_y;
139 | int32_t crop_width;
140 | int32_t crop_height;
141 | bool is_bgr; // used when channel == 3 (true: BGR, false: RGB)
142 | bool swap_color;
143 | } image_info; // [In] used when data_type_ == kDataTypeImage
144 |
145 | struct {
146 | float mean[3];
147 | float norm[3];
148 | } normalize; // [In] used when data_type_ == kDataTypeImage
149 | };
150 |
151 |
152 | class OutputTensorInfo : public TensorInfo {
153 | public:
154 | OutputTensorInfo()
155 | : data(nullptr)
156 | , quant({ 1.0f, 0 })
157 | , data_fp32_(nullptr)
158 | {}
159 |
160 | OutputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
161 | : OutputTensorInfo()
162 | {
163 | name = name_;
164 | tensor_type = tensor_type_;
165 | is_nchw = is_nchw_;
166 | }
167 |
168 | ~OutputTensorInfo() {
169 | if (data_fp32_ != nullptr) {
170 | delete[] data_fp32_;
171 | }
172 | }
173 |
174 | float* GetDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */
175 | if (tensor_type == kTensorTypeUint8 || tensor_type == kTensorTypeInt8) {
176 | if (data_fp32_ == nullptr) {
177 | data_fp32_ = new float[GetElementNum()];
178 | }
179 | if (tensor_type == kTensorTypeUint8) {
180 | #pragma omp parallel for
181 | for (int32_t i = 0; i < GetElementNum(); i++) {
182 | const uint8_t* val_uint8 = static_cast<const uint8_t*>(data);
183 | float val_float = (val_uint8[i] - quant.zero_point) * quant.scale;
184 | data_fp32_[i] = val_float;
185 | }
186 | } else {
187 | #pragma omp parallel for
188 | for (int32_t i = 0; i < GetElementNum(); i++) {
189 | const int8_t* val_int8 = static_cast<const int8_t*>(data);
190 | float val_float = (val_int8[i] - quant.zero_point) * quant.scale;
191 | data_fp32_[i] = val_float;
192 | }
193 | }
194 | return data_fp32_;
195 | } else if (tensor_type == kTensorTypeFp32) {
196 | return static_cast<float*>(data);
197 | } else {
198 | return nullptr;
199 | }
200 | }
201 |
202 | public:
203 | void* data; // [Out] Pointer to the output data_
204 | struct {
205 | float scale;
206 | int32_t zero_point;
207 | } quant; // [Out] Parameters for dequantization (convert uint8 to float)
208 |
209 | private:
210 | float* data_fp32_;
211 | };
212 |
213 |
214 | namespace cv {
215 | class Mat;
216 | };
217 |
218 | class InferenceHelper {
219 | public:
220 | enum {
221 | kRetOk = 0,
222 | kRetErr = -1,
223 | };
224 |
225 | typedef enum {
226 | kOpencv,
227 | kOpencvGpu,
228 | kTensorflowLite,
229 | kTensorflowLiteXnnpack,
230 | kTensorflowLiteGpu,
231 | kTensorflowLiteEdgetpu,
232 | kTensorflowLiteNnapi,
233 | kTensorrt,
234 | kNcnn,
235 | kNcnnVulkan,
236 | kMnn,
237 | kSnpe,
238 | kArmnn,
239 | kNnabla,
240 | kNnablaCuda,
241 | kOnnxRuntime,
242 | kOnnxRuntimeCuda,
243 | kLibtorch,
244 | kLibtorchCuda,
245 | kTensorflow,
246 | kTensorflowGpu,
247 | kSample,
248 | } HelperType;
249 |
250 | public:
251 | static InferenceHelper* Create(const HelperType helper_type);
252 | static void PreProcessByOpenCV(const InputTensorInfo& input_tensor_info, bool is_nchw, cv::Mat& img_blob); // use this if the selected inference engine doesn't support pre-process
253 |
254 | public:
255 | virtual ~InferenceHelper() {}
256 | virtual int32_t SetNumThreads(const int32_t num_threads) = 0;
257 | virtual int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) = 0;
258 | virtual int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
259 | virtual int32_t Finalize(void) = 0;
260 | virtual int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0;
261 | virtual int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
262 |
263 | protected:
264 | void ConvertNormalizeParameters(InputTensorInfo& tensor_info);
265 |
266 | void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, float* dst);
267 | void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, uint8_t* dst);
268 | void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, int8_t* dst);
269 |
270 | template <typename T>
271 | void PreProcessBlob(int32_t num_thread, const InputTensorInfo& input_tensor_info, T *dst);
272 |
273 | protected:
274 | HelperType helper_type_;
275 | };
276 |
277 | #endif
278 |
--------------------------------------------------------------------------------
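The header above defines the whole public API: Create a helper for a backend, describe the input/output tensors, then call Initialize, PreProcess, and Process. The following is a minimal usage sketch, not code from this repository; the engine choice, model path, tensor names, shapes, and preprocessing values are hypothetical placeholders to be replaced with the values that match your own model.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>
#include "inference_helper.h"

int main() {
    /* Create a helper for one of the supported engines (TensorFlow Lite is just an example) */
    InferenceHelper* helper = InferenceHelper::Create(InferenceHelper::kTensorflowLite);
    helper->SetNumThreads(4);

    /* Describe the model inputs/outputs (names and dims are model-specific placeholders) */
    std::vector<InputTensorInfo> input_list;
    InputTensorInfo input("input", TensorInfo::kTensorTypeFp32, false);  /* NHWC */
    input.tensor_dims = { 1, 224, 224, 3 };
    input_list.push_back(input);

    std::vector<OutputTensorInfo> output_list;
    output_list.push_back(OutputTensorInfo("output", TensorInfo::kTensorTypeFp32));

    if (helper->Initialize("model.tflite", input_list, output_list) != InferenceHelper::kRetOk) {
        return -1;
    }

    /* Point the input at an image buffer (RGB, 224x224x3 here) and set preprocessing parameters */
    std::vector<uint8_t> image(224 * 224 * 3, 0);
    input_list[0].data = image.data();
    input_list[0].data_type = InputTensorInfo::kDataTypeImage;
    input_list[0].image_info = { 224, 224, 3, 0, 0, 224, 224, false, false };
    input_list[0].normalize = { { 0.0f, 0.0f, 0.0f }, { 1.0f, 1.0f, 1.0f } };

    /* Run pre-processing and inference, then read the first output as float
       (GetDataAsFloat also handles dequantization for uint8/int8 outputs) */
    helper->PreProcess(input_list);
    helper->Process(output_list);
    const float* scores = output_list[0].GetDataAsFloat();
    printf("score[0] = %f\n", scores[0]);

    helper->Finalize();
    delete helper;
    return 0;
}
```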
/inference_helper/inference_helper_armnn.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_ARMNN_
16 | #define INFERENCE_HELPER_ARMNN_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <string>
21 | #include <vector>
22 | #include <array>
23 | #include <memory>
24 | #include <utility>
25 |
26 | /* for My modules */
27 | #include "inference_helper.h"
28 |
29 | class ArmnnWrapper;
30 |
31 | class InferenceHelperArmnn : public InferenceHelper {
32 | public:
33 | InferenceHelperArmnn();
34 | ~InferenceHelperArmnn() override = default;
35 | int32_t SetNumThreads(const int32_t num_threads) override;
36 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
37 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
38 | int32_t Finalize(void) override;
39 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
40 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
41 |
42 | private:
43 | int32_t num_threads_;
44 | std::unique_ptr<ArmnnWrapper> armnn_wrapper_;
45 |
46 | };
47 |
48 | #endif
49 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_libtorch.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | /*** Include ***/
16 | /* for general */
17 | #include <cstdint>
18 | #include <cstdlib>
19 | #include <cmath>
20 | #include <cstring>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <algorithm>
25 | #include <chrono>
26 | #include <iostream>
27 |
28 | #ifdef _WIN32
29 | #include <windows.h>
30 | #endif
31 |
32 | /* for LibTorch */
33 | //#include <torch/torch.h>
34 | #include <torch/script.h> // One-stop header.
35 | #include
36 | #include
37 | #include
38 |
39 | /* for My modules */
40 | #include "inference_helper_log.h"
41 | #include "inference_helper_libtorch.h"
42 |
43 | /*** Macro ***/
44 | #define TAG "InferenceHelperLibtorch"
45 | #define PRINT(...) INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
46 | #define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)
47 |
48 |
49 | /*** Function ***/
50 | InferenceHelperLibtorch::InferenceHelperLibtorch()
51 | {
52 | num_threads_ = 1;
53 | }
54 |
55 | InferenceHelperLibtorch::~InferenceHelperLibtorch()
56 | {
57 | }
58 |
59 | int32_t InferenceHelperLibtorch::SetNumThreads(const int32_t num_threads)
60 | {
61 | num_threads_ = num_threads;
62 | torch::set_num_threads(num_threads_);
63 | return kRetOk;
64 | }
65 |
66 | int32_t InferenceHelperLibtorch::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
67 | {
68 | PRINT("[WARNING] This method is not supported\n");
69 | return kRetOk;
70 | }
71 |
72 | int32_t InferenceHelperLibtorch::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
73 | {
74 | /*** Note
75 | * This backend does not analyze the model information.
76 | * The order of the model inputs/outputs must be the same as that of input_tensor_info_list/output_tensor_info_list
77 | */
78 |
79 | /*** Check CUDA ***/
80 | if (torch::cuda::is_available()) {
81 | PRINT("CUDA is available\n");
82 | if (helper_type_ == InferenceHelper::kLibtorchCuda) {
83 | device_type_ = torch::kCUDA;
84 | } else {
85 | device_type_ = torch::kCPU;
86 | }
87 | } else {
88 | PRINT("CUDA is not available\n");
89 | if (helper_type_ == InferenceHelper::kLibtorchCuda) {
90 | PRINT("[WARNING] kLibtorchCuda is selected, but CUDA is not available\n");
91 | device_type_ = torch::kCPU;
92 | } else {
93 | device_type_ = torch::kCPU;
94 | }
95 | }
96 |
97 | /*** Load model ***/
98 | try {
99 | module_ = torch::jit::load(model_filename);
100 | }
101 | catch (const c10::Error& e) {
102 | PRINT_E("[ERROR] Unable to load model %s: %s\n", model_filename.c_str(), e.what());
103 | return kRetErr;
104 | }
105 | module_.to(device_type_);
106 | module_.eval();
107 |
108 | /*** Convert normalize parameter to speed up ***/
109 | for (auto& input_tensor_info : input_tensor_info_list) {
110 | ConvertNormalizeParameters(input_tensor_info);
111 | }
112 |
113 | return kRetOk;
114 | };
115 |
116 | int32_t InferenceHelperLibtorch::Finalize(void)
117 | {
118 | return kRetOk;
119 | }
120 |
121 |
122 | int32_t InferenceHelperLibtorch::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
123 | {
124 | /*** Allocate input tensor every frame ***/
125 | /* For kCPU this is needed only the first time, but for kCUDA the tensor device changes, so we need to reallocate it */
126 | /* Todo: there may be a way to reuse allocated GPU memory */
127 | input_tensor_list_.clear();
128 |
129 | /*** Normalize input data and store the converted data into the input tensor buffer ***/
130 | for (size_t input_tensor_index = 0; input_tensor_index < input_tensor_info_list.size(); input_tensor_index++) {
131 | const auto& input_tensor_info = input_tensor_info_list[input_tensor_index];
132 | const int32_t img_width = input_tensor_info.GetWidth();
133 | const int32_t img_height = input_tensor_info.GetHeight();
134 | const int32_t img_channel = input_tensor_info.GetChannel();
135 |
136 | torch::TensorOptions tensor_options;
137 | if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32) {
138 | tensor_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
139 | }
140 | std::vector<int64_t> sizes;
141 | for (auto v : input_tensor_info.tensor_dims) {
142 | sizes.push_back(v);
143 | }
144 | torch::Tensor input_tensor = torch::zeros(sizes, tensor_options);
145 |
146 |
147 | if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
148 | if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
149 | PRINT_E("Crop is not supported\n");
150 | return kRetErr;
151 | }
152 | if ((input_tensor_info.image_info.crop_width != img_width) || (input_tensor_info.image_info.crop_height != img_height)) {
153 | PRINT_E("Resize is not supported\n");
154 | return kRetErr;
155 | }
156 | if (input_tensor_info.image_info.channel != img_channel) {
157 | PRINT_E("Color conversion is not supported\n");
158 | return kRetErr;
159 | }
160 |
161 | /* Normalize image */
162 | if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32) {
163 | float* dst = (float*)(input_tensor.data_ptr());
164 | PreProcessImage(num_threads_, input_tensor_info, dst);
165 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeUint8) {
166 | uint8_t* dst = (uint8_t*)(input_tensor.data_ptr());
167 | PreProcessImage(num_threads_, input_tensor_info, dst);
168 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt8) {
169 | int8_t* dst = (int8_t*)(input_tensor.data_ptr());
170 | PreProcessImage(num_threads_, input_tensor_info, dst);
171 | } else {
172 | PRINT_E("Unsupported tensor_type (%d)\n", input_tensor_info.tensor_type);
173 | return kRetErr;
174 | }
175 | } else if ((input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) || (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw)) {
176 | if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32) {
177 | float* dst = (float*)(input_tensor.data_ptr());
178 | PreProcessBlob(num_threads_, input_tensor_info, dst);
179 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeUint8 || input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt8) {
180 | uint8_t* dst = (uint8_t*)(input_tensor.data_ptr());
181 | PreProcessBlob(num_threads_, input_tensor_info, dst);
182 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt32) {
183 | int32_t* dst = (int32_t*)(input_tensor.data_ptr());
184 | PreProcessBlob(num_threads_, input_tensor_info, dst);
185 | } else {
186 | PRINT_E("Unsupported tensor_type (%d)\n", input_tensor_info.tensor_type);
187 | return kRetErr;
188 | }
189 | } else {
190 | PRINT_E("Unsupported data_type (%d)\n", input_tensor_info.data_type);
191 | return kRetErr;
192 | }
193 |
194 | input_tensor_list_.push_back(input_tensor.to(device_type_));
195 | }
196 |
197 | return kRetOk;
198 | }
199 |
200 | int32_t InferenceHelperLibtorch::Process(std::vector<OutputTensorInfo>& output_tensor_info_list)
201 | {
202 | /*** Inference ***/
203 | torch::jit::IValue outputs;
204 | try {
205 | outputs = module_.forward(input_tensor_list_);
206 | } catch (std::exception& e) {
207 | PRINT("Error at forward: %s\n", e.what());
208 | }
209 |
210 | /*** Extract output tensor data and save them to output_tensor_list_ ***/
211 | output_tensor_list_.clear();
212 | if (outputs.isTensor()) {
213 | torch::Tensor output_tensor = outputs.toTensor().to(torch::kCPU);
214 | output_tensor_list_.emplace_back(output_tensor);
215 | //std::cout << output_tensor << std::endl;
216 | } else if (outputs.isTuple()) {
217 | PRINT("Multiple output is not tested\n");
218 | const auto& output_tuple = outputs.toTuple()->elements();
219 | for (const auto& o : output_tuple) {
220 | torch::Tensor output_tensor = o.toTensor().to(torch::kCPU);
221 | output_tensor_list_.emplace_back(output_tensor);
222 | }
223 | // } else if (outputs.isTensorList()) {
224 | // PRINT("Multiple output is not tested\n");
225 | // const auto& output_list = outputs.toTensorList();
226 | // for (const auto& o : output_list) {
227 | // torch::Tensor output_tensor = o;
228 | // output_tensor = output_tensor.to(torch::kCPU);
229 | // output_tensor_list_.emplace_back(output_tensor);
230 | // }
231 | } else {
232 | PRINT_E("Invalid output format\n");
233 | return kRetErr;
234 | }
235 |
236 | /*** Set output data for caller ***/
237 | if (output_tensor_list_.size() != output_tensor_info_list.size()) {
238 | PRINT_E("The number of output tensors doesn't match. The model has %zu outputs, but the code expects %zu\n", output_tensor_list_.size(), output_tensor_info_list.size());
239 | }
240 |
241 | for (size_t i = 0; i < output_tensor_list_.size(); i++) {
242 | const auto& output_tensor = output_tensor_list_[i];
243 | auto& tensor_info = output_tensor_info_list[i];
244 | int32_t ndim = output_tensor.dim();
245 | tensor_info.tensor_dims.clear();
246 | for (int idim = 0; idim < ndim; idim++) {
247 | tensor_info.tensor_dims.push_back(output_tensor.size(idim));
248 | }
249 | tensor_info.data = output_tensor.data_ptr();
250 | }
251 |
252 | return kRetOk;
253 | }
254 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_libtorch.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_LIBTORCH_
16 | #define INFERENCE_HELPER_LIBTORCH_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <string>
21 | #include <vector>
22 | #include <array>
23 | #include <memory>
24 | #include <utility>
25 |
26 | /* for LibTorch */
27 | //#include <torch/torch.h>
28 | #include <torch/script.h> // One-stop header.
29 |
30 | /* for My modules */
31 | #include "inference_helper.h"
32 |
33 | class InferenceHelperLibtorch : public InferenceHelper {
34 | public:
35 | InferenceHelperLibtorch();
36 | ~InferenceHelperLibtorch() override;
37 | int32_t SetNumThreads(const int32_t num_threads) override;
38 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
39 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
40 | int32_t Finalize(void) override;
41 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
42 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
43 |
44 | private:
45 | int32_t num_threads_;
46 |
47 | torch::jit::script::Module module_;
48 | torch::DeviceType device_type_;
49 | std::vector<torch::jit::IValue> input_tensor_list_;
50 | std::vector<torch::Tensor> output_tensor_list_;
51 |
52 | };
53 |
54 | #endif
55 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_log.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_LOG_
16 | #define INFERENCE_HELPER_LOG_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cstdio>
21 | #include <cstdlib>
22 | #include <cstring>
23 | #include <string>
24 |
25 |
26 | #if defined(ANDROID) || defined(__ANDROID__)
27 | #define CV_COLOR_IS_RGB
28 | #include <android/log.h>
29 | #define INFERENCE_HELPER_LOG_NDK_TAG "MyApp_NDK"
30 | #define INFERENCE_HELPER_LOG_PRINT_(...) __android_log_print(ANDROID_LOG_INFO, INFERENCE_HELPER_LOG_NDK_TAG, __VA_ARGS__)
31 | #else
32 | #define INFERENCE_HELPER_LOG_PRINT_(...) printf(__VA_ARGS__)
33 | #endif
34 |
35 | #define INFERENCE_HELPER_LOG_PRINT(INFERENCE_HELPER_LOG_PRINT_TAG, ...) do { \
36 | INFERENCE_HELPER_LOG_PRINT_("[" INFERENCE_HELPER_LOG_PRINT_TAG "][%d] ", __LINE__); \
37 | INFERENCE_HELPER_LOG_PRINT_(__VA_ARGS__); \
38 | } while(0);
39 |
40 | #define INFERENCE_HELPER_LOG_PRINT_E(INFERENCE_HELPER_LOG_PRINT_TAG, ...) do { \
41 | INFERENCE_HELPER_LOG_PRINT_("[ERR: " INFERENCE_HELPER_LOG_PRINT_TAG "][%d] ", __LINE__); \
42 | INFERENCE_HELPER_LOG_PRINT_(__VA_ARGS__); \
43 | } while(0);
44 |
45 | #endif
46 |
--------------------------------------------------------------------------------
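Each backend source file uses this log header by defining its own TAG and thin PRINT/PRINT_E wrappers, as inference_helper_libtorch.cpp and inference_helper_ncnn.cpp do. A minimal sketch of that pattern follows; the "MyBackend" TAG and the function are placeholders, not code from this repository.

```cpp
#include <cstdint>
#include "inference_helper_log.h"

/* Per-file tag and wrapper macros, mirroring the existing backend sources */
#define TAG "MyBackend"
#define PRINT(...)   INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
#define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)

static int32_t LoadModel(const char* path)
{
    PRINT("Loading model: %s\n", path);    /* -> "[MyBackend][<line>] Loading model: ..." */
    if (path == nullptr) {
        PRINT_E("Invalid model path\n");   /* -> "[ERR: MyBackend][<line>] Invalid model path" */
        return -1;
    }
    return 0;
}
```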
/inference_helper/inference_helper_mnn.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_MNN_
16 | #define INFERENCE_HELPER_MNN_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 |
26 | /* for MNN */
27 | #include <MNN/ImageProcess.hpp>
28 | #include <MNN/Interpreter.hpp>
29 | #include <MNN/AutoTime.hpp>
30 |
31 | /* for My modules */
32 | #include "inference_helper.h"
33 |
34 | class InferenceHelperMnn : public InferenceHelper {
35 | public:
36 | InferenceHelperMnn();
37 | ~InferenceHelperMnn() override;
38 | int32_t SetNumThreads(const int32_t num_threads) override;
39 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
40 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
41 | int32_t Finalize(void) override;
42 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
43 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
44 |
45 | private:
46 | std::unique_ptr<MNN::Interpreter> net_;
47 | MNN::Session* session_;
48 | std::vector<std::unique_ptr<MNN::Tensor>> out_mat_list_;
49 | int32_t num_threads_;
50 | };
51 |
52 | #endif
53 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_ncnn.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | /*** Include ***/
16 | /* for general */
17 | #include <cstdint>
18 | #include <cstdlib>
19 | #include <cmath>
20 | #include <cstring>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 | #include <algorithm>
26 |
27 | /* for ncnn */
28 | #include "net.h"
29 |
30 | /* for My modules */
31 | #include "inference_helper_log.h"
32 | #include "inference_helper_ncnn.h"
33 |
34 | /*** Macro ***/
35 | #define TAG "InferenceHelperNcnn"
36 | #define PRINT(...) INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
37 | #define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)
38 |
39 | /*** Function ***/
40 | /* Reference: https://github.com/Tencent/ncnn/blob/master/examples/yolox.cpp */
41 | class YoloV5Focus : public ncnn::Layer
42 | {
43 | public:
44 | YoloV5Focus()
45 | {
46 | one_blob_only = true;
47 | }
48 |
49 | virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
50 | {
51 | int w = bottom_blob.w;
52 | int h = bottom_blob.h;
53 | int channels = bottom_blob.c;
54 |
55 | int outw = w / 2;
56 | int outh = h / 2;
57 | int outc = channels * 4;
58 |
59 | top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
60 | if (top_blob.empty())
61 | return -100;
62 |
63 | #pragma omp parallel for num_threads(opt.num_threads)
64 | for (int p = 0; p < outc; p++)
65 | {
66 | const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
67 | float* outptr = top_blob.channel(p);
68 |
69 | for (int i = 0; i < outh; i++)
70 | {
71 | for (int j = 0; j < outw; j++)
72 | {
73 | *outptr = *ptr;
74 |
75 | outptr += 1;
76 | ptr += 2;
77 | }
78 |
79 | ptr += w;
80 | }
81 | }
82 |
83 | return 0;
84 | }
85 | };
86 | DEFINE_LAYER_CREATOR(YoloV5Focus)
87 |
88 | InferenceHelperNcnn::InferenceHelperNcnn()
89 | {
90 | custom_ops_.clear();
91 | custom_ops_.push_back(std::pair<const char*, const void*>("YoloV5Focus", (const void*)YoloV5Focus_layer_creator));
92 | num_threads_ = 1;
93 | }
94 |
95 | InferenceHelperNcnn::~InferenceHelperNcnn()
96 | {
97 | }
98 |
99 | int32_t InferenceHelperNcnn::SetNumThreads(const int32_t num_threads)
100 | {
101 | num_threads_ = num_threads;
102 | return kRetOk;
103 | }
104 |
105 | int32_t InferenceHelperNcnn::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
106 | {
107 | for (auto op : custom_ops) {
108 | custom_ops_.push_back(op);
109 | }
110 | return kRetOk;
111 | }
112 |
113 | int32_t InferenceHelperNcnn::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
114 | {
115 | /*** Create network ***/
116 | net_.reset(new ncnn::Net());
117 | net_->opt.use_fp16_arithmetic = true;
118 | net_->opt.use_fp16_packed = true;
119 | net_->opt.use_fp16_storage = true;
120 | if (helper_type_ == kNcnnVulkan) {
121 | net_->opt.use_vulkan_compute = 1;
122 | }
123 |
124 | for (auto op : custom_ops_) {
125 | net_->register_custom_layer(op.first, (ncnn::layer_creator_func)(op.second));
126 | }
127 |
128 | std::string bin_filename = model_filename;
129 | if (model_filename.find(".param") == std::string::npos) {
130 | PRINT_E("Invalid model param filename (%s)\n", model_filename.c_str());
131 | return kRetErr;
132 | }
133 | bin_filename = bin_filename.replace(bin_filename.find(".param"), std::string(".param").length(), ".bin");
134 | if (net_->load_param(model_filename.c_str()) != 0) {
135 | PRINT_E("Failed to load model param file (%s)\n", model_filename.c_str());
136 | return kRetErr;
137 | }
138 | if (net_->load_model(bin_filename.c_str()) != 0) {
139 | PRINT_E("Failed to load model bin file (%s)\n", bin_filename.c_str());
140 | return kRetErr;
141 | }
142 |
143 | /* Convert normalize parameter to speed up */
144 | for (auto& input_tensor_info : input_tensor_info_list) {
145 | ConvertNormalizeParameters(input_tensor_info);
146 | }
147 |
148 | /* Check if tensor info is set */
149 | for (const auto& input_tensor_info : input_tensor_info_list) {
150 | for (const auto& dim : input_tensor_info.tensor_dims) {
151 | if (dim <= 0) {
152 | PRINT_E("Invalid tensor size\n");
153 | return kRetErr;
154 | }
155 | }
156 | }
157 | //for (const auto& output_tensor_info : output_tensor_info_list) {
158 | // for (const auto& dim : output_tensor_info.tensor_dims) {
159 | // if (dim <= 0) {
160 | // PRINT_E("Invalid tensor size\n");
161 | // return kRetErr;
162 | // }
163 | // }
164 | //}
165 |
166 | return kRetOk;
167 | };
168 |
169 |
170 | int32_t InferenceHelperNcnn::Finalize(void)
171 | {
172 | net_.reset();
173 | in_mat_list_.clear();
174 | out_mat_list_.clear();
175 | if (helper_type_ == kNcnnVulkan) {
176 | ncnn::destroy_gpu_instance();
177 | }
178 | return kRetOk;
179 | }
180 |
181 | int32_t InferenceHelperNcnn::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
182 | {
183 | in_mat_list_.clear();
184 | for (const auto& input_tensor_info : input_tensor_info_list) {
185 | ncnn::Mat ncnn_mat;
186 | if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
187 | /* Crop */
188 | if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
189 | PRINT_E("Crop is not supported\n");
190 | return kRetErr;
191 | }
192 | /* Convert color type */
193 | int32_t pixel_type = 0;
194 | if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.GetChannel() == 3)) {
195 | pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR : ncnn::Mat::PIXEL_RGB;
196 | if (input_tensor_info.image_info.swap_color) {
197 | pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2RGB : ncnn::Mat::PIXEL_RGB2BGR;
198 | }
199 | } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.GetChannel() == 1)) {
200 | pixel_type = ncnn::Mat::PIXEL_GRAY;
201 | } else if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.GetChannel() == 1)) {
202 | pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2GRAY : ncnn::Mat::PIXEL_RGB2GRAY;
203 | } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.GetChannel() == 3)) {
204 | pixel_type = ncnn::Mat::PIXEL_GRAY2RGB;
205 | } else {
206 | PRINT_E("Unsupported color conversion (%d, %d)\n", input_tensor_info.image_info.channel, input_tensor_info.GetChannel());
207 | return kRetErr;
208 | }
209 |
210 | if (input_tensor_info.image_info.crop_width == input_tensor_info.GetWidth() && input_tensor_info.image_info.crop_height == input_tensor_info.GetHeight()) {
211 | /* Convert to blob */
212 | ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height);
213 | } else {
214 | /* Convert to blob with resize */
215 | ncnn_mat = ncnn::Mat::from_pixels_resize((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height, input_tensor_info.GetWidth(), input_tensor_info.GetHeight());
216 | }
217 | /* Normalize image */
218 | ncnn_mat.substract_mean_normalize(input_tensor_info.normalize.mean, input_tensor_info.normalize.norm);
219 | } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) {
220 | PRINT_E("[ToDo] Unsupported data type (%d)\n", input_tensor_info.data_type);
221 | ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, input_tensor_info.GetChannel() == 3 ? ncnn::Mat::PIXEL_RGB : ncnn::Mat::PIXEL_GRAY, input_tensor_info.GetWidth(), input_tensor_info.GetHeight());
222 | } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw) {
223 | ncnn_mat = ncnn::Mat(input_tensor_info.GetWidth(), input_tensor_info.GetHeight(), input_tensor_info.GetChannel(), input_tensor_info.data);
224 | } else {
225 | PRINT_E("Unsupported data type (%d)\n", input_tensor_info.data_type);
226 | return kRetErr;
227 | }
228 | in_mat_list_.push_back(std::pair<std::string, ncnn::Mat>(input_tensor_info.name, ncnn_mat));
229 | }
230 | return kRetOk;
231 | }
232 |
233 | int32_t InferenceHelperNcnn::Process(std::vector<OutputTensorInfo>& output_tensor_info_list)
234 | {
235 | ncnn::Extractor ex = net_->create_extractor();
236 | ex.set_light_mode(true);
237 | ex.set_num_threads(num_threads_);
238 | for (const auto& inputMat : in_mat_list_) {
239 | if (ex.input(inputMat.first.c_str(), inputMat.second) != 0) {
240 | PRINT_E("Input mat error (%s)\n", inputMat.first.c_str());
241 | return kRetErr;
242 | }
243 | }
244 |
245 | out_mat_list_.clear();
246 | for (auto& output_tensor_info : output_tensor_info_list) {
247 | ncnn::Mat ncnn_out;
248 | if (ex.extract(output_tensor_info.name.c_str(), ncnn_out) != 0) {
249 | PRINT_E("Output mat error (%s)\n", output_tensor_info.name.c_str());
250 | return kRetErr;
251 | }
252 | out_mat_list_.push_back(ncnn_out); // keep the ncnn mat in a member variable so that its data stays valid after this call
253 | output_tensor_info.data = ncnn_out.data;
254 | output_tensor_info.tensor_dims.clear();
255 | output_tensor_info.tensor_dims.push_back(1);
256 | output_tensor_info.tensor_dims.push_back(ncnn_out.c);
257 | output_tensor_info.tensor_dims.push_back(ncnn_out.h);
258 | output_tensor_info.tensor_dims.push_back(ncnn_out.w);
259 | }
260 |
261 | return kRetOk;
262 | }
263 |
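264 | /*
265 |  * Usage sketch (illustrative, assumes a caller-defined layer): additional custom layers can be
266 |  * registered before Initialize() through SetCustomOps(), mirroring the built-in YoloV5Focus
267 |  * registration above. MyLayer is a hypothetical ncnn::Layer subclass.
268 |  *
269 |  *   // class MyLayer : public ncnn::Layer { ... };
270 |  *   // DEFINE_LAYER_CREATOR(MyLayer)
271 |  *   helper->SetCustomOps({ { "MyLayer", (const void*)MyLayer_layer_creator } });
272 |  */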
--------------------------------------------------------------------------------
/inference_helper/inference_helper_ncnn.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_NCNN_
16 | #define INFERENCE_HELPER_NCNN_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 |
26 | /* for ncnn */
27 | #include "net.h"
28 |
29 | /* for My modules */
30 | #include "inference_helper.h"
31 |
32 | class InferenceHelperNcnn : public InferenceHelper {
33 | public:
34 | InferenceHelperNcnn();
35 | ~InferenceHelperNcnn() override;
36 | int32_t SetNumThreads(const int32_t num_threads) override;
37 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
38 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
39 | int32_t Finalize(void) override;
40 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
41 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
42 |
43 | private:
44 | std::unique_ptr<ncnn::Net> net_;
45 | std::vector<std::pair<std::string, ncnn::Mat>> in_mat_list_;    // <name, mat>
46 | std::vector<ncnn::Mat> out_mat_list_;
47 | int32_t num_threads_;
48 | std::vector<std::pair<const char*, const void*>> custom_ops_;
49 | };
50 |
51 | #endif
52 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_nnabla.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_NNABLA_
16 | #define INFERENCE_HELPER_NNABLA_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 |
26 | /* for nnabla */
27 | namespace nbla {
28 | class Variable;
29 | class Context;
30 | namespace utils {
31 | namespace nnp {
32 | class Nnp;
33 | class Executor;
34 | }
35 | }
36 | }
37 |
38 | /* for My modules */
39 | #include "inference_helper.h"
40 |
41 | class InferenceHelperNnabla : public InferenceHelper {
42 | public:
43 | InferenceHelperNnabla();
44 | ~InferenceHelperNnabla() override;
45 | int32_t SetNumThreads(const int32_t num_threads) override;
46 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
47 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
48 | int32_t Finalize(void) override;
49 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
50 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
51 |
52 | private:
53 | void DisplayModelInfo();
54 | int32_t CheckTensorInfo(TensorInfo& tensor_info, const std::shared_ptr<nbla::Variable> variable);
55 | int32_t AllocateBuffers(std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list);
56 | std::shared_ptr<nbla::Variable> GetInputVariable(int32_t index);
57 | std::shared_ptr<nbla::Variable> GetOutputVariable(int32_t index);
58 |
59 | private:
60 | int32_t num_threads_;
61 | std::shared_ptr<nbla::Context> ctx_cpu_;
62 | std::shared_ptr<nbla::Context> ctx_gpu_;
63 | std::shared_ptr<nbla::utils::nnp::Nnp> nnp_;
64 | std::shared_ptr<nbla::utils::nnp::Executor> executor_;
65 | };
66 |
67 | #endif
68 |
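69 | /*
70 |  * Note (illustrative): nbla::Variable, nbla::Context and nbla::utils::nnp::{Nnp, Executor} are
71 |  * only forward-declared above, so this public header compiles without pulling in the nnabla
72 |  * headers; the real <nbla/...> includes presumably live in inference_helper_nnabla.cpp.
73 |  */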
--------------------------------------------------------------------------------
/inference_helper/inference_helper_onnx_runtime.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_ONNX_RUNTIME_
16 | #define INFERENCE_HELPER_ONNX_RUNTIME_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 |
26 | /* for ONNX Runtime */
27 | #include <onnxruntime_cxx_api.h>
28 |
29 | /* for My modules */
30 | #include "inference_helper.h"
31 |
32 | class InferenceHelperOnnxRuntime : public InferenceHelper {
33 | public:
34 | InferenceHelperOnnxRuntime();
35 | ~InferenceHelperOnnxRuntime() override;
36 | int32_t SetNumThreads(const int32_t num_threads) override;
37 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
38 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
39 | int32_t Finalize(void) override;
40 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
41 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
42 |
43 | private:
44 | int32_t AllocateTensor(bool is_input, size_t index, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list);
45 |
46 | private:
47 | int32_t num_threads_;
48 |
49 | Ort::Session session_{ nullptr };
50 | Ort::Env env_{ ORT_LOGGING_LEVEL_WARNING, "Default" };
51 | std::vector<std::string> input_name_list_;
52 | std::vector<std::string> output_name_list_;
53 | std::vector<Ort::Value> input_tensor_list_;
54 | std::vector<Ort::Value> output_tensor_list_;
55 | std::vector<std::vector<uint8_t>> input_buffer_list_;
56 | std::vector<std::vector<uint8_t>> output_buffer_list_;
57 | };
58 |
59 | #endif
60 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_opencv.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_OPENCV_
16 | #define INFERENCE_HELPER_OPENCV_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 |
25 | /* for OpenCV */
26 | #include <opencv2/opencv.hpp>
27 | #include <opencv2/dnn.hpp>
28 |
29 | /* for My modules */
30 | #include "inference_helper.h"
31 |
32 | class InferenceHelperOpenCV : public InferenceHelper {
33 | public:
34 | InferenceHelperOpenCV();
35 | ~InferenceHelperOpenCV() override;
36 | int32_t SetNumThreads(const int32_t num_threads) override;
37 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
38 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
39 | int32_t Finalize(void) override;
40 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
41 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
42 |
43 | private:
44 | cv::dnn::Net net_;
45 | std::vector<cv::Mat> in_mat_list_;
46 | std::vector<cv::Mat> out_mat_list_; // store data as a member variable so that the user can refer to the results
47 | };
48 |
49 | #endif
50 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_sample.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | /*** Include ***/
16 | /* for general */
17 | #include <cstdint>
18 | #include <cstdlib>
19 | #include <cmath>
20 | #include <cstring>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 | #include <algorithm>
26 | #include <chrono>
27 |
28 | #ifdef _WIN32
29 | #include <windows.h>
30 | #endif
31 |
32 | /* for OOO */
33 |
34 | /* for My modules */
35 | #include "inference_helper_log.h"
36 | #include "inference_helper_sample.h"
37 |
38 | /*** Macro ***/
39 | #define TAG "InferenceHelperSample"
40 | #define PRINT(...) INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
41 | #define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)
42 |
43 |
44 | /*** Function ***/
45 | InferenceHelperSample::InferenceHelperSample()
46 | {
47 | num_threads_ = 1;
48 | }
49 |
50 | InferenceHelperSample::~InferenceHelperSample()
51 | {
52 | }
53 |
54 | int32_t InferenceHelperSample::SetNumThreads(const int32_t num_threads)
55 | {
56 | num_threads_ = num_threads;
57 | return kRetOk;
58 | }
59 |
60 | int32_t InferenceHelperSample::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
61 | {
62 | PRINT("[WARNING] This method is not supported\n");
63 | return kRetOk;
64 | }
65 |
66 | int32_t InferenceHelperSample::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
67 | {
68 |
69 | /*** Convert normalize parameter to speed up ***/
70 | for (auto& input_tensor_info : input_tensor_info_list) {
71 | ConvertNormalizeParameters(input_tensor_info);
72 | }
73 |
74 | return kRetOk;
75 | };
76 |
77 | int32_t InferenceHelperSample::Finalize(void)
78 | {
79 | return kRetOk;
80 | }
81 |
82 | int32_t InferenceHelperSample::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
83 | {
84 | return kRetOk;
85 | }
86 |
87 | int32_t InferenceHelperSample::Process(std::vector<OutputTensorInfo>& output_tensor_info_list)
88 | {
89 | return kRetOk;
90 | }
91 |
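92 | /*
93 |  * Note (illustrative): this file is a do-nothing skeleton. A new backend can presumably be added
94 |  * by copying inference_helper_sample.cpp/.h, renaming "Sample" to the new framework, filling in
95 |  * Initialize / PreProcess / Process / Finalize, and wiring the new class into the factory in
96 |  * inference_helper.cpp and the cmake files under third_party/cmakes (cf. sample.cmake).
97 |  */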
--------------------------------------------------------------------------------
/inference_helper/inference_helper_sample.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_SAMPLE_
16 | #define INFERENCE_HELPER_SAMPLE_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 |
26 | /* for My modules */
27 | #include "inference_helper.h"
28 |
29 | class InferenceHelperSample : public InferenceHelper {
30 | public:
31 | InferenceHelperSample();
32 | ~InferenceHelperSample() override;
33 | int32_t SetNumThreads(const int32_t num_threads) override;
34 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
35 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
36 | int32_t Finalize(void) override;
37 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
38 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
39 |
40 | private:
41 | int32_t num_threads_;
42 | };
43 |
44 | #endif
45 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_snpe.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2021 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef INFERENCE_HELPER_SNPE_
16 | #define INFERENCE_HELPER_SNPE_
17 |
18 | /* for general */
19 | #include <cstdint>
20 | #include <cmath>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 | #include <unordered_map>
26 |
27 | /* for SNPE */
28 |
29 | /* for My modules */
30 | #include "inference_helper.h"
31 |
32 | namespace zdl { namespace SNPE { class SNPE; } }
33 | namespace zdl { namespace DlSystem { class IUserBuffer; } }
34 | namespace zdl { namespace DlSystem { class UserBufferMap; } }
35 |
36 | class InferenceHelperSnpe : public InferenceHelper {
37 | private:
38 | enum { UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16 };
39 | enum { CPUBUFFER, GLBUFFER };
40 |
41 | public:
42 | InferenceHelperSnpe();
43 | ~InferenceHelperSnpe() override;
44 | int32_t SetNumThreads(const int32_t num_threads) override;
45 | int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
46 | int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
47 | int32_t Finalize(void) override;
48 | int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
49 | int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
50 |
51 | private:
52 | std::unique_ptr<zdl::SNPE::SNPE> CreateSnpe(const std::string& model_filename, bool use_user_supplied_buffers);
53 | int32_t GetTensorInfo(std::unique_ptr<zdl::SNPE::SNPE> const& snpe, const std::string& name, std::vector<int32_t>& dims);
54 | int32_t GetAllTensorInfo(std::unique_ptr<zdl::SNPE::SNPE> const& snpe, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list);
55 |
56 | private:
57 | int32_t num_threads_;
58 | std::unique_ptr<zdl::SNPE::SNPE> snpe_;
59 | std::unique_ptr<zdl::DlSystem::UserBufferMap> input_map_;
60 | std::unique_ptr<zdl::DlSystem::UserBufferMap> output_map_;
61 | std::vector<std::unique_ptr<zdl::DlSystem::IUserBuffer>> snpe_user_input_buffers_;
62 | std::vector<std::unique_ptr<zdl::DlSystem::IUserBuffer>> snpe_user_output_buffers_;
63 | std::unordered_map<std::string, std::vector<uint8_t>> application_input_buffers_;
64 | std::unordered_map<std::string, std::vector<uint8_t>> application_output_buffers_;
65 | };
66 |
67 | #endif
68 |
--------------------------------------------------------------------------------
/inference_helper/inference_helper_tensorflow.cpp:
--------------------------------------------------------------------------------
1 | /* Copyright 2022 iwatake2222
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | /*** Include ***/
16 | /* for general */
17 | #include <cstdint>
18 | #include <cstdlib>
19 | #include <cmath>
20 | #include <cstring>
21 | #include <string>
22 | #include <vector>
23 | #include <array>
24 | #include <memory>
25 | #include <algorithm>
26 | #include <chrono>
27 |
28 | #ifdef _WIN32
29 | #include <windows.h>
30 | #endif
31 |
32 | /* for TensorFlow */
33 | #include <tensorflow/c/c_api.h>
34 |
35 | /* for My modules */
36 | #include "inference_helper_log.h"
37 | #include "inference_helper_tensorflow.h"
38 |
39 | /*** Macro ***/
40 | #define TAG "InferenceHelperTensorflow"
41 | #define PRINT(...) INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
42 | #define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)
43 |
44 |
45 | /*** Function ***/
46 | InferenceHelperTensorflow::InferenceHelperTensorflow()
47 | {
48 | num_threads_ = 1;
49 |
50 | session_ = nullptr;
51 | graph_ = TF_NewGraph();
52 | }
53 |
54 | InferenceHelperTensorflow::~InferenceHelperTensorflow()
55 | {
56 | }
57 |
58 | int32_t InferenceHelperTensorflow::SetNumThreads(const int32_t num_threads)
59 | {
60 | num_threads_ = num_threads;
61 | return kRetOk;
62 | }
63 |
64 | int32_t InferenceHelperTensorflow::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
65 | {
66 | PRINT("[WARNING] This method is not supported\n");
67 | return kRetOk;
68 | }
69 |
70 | static std::string GetOpName(const std::string& model_filename)    /* tensor names are given as "op_name:index"; this returns "op_name" */
71 | {
72 | std::string name;
73 | for (const auto c : model_filename) {
74 | if (c == ':') break;
75 | name.push_back(c);
76 | }
77 | return name;
78 | }
79 |
80 | static int32_t GetOpIndex(const std::string& model_filename)    /* tensor names are given as "op_name:index"; this returns the index */
81 | {
82 | bool is_index_start = false;
83 | std::string index_str;
84 | for (const auto c : model_filename) {
85 | if (c == ':') {
86 | is_index_start = true;
87 | continue;
88 | }
89 | if (is_index_start) {
90 | index_str.push_back(c);
91 | }
92 | }
93 | return std::stoi(index_str);
94 | }
95 |
96 | int32_t InferenceHelperTensorflow::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
97 | {
98 | /*** Load model ***/
99 | TF_Status* status = TF_NewStatus();
100 | TF_SessionOptions* session_options = TF_NewSessionOptions();
101 | const char* tags = "serve";
102 | int32_t ntags = 1;
103 | session_ = TF_LoadSessionFromSavedModel(session_options, nullptr, model_filename.c_str(), &tags, ntags, graph_, nullptr, status);
104 | TF_DeleteSessionOptions(session_options);
105 | TF_Code status_code = TF_GetCode(status);
106 | TF_DeleteStatus(status);
107 | if (status_code != TF_OK) {
108 | PRINT_E("Unable to load model: %d, %s, %s\n", status_code, model_filename.c_str(), tags);
109 | return kRetErr;
110 | }
111 |
112 | /*** Display graph ***/
113 | //size_t pos = 0;
114 | //TF_Operation* oper;
115 | //printf("--- graph info ---\n");
116 | //while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) {
117 | // printf("%s\n", TF_OperationName(oper));
118 | //}
119 | //printf("--- graph info ---\n");
120 |
121 | /*** Allocate tensors ***/
122 | int32_t id_input = 0;
123 | for (auto& input_tensor_info : input_tensor_info_list) {
124 | input_tensor_info.id = id_input++;
125 | TF_Output op = { TF_GraphOperationByName(graph_, GetOpName(input_tensor_info.name).c_str()), GetOpIndex(input_tensor_info.name) };
126 | if (op.oper == nullptr) {
127 | PRINT_E("Can't find input tensor name: %s\n", input_tensor_info.name.c_str());
128 | return kRetErr;
129 | }
130 | input_op_list_.emplace_back(op);
131 |
132 | std::vector<int64_t> dims;
133 | for (const auto& dim : input_tensor_info.tensor_dims) {
134 | dims.push_back(dim);
135 | }
136 | TF_Tensor* input_tensor = TF_AllocateTensor(TF_FLOAT, dims.data(), static_cast<int>(dims.size()), input_tensor_info.GetElementNum() * sizeof(float));
137 | input_tensor_list_.emplace_back(input_tensor);
138 | }
139 |
140 | for (auto& output_tensor_info : output_tensor_info_list) {
141 | TF_Output op = { TF_GraphOperationByName(graph_, GetOpName(output_tensor_info.name).c_str()), GetOpIndex(output_tensor_info.name) };
142 | if (op.oper == nullptr) {
143 | PRINT_E("Can't find output tensor name: %s\n", output_tensor_info.name.c_str());
144 | return kRetErr;
145 | }
146 | output_op_list_.emplace_back(op);
147 | output_tensor_list_.emplace_back(nullptr);
148 | }
149 |
150 | /*** Convert normalize parameter to speed up ***/
151 | for (auto& input_tensor_info : input_tensor_info_list) {
152 | ConvertNormalizeParameters(input_tensor_info);
153 | }
154 |
155 | return kRetOk;
156 | };
157 |
158 | int32_t InferenceHelperTensorflow::Finalize(void)
159 | {
160 | for (auto& tensor : input_tensor_list_) {
161 | TF_DeleteTensor(tensor);
162 | }
163 | for (auto& tensor : output_tensor_list_) {
164 | TF_DeleteTensor(tensor);
165 | }
166 | TF_DeleteGraph(graph_);
167 | TF_Status* status = TF_NewStatus();
168 | TF_CloseSession(session_, status);
169 | TF_DeleteSession(session_, status);
170 | TF_DeleteStatus(status);
171 |
172 | input_op_list_.clear();
173 | output_op_list_.clear();
174 | input_tensor_list_.clear();
175 | output_tensor_list_.clear();
176 | return kRetOk;
177 | }
178 |
179 | int32_t InferenceHelperTensorflow::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
180 | {
181 | for (const auto& input_tensor_info : input_tensor_info_list) {
182 | const int32_t img_width = input_tensor_info.GetWidth();
183 | const int32_t img_height = input_tensor_info.GetHeight();
184 | const int32_t img_channel = input_tensor_info.GetChannel();
185 | if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
186 | if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
187 | PRINT_E("Crop is not supported\n");
188 | return kRetErr;
189 | }
190 | if ((input_tensor_info.image_info.crop_width != img_width) || (input_tensor_info.image_info.crop_height != img_height)) {
191 | PRINT_E("Resize is not supported\n");
192 | return kRetErr;
193 | }
194 | if (input_tensor_info.image_info.channel != img_channel) {
195 | PRINT_E("Color conversion is not supported\n");
196 | return kRetErr;
197 | }
198 |
199 | /* Normalize image */
200 | if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32) {
201 | float* dst = static_cast<float*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
202 | PreProcessImage(num_threads_, input_tensor_info, dst);
203 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeUint8) {
204 | uint8_t* dst = static_cast<uint8_t*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
205 | PreProcessImage(num_threads_, input_tensor_info, dst);
206 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt8) {
207 | int8_t* dst = static_cast<int8_t*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
208 | PreProcessImage(num_threads_, input_tensor_info, dst);
209 | } else {
210 | PRINT_E("Unsupported tensor_type (%d)\n", input_tensor_info.tensor_type);
211 | return kRetErr;
212 | }
213 | } else if ((input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) || (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw)) {
214 | if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32) {
215 | float* dst = static_cast<float*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
216 | PreProcessBlob(num_threads_, input_tensor_info, dst);
217 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeUint8 || input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt8) {
218 | uint8_t* dst = static_cast<uint8_t*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
219 | PreProcessBlob(num_threads_, input_tensor_info, dst);
220 | } else if (input_tensor_info.tensor_type == TensorInfo::kTensorTypeInt32) {
221 | int32_t* dst = static_cast<int32_t*>(TF_TensorData(input_tensor_list_[input_tensor_info.id]));
222 | PreProcessBlob