├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.md ├── SCR-pyarmnn-release.txt ├── cmake └── GlobalConfig.cmake ├── docs └── index.html ├── python └── pyarmnn │ ├── .gitignore │ ├── CMakeLists.txt │ ├── LICENSE │ ├── Makefile │ ├── conftest.py │ ├── docs_conf │ └── config.mako │ ├── examples │ ├── example_utils.py │ ├── onnx_mobilenetv2.py │ ├── requirements.txt │ └── tflite_mobilenetv1_quantized.py │ ├── pylintconfig │ ├── scripts │ ├── download_test_resources.py │ ├── generate_docs.py │ └── image_to_npy.py │ ├── setup.py │ ├── src │ └── pyarmnn │ │ ├── __init__.py │ │ ├── _generated │ │ └── __init__.py │ │ ├── _quantization │ │ ├── __init__.py │ │ └── quantize_and_dequantize.py │ │ ├── _tensor │ │ ├── __init__.py │ │ ├── const_tensor.py │ │ ├── tensor.py │ │ └── workload_tensors.py │ │ ├── _utilities │ │ ├── __init__.py │ │ └── profiling_helper.py │ │ ├── _version.py │ │ └── swig │ │ ├── armnn.i │ │ ├── armnn_caffeparser.i │ │ ├── armnn_onnxparser.i │ │ ├── armnn_tfliteparser.i │ │ ├── armnn_tfparser.i │ │ ├── armnn_version.i │ │ ├── modules │ │ ├── armnn_backend.i │ │ ├── armnn_descriptors.i │ │ ├── armnn_lstmparam.i │ │ ├── armnn_network.i │ │ ├── armnn_profiler.i │ │ ├── armnn_runtime.i │ │ ├── armnn_tensor.i │ │ ├── armnn_types.i │ │ └── armnn_types_utils.i │ │ ├── standard_header.i │ │ └── typemaps │ │ ├── network_optimize.i │ │ ├── permutation_vector.i │ │ ├── tensor_memory.i │ │ ├── tensor_shape.i │ │ └── vectors.i │ ├── swig_generate.py │ ├── test │ ├── requirements.txt │ ├── test_caffe_parser.py │ ├── test_const_tensor.py │ ├── test_descriptors.py │ ├── test_generated.py │ ├── test_iconnectable.py │ ├── test_network.py │ ├── test_onnx_parser.py │ ├── test_profiling_utilities.py │ ├── test_quantize_and_dequantize.py │ ├── test_runtime.py │ ├── test_setup.py │ ├── test_supported_backends.py │ ├── test_tensor.py │ ├── test_tensor_conversion.py │ ├── test_tensor_info.py │ ├── test_tensor_shape.py │ ├── test_tf_parser.py │ ├── test_tflite_parser.py │ ├── test_types.py │ └── test_version.py │ └── tox.ini └── whl ├── pyarmnn-19.8.0-cp37-cp37m-linux_aarch64.whl ├── pyarmnn-19.8.1-cp37-cp37m-linux_aarch64.whl ├── pyarmnn-20.2.0-cp37-cp37m-linux_aarch64.whl └── pyarmnn-20.2.0-cp38-cp38-linux_aarch64.whl /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | build/ 3 | CMakeFiles/ 4 | python/pyarmnn/src/pyarmnn/_generated/ 5 | python/pyarmnn/pyarmnn_docs-* 6 | python/pyarmnn/cmake_install.cmake 7 | python/pyarmnn/CMakeCache.txt 8 | python/pyarmnn/test/testdata/ 9 | env/ 10 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | # 5 | 6 | cmake_minimum_required (VERSION 3.0.2) # same as armnn 7 | project(pyarmnn) 8 | 9 | set(additional_cmake_files) 10 | list(APPEND additional_cmake_files 11 | cmake/GlobalConfig.cmake) 12 | 13 | foreach(cmake_file ${additional_cmake_files}) 14 | include(${cmake_file}) 15 | endforeach() 16 | 17 | if (BUILD_PYTHON_SRC OR BUILD_PYTHON_WHL) 18 | add_subdirectory(python/pyarmnn) 19 | else() 20 | message(STATUS "Nothing to build :)") 21 | endif() 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 ARM Limited. 
4 | Copyright 2020 NXP 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /SCR-pyarmnn-release.txt: -------------------------------------------------------------------------------- 1 | NXP Software Content Register 2 | 3 | -------------------------------------------- 4 | 5 | Package: pyarmnn-release 6 | Outgoing License: MIT 7 | License File: LICENSE 8 | Package Category: Machine Learning 9 | Type of content: source and binary 10 | Description and comments: A release package for Python3 wrappers for Arm NN inference engine 11 | Release Location: GitHub - https://github.com/NXPmicro 12 | Origin: Linaro (Arm+NXP collaboration) - https://www.linaro.org/engineering/artificial-intelligence/ 13 | Arm NN - https://review.mlplatform.org/ml/armnn 14 | 15 | -------------------------------------------- 16 | -------------------------------------------------------------------------------- /cmake/GlobalConfig.cmake: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | # 5 | 6 | option(BUILD_PYTHON_WHL "Build Python wheel package" OFF) 7 | option(BUILD_PYTHON_SRC "Build Python source package" OFF) 8 | option(ARMNN_LIB "Path to ArmNN libraries" OFF) 9 | option(ARMNN_INCLUDE "Path to ArmNN headers" OFF) 10 | 11 | # Set to release configuration by default 12 | if(NOT CMAKE_BUILD_TYPE) 13 | set(CMAKE_BUILD_TYPE "Release") 14 | endif() 15 | 16 | # Setting and checking the python environment to be able to build whl/src packages 17 | if(BUILD_PYTHON_WHL OR BUILD_PYTHON_SRC) 18 | find_package(Python3 REQUIRED COMPONENTS Interpreter Development) 19 | 20 | if(BUILD_PYTHON_WHL) 21 | message(STATUS "PyArmNN wheel package is enabled") 22 | else() 23 | message(STATUS "PyArmNN wheel package is disabled") 24 | endif() 25 | if(BUILD_PYTHON_SRC) 26 | message(STATUS "PyArmNN source package is enabled") 27 | else() 28 | message(STATUS "PyArmNN source package is disabled") 29 | endif() 30 | if(NOT ${Python3_FOUND}) 31 | message(FATAL_ERROR "Python 3.x not found") 32 | endif() 33 | message(STATUS "Python ${Python3_VERSION} found at ${Python3_EXECUTABLE}") 34 | if(NOT ${Python3_Development_FOUND}) 35 | message(FATAL_ERROR "Python development package not found") 36 | endif() 37 | if (DEFINED SWIG_DIR) 38 | if(EXISTS "${SWIG_DIR}/bin/swig") 39 | set(SWIG_EXECUTABLE "${SWIG_DIR}/bin/swig") 40 | 
execute_process(COMMAND ${SWIG_EXECUTABLE} -version
41 |                             OUTPUT_VARIABLE SWIG_version_output
42 |                             ERROR_VARIABLE SWIG_version_output
43 |                             RESULT_VARIABLE SWIG_version_result)
44 |             if(SWIG_version_result)
45 |                 message(WARNING "Command \"${SWIG_EXECUTABLE} -version\" failed with output:\n${SWIG_version_output}")
46 |                 find_package(SWIG)
47 |             else()
48 |                 string(REGEX REPLACE ".*SWIG Version[^0-9.]*\([0-9.]+\).*" "\\1"
49 |                        SWIG_version_output "${SWIG_version_output}")
50 |                 set(SWIG_VERSION ${SWIG_version_output} CACHE STRING "Swig version" FORCE)
51 |                 set(SWIG_FOUND True)
52 |             endif()
53 |         else()
54 |             find_package(SWIG)
55 |         endif()
56 |     else()
57 |         find_package(SWIG)
58 |     endif()
59 |     if (SWIG_EXECUTABLE)
60 |         message(STATUS "SWIG ${SWIG_VERSION} found at ${SWIG_EXECUTABLE}")
61 |         string(REPLACE "." ";" VERSION_LIST ${SWIG_VERSION})
62 |         list(GET VERSION_LIST 0 SWIG_VERSION_MAJOR)
63 |         list(GET VERSION_LIST 1 SWIG_VERSION_MINOR)
64 |         list(GET VERSION_LIST 2 SWIG_VERSION_PATCH)
65 |         if (${SWIG_VERSION_MAJOR} LESS 4)
66 |             message(FATAL_ERROR "SWIG version 4.x required")
67 |         endif()
68 |     else()
69 |         message(FATAL_ERROR "SWIG not found")
70 |     endif()
71 |     # if all goes well PYTHON_DEVENV_ENABLED can be checked in other cmakes
72 |     set(PYTHON_DEVENV_ENABLED ON)
73 | 
74 |     # ARMNN_INCLUDE and ARMNN_LIB must be set manually (via -D or as environment variables)
75 |     if (NOT ARMNN_INCLUDE)
76 |         if (DEFINED ENV{ARMNN_INCLUDE})
77 |             set(ARMNN_INCLUDE $ENV{ARMNN_INCLUDE})
78 |         else()
79 |             message(FATAL_ERROR "ARMNN_INCLUDE not set")
80 |         endif()
81 |     endif()
82 | 
83 |     if (NOT ARMNN_LIB)
84 |         if (DEFINED ENV{ARMNN_LIB})
85 |             set(ARMNN_LIB $ENV{ARMNN_LIB})
86 |         else()
87 |             message(FATAL_ERROR "ARMNN_LIB not set")
88 |         endif()
89 |     endif()
90 |     message(STATUS "ArmNN headers found at ${ARMNN_INCLUDE}")
91 |     message(STATUS "ArmNN libraries found at ${ARMNN_LIB}")
92 | endif()
93 | 
--------------------------------------------------------------------------------
/python/pyarmnn/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### C++ template
3 | # Prerequisites
4 | *.d
5 | 
6 | # Compiled Object files
7 | *.slo
8 | *.lo
9 | *.o
10 | *.obj
11 | 
12 | # Precompiled Headers
13 | *.gch
14 | *.pch
15 | 
16 | # Compiled Dynamic libraries
17 | *.so
18 | *.dylib
19 | *.dll
20 | 
21 | # Fortran module files
22 | *.mod
23 | *.smod
24 | 
25 | # Compiled Static libraries
26 | *.lai
27 | *.la
28 | *.a
29 | *.lib
30 | 
31 | # Executables
32 | *.exe
33 | *.out
34 | *.app
35 | ### Python template
36 | # Byte-compiled / optimized / DLL files
37 | __pycache__/
38 | *.py[cod]
39 | *$py.class
40 | 
41 | # C extensions
42 | *.so
43 | *.o
44 | 
45 | # Distribution / packaging
46 | .Python
47 | build/
48 | develop-eggs/
49 | dist/
50 | downloads/
51 | eggs/
52 | .eggs/
53 | lib/
54 | lib64/
55 | parts/
56 | sdist/
57 | var/
58 | wheels/
59 | *.egg-info/
60 | .installed.cfg
61 | *.egg
62 | MANIFEST
63 | 
64 | # Documentation
65 | docs
66 | 
67 | # PyInstaller
68 | # Usually these files are written by a python script from a template
69 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
70 | *.manifest 71 | *.spec 72 | 73 | # Installer logs 74 | pip-log.txt 75 | pip-delete-this-directory.txt 76 | 77 | # Unit test / coverage reports 78 | htmlcov/ 79 | .tox/ 80 | .coverage 81 | .coverage.* 82 | .cache 83 | nosetests.xml 84 | coverage.xml 85 | *.cover 86 | .hypothesis/ 87 | .pytest_cache/ 88 | 89 | # Translations 90 | *.mo 91 | *.pot 92 | 93 | # Django stuff: 94 | *.log 95 | local_settings.py 96 | db.sqlite3 97 | 98 | # Flask stuff: 99 | instance/ 100 | .webassets-cache 101 | 102 | # Scrapy stuff: 103 | .scrapy 104 | 105 | # Sphinx documentation 106 | docs/_build/ 107 | 108 | # PyBuilder 109 | target/ 110 | 111 | # Jupyter Notebook 112 | .ipynb_checkpoints 113 | 114 | # pyenv 115 | .python-version 116 | 117 | # celery beat schedule file 118 | celerybeat-schedule 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | ### JetBrains template 145 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 146 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 147 | 148 | # User-specific stuff 149 | .idea/**/workspace.xml 150 | .idea/**/tasks.xml 151 | .idea/**/usage.statistics.xml 152 | .idea/**/dictionaries 153 | .idea/**/shelf 154 | 155 | # Sensitive or high-churn files 156 | .idea/**/dataSources/ 157 | .idea/**/dataSources.ids 158 | .idea/**/dataSources.local.xml 159 | .idea/**/sqlDataSources.xml 160 | .idea/**/dynamic.xml 161 | .idea/**/uiDesigner.xml 162 | .idea/**/dbnavigator.xml 163 | 164 | # Gradle 165 | .idea/**/gradle.xml 166 | .idea/**/libraries 167 | 168 | # Gradle and Maven with auto-import 169 | # When using Gradle or Maven with auto-import, you should exclude module files, 170 | # since they will be recreated, and may cause churn. Uncomment if using 171 | # auto-import. 
172 | # .idea/modules.xml 173 | # .idea/*.iml 174 | # .idea/modules 175 | 176 | # CMake 177 | cmake-build-*/ 178 | 179 | # Mongo Explorer plugin 180 | .idea/**/mongoSettings.xml 181 | 182 | # File-based project format 183 | *.iws 184 | 185 | # IntelliJ 186 | out/ 187 | 188 | # mpeltonen/sbt-idea plugin 189 | .idea_modules/ 190 | 191 | # JIRA plugin 192 | atlassian-ide-plugin.xml 193 | 194 | # Cursive Clojure plugin 195 | .idea/replstate.xml 196 | 197 | # Crashlytics plugin (for Android Studio and IntelliJ) 198 | com_crashlytics_export_strings.xml 199 | crashlytics.properties 200 | crashlytics-build.properties 201 | fabric.properties 202 | 203 | # Editor-based Rest Client 204 | .idea/httpRequests 205 | 206 | generated_cxx/* 207 | !generated_cxx/.keep 208 | 209 | src/pyarmnn/_generated/* 210 | !src/pyarmnn/_generated/.keep 211 | !src/pyarmnn/_generated/*.py 212 | !src/pyarmnn/_generated/*.cpp 213 | .idea 214 | **/include 215 | -------------------------------------------------------------------------------- /python/pyarmnn/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | # 5 | set(SETUP_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/setup.py") 6 | set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py") 7 | set(SWIG_GENERATE_IN "${CMAKE_CURRENT_SOURCE_DIR}/swig_generate.py") 8 | set(SWIG_GENERATE "${CMAKE_CURRENT_BINARY_DIR}/swig_generate.py") 9 | set(OUT_WRAP "${CMAKE_CURRENT_BINARY_DIR}/pyarmnn.wrap.timestamp") 10 | 11 | configure_file(${SETUP_PY_IN} ${SETUP_PY} COPYONLY) 12 | configure_file(${SWIG_GENERATE_IN} ${SWIG_GENERATE} COPYONLY) 13 | 14 | # local env variables passed down to the python scripts 15 | # scripts can thus be used standalone 16 | set(ARMNN_ENV ARMNN_INCLUDE=${ARMNN_INCLUDE} 17 | ARMNN_LIB=${ARMNN_LIB} 18 | SWIG_EXECUTABLE=${SWIG_EXECUTABLE}) 19 | 20 | # common step - generates swig wrappers and builds the lib 21 | add_custom_command(OUTPUT ${OUT_WRAP} 22 | COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/README.md ${CMAKE_CURRENT_BINARY_DIR} 23 | COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/LICENSE ${CMAKE_CURRENT_BINARY_DIR} 24 | COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_BINARY_DIR}/src 25 | COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/test ${CMAKE_CURRENT_BINARY_DIR}/test 26 | COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --green "Clearing Python build ..." 27 | COMMAND ${Python3_EXECUTABLE} ${SETUP_PY} --quiet clean --all 28 | COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --green "Generating SWIG wrappers ..." 29 | COMMAND ${CMAKE_COMMAND} -E env ${ARMNN_ENV} ${Python3_EXECUTABLE} ${SWIG_GENERATE} -v 30 | COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --green "Building Python extensions ..." 31 | COMMAND ${CMAKE_COMMAND} -E env ${ARMNN_ENV} ${Python3_EXECUTABLE} ${SETUP_PY} --quiet build_ext --inplace 32 | COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/src/pyarmnn/_generated ${CMAKE_CURRENT_BINARY_DIR}/src/pyarmnn/_generated 33 | COMMAND ${CMAKE_COMMAND} -E touch ${OUT_WRAP}) 34 | 35 | # source package 36 | if(BUILD_PYTHON_SRC) 37 | set(OUT_SRC "${CMAKE_CURRENT_BINARY_DIR}/pyarmnn.src.timestamp") 38 | add_custom_command(OUTPUT ${OUT_SRC} 39 | COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --green "Building Python source package ..." 
40 | COMMAND ${CMAKE_COMMAND} -E env ${ARMNN_ENV} ${Python3_EXECUTABLE} ${SETUP_PY} --quiet sdist 41 | COMMAND ${CMAKE_COMMAND} -E touch ${OUT_SRC} 42 | DEPENDS ${OUT_WRAP}) 43 | endif() 44 | # wheel package 45 | if(BUILD_PYTHON_WHL) 46 | set(OUT_WHL "${CMAKE_CURRENT_BINARY_DIR}/pyarmnn.whl.timestamp") 47 | add_custom_command(OUTPUT ${OUT_WHL} 48 | COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --green "Building Python binary package ..." 49 | COMMAND ${CMAKE_COMMAND} -E env ${ARMNN_ENV} ${Python3_EXECUTABLE} ${SETUP_PY} --quiet bdist_wheel 50 | COMMAND ${CMAKE_COMMAND} -E touch ${OUT_WHL} 51 | DEPENDS ${OUT_WRAP}) 52 | endif() 53 | add_custom_target(pyarmnn ALL DEPENDS ${OUT_WRAP} ${OUT_SRC} ${OUT_WHL}) 54 | -------------------------------------------------------------------------------- /python/pyarmnn/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 ARM Limited. 4 | Copyright 2020 NXP 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /python/pyarmnn/Makefile: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.18 3 | 4 | # Default target executed when no arguments are given to make. 5 | default_target: all 6 | 7 | .PHONY : default_target 8 | 9 | # Allow only one "make -f Makefile2" at a time, but pass parallelism. 10 | .NOTPARALLEL: 11 | 12 | 13 | #============================================================================= 14 | # Special targets provided by cmake. 15 | 16 | # Disable implicit rules so canonical targets will work. 17 | .SUFFIXES: 18 | 19 | 20 | # Disable VCS-based implicit rules. 21 | % : %,v 22 | 23 | 24 | # Disable VCS-based implicit rules. 25 | % : RCS/% 26 | 27 | 28 | # Disable VCS-based implicit rules. 29 | % : RCS/%,v 30 | 31 | 32 | # Disable VCS-based implicit rules. 33 | % : SCCS/s.% 34 | 35 | 36 | # Disable VCS-based implicit rules. 37 | % : s.% 38 | 39 | 40 | .SUFFIXES: .hpux_make_needs_suffix_list 41 | 42 | 43 | # Command-line flag to silence nested $(MAKE). 44 | $(VERBOSE)MAKESILENT = -s 45 | 46 | #Suppress display of executed commands. 47 | $(VERBOSE).SILENT: 48 | 49 | # A target that is always out of date. 
50 | cmake_force: 51 | 52 | .PHONY : cmake_force 53 | 54 | #============================================================================= 55 | # Set environment variables for the build. 56 | 57 | # The shell in which to execute make rules. 58 | SHELL = /bin/sh 59 | 60 | # The CMake executable. 61 | CMAKE_COMMAND = /usr/local/bin/cmake 62 | 63 | # The command to remove a file. 64 | RM = /usr/local/bin/cmake -E rm -f 65 | 66 | # Escaping for special characters. 67 | EQUALS = = 68 | 69 | # The top-level source directory on which CMake was run. 70 | CMAKE_SOURCE_DIR = /home/root/pyarmnn-release 71 | 72 | # The top-level build directory on which CMake was run. 73 | CMAKE_BINARY_DIR = /home/root/pyarmnn-release 74 | 75 | #============================================================================= 76 | # Targets provided globally by CMake. 77 | 78 | # Special rule for the target rebuild_cache 79 | rebuild_cache: 80 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..." 81 | /usr/local/bin/cmake --regenerate-during-build -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) 82 | .PHONY : rebuild_cache 83 | 84 | # Special rule for the target rebuild_cache 85 | rebuild_cache/fast: rebuild_cache 86 | 87 | .PHONY : rebuild_cache/fast 88 | 89 | # Special rule for the target edit_cache 90 | edit_cache: 91 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake cache editor..." 92 | /usr/local/bin/ccmake -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) 93 | .PHONY : edit_cache 94 | 95 | # Special rule for the target edit_cache 96 | edit_cache/fast: edit_cache 97 | 98 | .PHONY : edit_cache/fast 99 | 100 | # The main all target 101 | all: cmake_check_build_system 102 | cd /home/root/pyarmnn-release && $(CMAKE_COMMAND) -E cmake_progress_start /home/root/pyarmnn-release/CMakeFiles /home/root/pyarmnn-release/python/pyarmnn//CMakeFiles/progress.marks 103 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 python/pyarmnn/all 104 | $(CMAKE_COMMAND) -E cmake_progress_start /home/root/pyarmnn-release/CMakeFiles 0 105 | .PHONY : all 106 | 107 | # The main clean target 108 | clean: 109 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 python/pyarmnn/clean 110 | .PHONY : clean 111 | 112 | # The main clean target 113 | clean/fast: clean 114 | 115 | .PHONY : clean/fast 116 | 117 | # Prepare targets for installation. 118 | preinstall: all 119 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 python/pyarmnn/preinstall 120 | .PHONY : preinstall 121 | 122 | # Prepare targets for installation. 123 | preinstall/fast: 124 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 python/pyarmnn/preinstall 125 | .PHONY : preinstall/fast 126 | 127 | # clear depends 128 | depend: 129 | cd /home/root/pyarmnn-release && $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1 130 | .PHONY : depend 131 | 132 | # Convenience name for target. 133 | python/pyarmnn/CMakeFiles/pyarmnn.dir/rule: 134 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 python/pyarmnn/CMakeFiles/pyarmnn.dir/rule 135 | .PHONY : python/pyarmnn/CMakeFiles/pyarmnn.dir/rule 136 | 137 | # Convenience name for target. 138 | pyarmnn: python/pyarmnn/CMakeFiles/pyarmnn.dir/rule 139 | 140 | .PHONY : pyarmnn 141 | 142 | # fast build rule for target. 
143 | pyarmnn/fast: 144 | cd /home/root/pyarmnn-release && $(MAKE) $(MAKESILENT) -f python/pyarmnn/CMakeFiles/pyarmnn.dir/build.make python/pyarmnn/CMakeFiles/pyarmnn.dir/build 145 | .PHONY : pyarmnn/fast 146 | 147 | # Help Target 148 | help: 149 | @echo "The following are some of the valid targets for this Makefile:" 150 | @echo "... all (the default if no target is provided)" 151 | @echo "... clean" 152 | @echo "... depend" 153 | @echo "... edit_cache" 154 | @echo "... rebuild_cache" 155 | @echo "... pyarmnn" 156 | .PHONY : help 157 | 158 | 159 | 160 | #============================================================================= 161 | # Special targets to cleanup operation of make. 162 | 163 | # Special rule to run CMake to check the build system integrity. 164 | # No rule that depends on this can have commands that come from listfiles 165 | # because they might be regenerated. 166 | cmake_check_build_system: 167 | cd /home/root/pyarmnn-release && $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 168 | .PHONY : cmake_check_build_system 169 | 170 | -------------------------------------------------------------------------------- /python/pyarmnn/conftest.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import os 4 | import platform 5 | 6 | import pytest 7 | 8 | ARCHITECTURES = set("x86_64 aarch64".split()) 9 | 10 | 11 | @pytest.fixture(scope="module") 12 | def data_folder_per_test(request): 13 | """ 14 | This fixture returns path to folder with test resources (one per test module) 15 | """ 16 | 17 | basedir, script = request.fspath.dirname, request.fspath.basename 18 | return str(os.path.join(basedir, "testdata", os.path.splitext(script)[0])) 19 | 20 | 21 | @pytest.fixture(scope="module") 22 | def shared_data_folder(request): 23 | """ 24 | This fixture returns path to folder with shared test resources among all tests 25 | """ 26 | 27 | return str(os.path.join(request.fspath.dirname, "testdata", "shared")) 28 | 29 | 30 | @pytest.fixture(scope="function") 31 | def tmpdir(tmpdir): 32 | """ 33 | This fixture returns path to temp folder. Fixture was added for py35 compatibility 34 | """ 35 | 36 | return str(tmpdir) 37 | 38 | 39 | def pytest_runtest_setup(item): 40 | supported_architectures = ARCHITECTURES.intersection(mark.name for mark in item.iter_markers()) 41 | arch = platform.machine() 42 | if supported_architectures and arch not in supported_architectures: 43 | pytest.skip("cannot run on platform {}".format(arch)) 44 | 45 | 46 | def pytest_configure(config): 47 | config.addinivalue_line( 48 | "markers", "aarch64: mark test to run only on aarch64" 49 | ) 50 | config.addinivalue_line( 51 | "markers", "x86_64: mark test to run only on x86_64" 52 | ) -------------------------------------------------------------------------------- /python/pyarmnn/docs_conf/config.mako: -------------------------------------------------------------------------------- 1 | <%! 2 | # Template configuration. Copy over in your template directory 3 | # (used with --template-dir) and adapt as required. 4 | html_lang = 'en' 5 | show_inherited_members = True 6 | extract_module_toc_into_sidebar = True 7 | list_class_variables_in_index = True 8 | sort_identifiers = True 9 | show_type_annotations = False 10 | 11 | # Show collapsed source code block next to each item. 12 | # Disabling this can improve rendering speed of large modules. 
13 | show_source_code = False
14 | 
15 | # A prefix to use for every HTML hyperlink in the generated documentation.
16 | # No prefix results in all links being relative.
17 | link_prefix = ''
18 | 
19 | # Set the style keyword such as 'atom-one-light' or 'github-gist'
20 | # Options: https://github.com/highlightjs/highlight.js/tree/master/src/styles
21 | # Demo: https://highlightjs.org/static/demo/
22 | hljs_style = 'github'
23 | 
24 | # If set, insert Google Analytics tracking code. Value is GA
25 | # tracking id (UA-XXXXXX-Y).
26 | google_analytics = ''
27 | 
28 | # If set, render LaTeX math syntax within \(...\) (inline equations),
29 | # or within \[...\] or $$...$$ or `.. math::` (block equations)
30 | # as nicely-formatted math formulas using MathJax.
31 | # Note: in Python docstrings, either all backslashes need to be escaped (\\)
32 | # or you need to use raw r-strings.
33 | latex_math = True
34 | %>
--------------------------------------------------------------------------------
/python/pyarmnn/examples/onnx_mobilenetv2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2020 NXP
3 | # SPDX-License-Identifier: MIT
4 | 
5 | import pyarmnn as ann
6 | import numpy as np
7 | import os
8 | from PIL import Image
9 | import example_utils as eu
10 | 
11 | 
12 | def preprocess_onnx(img: Image.Image, width: int, height: int, data_type, scale: float, mean: list,
13 |                     stddev: list):
14 |     """Preprocessing function for ONNX imagenet models based on:
15 |     https://github.com/onnx/models/blob/master/vision/classification/imagenet_inference.ipynb
16 | 
17 |     Args:
18 |         img (PIL.Image.Image): Loaded PIL.Image
19 |         width (int): Target image width
20 |         height (int): Target image height
21 |         data_type: Image datatype (np.uint8 or np.float32)
22 |         scale (float): Scaling factor
23 |         mean: RGB mean values
24 |         stddev: RGB standard deviation
25 | 
26 |     Returns:
27 |         np.array: Preprocessed image as a numpy array
28 |     """
29 |     img = img.resize((256, 256), Image.BILINEAR)
30 |     # rescaled to 256x256 above, now center crop to the target width/height
31 |     left = (256 - width) / 2
32 |     top = (256 - height) / 2
33 |     right = (256 + width) / 2
34 |     bottom = (256 + height) / 2
35 |     img = img.crop((left, top, right, bottom))
36 |     img = img.convert('RGB')
37 |     img = np.array(img)
38 |     img = np.reshape(img, (-1, 3))  # reshape to [RGB][RGB]...
39 |     img = ((img / scale) - mean) / stddev
40 |     # NHWC to NCHW conversion, by default NHWC is expected
41 |     # image is loaded as [RGB][RGB][RGB]... transposing it makes it [RRR...][GGG...][BBB...]
42 | img = np.transpose(img) 43 | img = img.flatten().astype(data_type) # flatten into a 1D tensor and convert to float32 44 | return img 45 | 46 | 47 | args = eu.parse_command_line() 48 | 49 | model_filename = 'mobilenetv2-1.0.onnx' 50 | labels_filename = 'synset.txt' 51 | archive_filename = 'mobilenetv2-1.0.zip' 52 | labels_url = 'https://s3.amazonaws.com/onnx-model-zoo/' + labels_filename 53 | model_url = 'https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/' + model_filename 54 | 55 | # Download resources 56 | image_filenames = eu.get_images(args.data_dir) 57 | 58 | model_filename, labels_filename = eu.get_model_and_labels(args.model_dir, model_filename, labels_filename, 59 | archive_filename, 60 | [model_url, labels_url]) 61 | 62 | # all 3 resources must exist to proceed further 63 | assert os.path.exists(labels_filename) 64 | assert os.path.exists(model_filename) 65 | assert image_filenames 66 | for im in image_filenames: 67 | assert (os.path.exists(im)) 68 | 69 | # Create a network from a model file 70 | net_id, parser, runtime = eu.create_onnx_network(model_filename) 71 | 72 | # Load input information from the model and create input tensors 73 | input_binding_info = parser.GetNetworkInputBindingInfo("data") 74 | 75 | # Load output information from the model and create output tensors 76 | output_binding_info = parser.GetNetworkOutputBindingInfo("mobilenetv20_output_flatten0_reshape0") 77 | output_tensors = ann.make_output_tensors([output_binding_info]) 78 | 79 | # Load labels 80 | labels = eu.load_labels(labels_filename) 81 | 82 | # Load images and resize to expected size 83 | images = eu.load_images(image_filenames, 84 | 224, 224, 85 | np.float32, 86 | 255.0, 87 | [0.485, 0.456, 0.406], 88 | [0.229, 0.224, 0.225], 89 | preprocess_onnx) 90 | 91 | eu.run_inference(runtime, net_id, images, labels, input_binding_info, output_binding_info) 92 | -------------------------------------------------------------------------------- /python/pyarmnn/examples/requirements.txt: -------------------------------------------------------------------------------- 1 | requests>=2.24.0 2 | urllib3>=1.25.8 3 | Pillow>=6.2.2 4 | numpy>=1.18.1 5 | -------------------------------------------------------------------------------- /python/pyarmnn/examples/tflite_mobilenetv1_quantized.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | 5 | import numpy as np 6 | import pyarmnn as ann 7 | import example_utils as eu 8 | import os 9 | 10 | args = eu.parse_command_line() 11 | 12 | # names of the files in the archive 13 | labels_filename = 'labels_mobilenet_quant_v1_224.txt' 14 | model_filename = 'mobilenet_v1_1.0_224_quant.tflite' 15 | archive_filename = 'mobilenet_v1_1.0_224_quant_and_labels.zip' 16 | 17 | archive_url = \ 18 | 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_1.0_224_quant_and_labels.zip' 19 | 20 | model_filename, labels_filename = eu.get_model_and_labels(args.model_dir, model_filename, labels_filename, 21 | archive_filename, archive_url) 22 | 23 | image_filenames = eu.get_images(args.data_dir) 24 | 25 | # all 3 resources must exist to proceed further 26 | assert os.path.exists(labels_filename) 27 | assert os.path.exists(model_filename) 28 | assert image_filenames 29 | for im in image_filenames: 30 | assert(os.path.exists(im)) 31 | 32 | # Create a network from the model file 33 | net_id, graph_id, parser, runtime = 
eu.create_tflite_network(model_filename)
34 | 
35 | # Load input information from the model
36 | # tflite has all the needed information in the model, unlike other formats
37 | input_names = parser.GetSubgraphInputTensorNames(graph_id)
38 | assert len(input_names) == 1  # there should be 1 input tensor in mobilenet
39 | 
40 | input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
41 | input_width = input_binding_info[1].GetShape()[1]
42 | input_height = input_binding_info[1].GetShape()[2]
43 | 
44 | # Load output information from the model and create output tensors
45 | output_names = parser.GetSubgraphOutputTensorNames(graph_id)
46 | assert len(output_names) == 1  # and only one output tensor
47 | output_binding_info = parser.GetNetworkOutputBindingInfo(graph_id, output_names[0])
48 | 
49 | # Load labels file
50 | labels = eu.load_labels(labels_filename)
51 | 
52 | # Load images and resize to expected size
53 | images = eu.load_images(image_filenames, input_width, input_height)
54 | 
55 | eu.run_inference(runtime, net_id, images, labels, input_binding_info, output_binding_info)
56 | 
--------------------------------------------------------------------------------
/python/pyarmnn/scripts/download_test_resources.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2020 NXP
3 | # SPDX-License-Identifier: MIT
4 | """Downloads and extracts resources for unit tests.
5 | 
6 | It is mandatory to run this script prior to running unit tests. Resources are stored as a tar.gz or a tar.bz2 archive and
7 | extracted into the test/testdata/shared folder.
8 | """
9 | 
10 | import tarfile
11 | import requests
12 | import os
13 | import uuid
14 | 
15 | SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
16 | EXTRACT_DIR = os.path.join(SCRIPTS_DIR, "..", "test")
17 | ARCHIVE_URL = "https://snapshots.linaro.org/components/pyarmnn-tests/pyarmnn_testdata_200500_20200415.tar.bz2"
18 | 
19 | 
20 | def download_resources(url, save_path):
21 |     # download archive - only tar.gz and tar.bz2 are supported
22 |     print("Downloading '{}'".format(url))
23 |     temp_filename = str(uuid.uuid4())
24 |     if url.endswith(".tar.bz2"):
25 |         temp_filename += ".tar.bz2"
26 |     elif url.endswith(".tar.gz"):
27 |         temp_filename += ".tar.gz"
28 |     else:
29 |         raise RuntimeError("Unsupported archive type: only .tar.gz and .tar.bz2 are supported.")
30 |     try:
31 |         r = requests.get(url, stream=True)
32 |     except requests.exceptions.RequestException as e:
33 |         raise RuntimeError("Unable to download file: {}".format(e))
34 |     file_path = os.path.join(save_path, temp_filename)
35 |     with open(file_path, 'wb') as f:
36 |         f.write(r.content)
37 | 
38 |     # extract and delete temp file
39 |     with tarfile.open(file_path, "r:bz2" if temp_filename.endswith(".tar.bz2") else "r:gz") as tar:
40 |         print("Extracting '{}'".format(file_path))
41 |         tar.extractall(save_path)
42 |     if os.path.exists(file_path):
43 |         print("Removing '{}'".format(file_path))
44 |         os.remove(file_path)
45 | 
46 | 
47 | download_resources(ARCHIVE_URL, EXTRACT_DIR)
48 | 
--------------------------------------------------------------------------------
/python/pyarmnn/scripts/generate_docs.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2019 Arm Ltd. All rights reserved.
2 | # Copyright 2020 NXP
3 | # SPDX-License-Identifier: MIT
4 | 
5 | import os
6 | import tarfile
7 | 
8 | import pyarmnn as ann
9 | import shutil
10 | 
11 | from typing import List, Union
12 | 
13 | from pdoc.cli import main
14 | 
15 | package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
16 | 
17 | def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
18 |     """Copies one or more files to a directory.
19 | 
20 |     Args:
21 |         file_paths (Union[List[str], str]): File path or list of file paths to copy.
22 |         target_dir_path (str): Target directory.
23 | 
24 |     Returns:
25 |         None
26 |     """
27 | 
28 |     file_paths = [file_paths] if isinstance(file_paths, str) else list(file_paths)
29 | 
30 |     if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
31 |         os.makedirs(target_dir_path)
32 | 
33 |     for file_path in file_paths:
34 |         if not (os.path.exists(file_path) and os.path.isfile(file_path)):
35 |             raise RuntimeError('Not a file: {}'.format(file_path))
36 | 
37 |         file_name = os.path.basename(file_path)
38 |         shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
39 | 
40 | 
41 | def archive_docs(path: str, version: str):
42 |     """Creates a tar archive of the generated documentation.
43 | 
44 |     Args:
45 |         path (str): Path which will be archived.
46 |         version (str): Version of Arm NN.
47 | 
48 |     Returns:
49 |         None
50 |     """
51 | 
52 |     output_filename = f'pyarmnn_docs-{version}.tar'
53 | 
54 |     with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar:
55 |         tar.add(path)
56 | 
57 | 
58 | if __name__ == "__main__":
59 |     readme_filename = os.path.join(package_dir, '..', '..', 'README.md')
60 |     with open(readme_filename, 'r') as readme_file:
61 |         top_level_pyarmnn_doc = ''.join(readme_file.readlines())
62 |         ann.__doc__ = top_level_pyarmnn_doc
63 | 
64 |     main()
65 |     target_path = os.path.join(package_dir, 'docs')
66 |     archive_docs(target_path, ann.__version__)
67 | 
--------------------------------------------------------------------------------
/python/pyarmnn/scripts/image_to_npy.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 NXP
2 | # SPDX-License-Identifier: MIT
3 | 
4 | import numpy as np
5 | from PIL import Image
6 | import argparse
7 | import os
8 | 
9 | 
10 | def imload(filename: str, im_width: int, im_height: int, datatype: str):
11 |     """Converts an image to a numpy array and resizes.
12 | 
13 |     Args:
14 |         filename (str): Image filename.
15 |         im_width (int): Image width.
16 |         im_height (int): Image height.
17 |         datatype (str): Datatype to convert to (float/uint8). Float scales to <0;1> range.
18 | 
19 |     Returns:
20 |         np.array: Image as a numpy array.
21 | """ 22 | 23 | img = Image.open(filename) 24 | img = img.resize((im_width, im_height)) 25 | img_rgb = img.convert('RGB') 26 | numpy_img_rgb = np.array(img_rgb) 27 | 28 | if datatype == "float": 29 | numpy_img_rgb = numpy_img_rgb.astype('f') / 255.0 30 | elif datatype == "uint8": 31 | numpy_img_rgb = numpy_img_rgb.astype(np.uint8) 32 | else: 33 | raise Exception("Unsupported datatype.") 34 | return numpy_img_rgb 35 | 36 | 37 | if __name__ == '__main__': 38 | parser = argparse.ArgumentParser(description='Converts a tensorflow frozen model to tflite.') 39 | parser.add_argument('--image', action='store', dest='image_file', 40 | help='Input image.') 41 | parser.add_argument('--output', action='store', dest='output_npy', 42 | help='Output NPY file.') 43 | parser.add_argument('--width', action='store', dest='image_width', 44 | help='Image width.') 45 | parser.add_argument('--height', action='store', dest='image_height', 46 | help='Image height.') 47 | parser.add_argument('--datatype', action='store', dest='datatype', default="float", 48 | help='Type of data (float, uint8).') 49 | 50 | args = parser.parse_args() 51 | 52 | np_arr = imload(args.image_file, int(args.image_width), int(args.image_height), args.datatype) 53 | output_filename = args.output_npy 54 | filename_base = os.path.basename(args.image_file) 55 | filename, ext = os.path.splitext(filename_base) 56 | if output_filename is None: 57 | output_filename = os.path.join(os.getcwd(), filename) 58 | else: 59 | dir_path = os.path.dirname(output_filename) 60 | # path does not exist 61 | if not os.path.exists(dir_path): 62 | os.makedirs(dir_path) 63 | # dir and not file is specified 64 | if os.path.isdir(output_filename): 65 | output_filename = os.path.join(output_filename, filename) 66 | 67 | np.save(output_filename, np_arr) 68 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | import inspect 5 | import sys 6 | import logging 7 | 8 | from ._generated.pyarmnn_version import GetVersion, GetMajorVersion, GetMinorVersion 9 | 10 | # Parsers 11 | try: 12 | from ._generated.pyarmnn_caffeparser import ICaffeParser 13 | except ImportError as err: 14 | logger = logging.getLogger(__name__) 15 | message = "Your ArmNN library instance does not support Caffe models parser functionality. " 16 | logger.warning("%s Skipped ICaffeParser import.", message) 17 | logger.debug(str(err)) 18 | 19 | 20 | def ICaffeParser(): 21 | """In case people try importing without having Arm NN built with this parser.""" 22 | raise RuntimeError(message) 23 | 24 | try: 25 | from ._generated.pyarmnn_onnxparser import IOnnxParser 26 | except ImportError as err: 27 | logger = logging.getLogger(__name__) 28 | message = "Your ArmNN library instance does not support Onnx models parser functionality. " 29 | logger.warning("%s Skipped IOnnxParser import.", message) 30 | logger.debug(str(err)) 31 | 32 | 33 | def IOnnxParser(): 34 | """In case people try importing without having Arm NN built with this parser.""" 35 | raise RuntimeError(message) 36 | 37 | try: 38 | from ._generated.pyarmnn_tfparser import ITfParser 39 | except ImportError as err: 40 | logger = logging.getLogger(__name__) 41 | message = "Your ArmNN library instance does not support TF models parser functionality. 
" 42 | logger.warning("%s Skipped ITfParser import.", message) 43 | logger.debug(str(err)) 44 | 45 | 46 | def ITfParser(): 47 | """In case people try importing without having Arm NN built with this parser.""" 48 | raise RuntimeError(message) 49 | 50 | try: 51 | from ._generated.pyarmnn_tfliteparser import ITfLiteParser 52 | except ImportError as err: 53 | logger = logging.getLogger(__name__) 54 | message = "Your ArmNN library instance does not support TF lite models parser functionality. " 55 | logger.warning("%s Skipped ITfLiteParser import.", message) 56 | logger.debug(str(err)) 57 | 58 | 59 | def ITfLiteParser(): 60 | """In case people try importing without having Arm NN built with this parser.""" 61 | raise RuntimeError(message) 62 | 63 | # Network 64 | from ._generated.pyarmnn import Optimize, OptimizerOptions, IOptimizedNetwork, IInputSlot, \ 65 | IOutputSlot, IConnectableLayer, INetwork 66 | 67 | # Backend 68 | from ._generated.pyarmnn import BackendId 69 | from ._generated.pyarmnn import IDeviceSpec 70 | 71 | # Tensors 72 | from ._generated.pyarmnn import TensorInfo, TensorShape 73 | 74 | # Runtime 75 | from ._generated.pyarmnn import IRuntime, CreationOptions, INetworkProperties 76 | 77 | # Profiler 78 | from ._generated.pyarmnn import IProfiler 79 | 80 | # Types 81 | from ._generated.pyarmnn import DataType_Float16, DataType_Float32, DataType_QAsymmU8, DataType_Signed32, \ 82 | DataType_Boolean, DataType_QSymmS16, DataType_QSymmS8, DataType_QAsymmS8 83 | from ._generated.pyarmnn import DataLayout_NCHW, DataLayout_NHWC 84 | 85 | from ._generated.pyarmnn import ActivationFunction_Abs, ActivationFunction_BoundedReLu, ActivationFunction_LeakyReLu, \ 86 | ActivationFunction_Linear, ActivationFunction_ReLu, ActivationFunction_Sigmoid, ActivationFunction_SoftReLu, \ 87 | ActivationFunction_Sqrt, ActivationFunction_Square, ActivationFunction_TanH, ActivationDescriptor 88 | from ._generated.pyarmnn import ArgMinMaxFunction_Max, ArgMinMaxFunction_Min, ArgMinMaxDescriptor 89 | from ._generated.pyarmnn import BatchNormalizationDescriptor, BatchToSpaceNdDescriptor 90 | from ._generated.pyarmnn import ComparisonDescriptor, ComparisonOperation_Equal, ComparisonOperation_Greater, \ 91 | ComparisonOperation_GreaterOrEqual, ComparisonOperation_Less, \ 92 | ComparisonOperation_LessOrEqual, ComparisonOperation_NotEqual 93 | from ._generated.pyarmnn import UnaryOperation_Abs, UnaryOperation_Exp, UnaryOperation_Sqrt, UnaryOperation_Rsqrt, \ 94 | UnaryOperation_Neg, ElementwiseUnaryDescriptor 95 | from ._generated.pyarmnn import Convolution2dDescriptor, DepthToSpaceDescriptor, DepthwiseConvolution2dDescriptor, \ 96 | DetectionPostProcessDescriptor, FakeQuantizationDescriptor, FullyConnectedDescriptor, \ 97 | InstanceNormalizationDescriptor, LstmDescriptor, L2NormalizationDescriptor, MeanDescriptor 98 | from ._generated.pyarmnn import NormalizationAlgorithmChannel_Across, NormalizationAlgorithmChannel_Within, \ 99 | NormalizationAlgorithmMethod_LocalBrightness, NormalizationAlgorithmMethod_LocalContrast, NormalizationDescriptor 100 | from ._generated.pyarmnn import PadDescriptor 101 | from ._generated.pyarmnn import PermutationVector, PermuteDescriptor 102 | from ._generated.pyarmnn import OutputShapeRounding_Ceiling, OutputShapeRounding_Floor, \ 103 | PaddingMethod_Exclude, PaddingMethod_IgnoreValue, PoolingAlgorithm_Average, PoolingAlgorithm_L2, \ 104 | PoolingAlgorithm_Max, Pooling2dDescriptor 105 | from ._generated.pyarmnn import ResizeMethod_Bilinear, ResizeMethod_NearestNeighbor, ResizeDescriptor, 
\ 106 | ReshapeDescriptor, SliceDescriptor, SpaceToBatchNdDescriptor, SpaceToDepthDescriptor, StandInDescriptor, \ 107 | StackDescriptor, StridedSliceDescriptor, SoftmaxDescriptor, TransposeConvolution2dDescriptor, \ 108 | SplitterDescriptor 109 | from ._generated.pyarmnn import ConcatDescriptor, CreateDescriptorForConcatenation 110 | 111 | from ._generated.pyarmnn import LstmInputParams, QuantizedLstmInputParams 112 | 113 | # Public API 114 | # Quantization 115 | from ._quantization.quantize_and_dequantize import quantize, dequantize 116 | 117 | # Tensor 118 | from ._tensor.tensor import Tensor 119 | from ._tensor.const_tensor import ConstTensor 120 | from ._tensor.workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray 121 | 122 | # Utilities 123 | from ._utilities.profiling_helper import ProfilerData, get_profiling_data 124 | 125 | from ._version import __version__, __arm_ml_version__ 126 | 127 | ARMNN_VERSION = GetVersion() 128 | 129 | 130 | def __check_version(): 131 | from ._version import check_armnn_version 132 | check_armnn_version(ARMNN_VERSION) 133 | 134 | 135 | __check_version() 136 | 137 | __all__ = [] 138 | 139 | __private_api_names = ['__check_version'] 140 | 141 | for name, obj in inspect.getmembers(sys.modules[__name__]): 142 | if inspect.isclass(obj) or inspect.isfunction(obj): 143 | if name not in __private_api_names: 144 | __all__.append(name) 145 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_generated/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_quantization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | 4 | from .quantize_and_dequantize import quantize, dequantize 5 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | """ 4 | This file contains functions relating to quantizing and dequantizing values. 5 | """ 6 | from .._generated.pyarmnn import Quantize_uint8_t, Quantize_int8_t, Quantize_int16_t, Quantize_int32_t, \ 7 | Dequantize_uint8_t, Dequantize_int8_t, Dequantize_int16_t, Dequantize_int32_t 8 | 9 | __DTYPE_TO_QUANTIZE_FUNCTION = { 10 | 'uint8': Quantize_uint8_t, 11 | 'int8': Quantize_int8_t, 12 | 'int16': Quantize_int16_t, 13 | 'int32': Quantize_int32_t 14 | } 15 | 16 | __DTYPE_TO_DEQUANTIZE_FUNCTION = { 17 | 'uint8': ((0, 255), Dequantize_uint8_t), 18 | 'int8': ((-128, 127), Dequantize_int8_t), 19 | 'int16': ((-32768, 32767), Dequantize_int16_t), 20 | 'int32': ((-2147483648, 2147483647), Dequantize_int32_t) 21 | } 22 | 23 | 24 | def quantize(value: float, scale: float, offset: int, target_dtype: str) -> int: 25 | """Quantize the given value to the given target datatype using Arm NN. 26 | 27 | This function can be used to convert a 32-bit floating point value into 8/16/32-bit signed 28 | integer or 8-bit unsigned integer values. 29 | 30 | Args: 31 | value (float): The value to be quantized. 
32 |         scale (float): A numeric constant that the value is divided by.
33 |         offset (int): A 'zero-point' used to 'shift' the integer range.
34 |         target_dtype (str): The target data type. Supported values: 'uint8', 'int8', 'int16', 'int32'.
35 | 
36 |     Returns:
37 |         int: A quantized 8-bit unsigned integer value or 8/16/32-bit signed integer value.
38 |     """
39 | 
40 |     if target_dtype not in __DTYPE_TO_QUANTIZE_FUNCTION:
41 |         raise ValueError("""Unexpected target datatype {} given.
42 |                          Armnn currently supports quantization to {} values.""".format(target_dtype, list(__DTYPE_TO_QUANTIZE_FUNCTION.keys())))
43 | 
44 |     return __DTYPE_TO_QUANTIZE_FUNCTION[target_dtype](float(value), scale, offset)
45 | 
46 | 
47 | def dequantize(value: int, scale: float, offset: float, from_dtype: str) -> float:
48 |     """Dequantize the given value from the given datatype using Arm NN.
49 | 
50 |     This function can be used to convert an 8-bit unsigned integer value or 8/16/32-bit signed
51 |     integer value into a 32-bit floating point value. Typically used when decoding an
52 |     output value from an output tensor on a quantized model.
53 | 
54 |     Args:
55 |         value (int): The value to be dequantized. The value may be a numpy numeric data type.
56 |         scale (float): A numeric constant that the value is multiplied by.
57 |         offset (float): A 'zero-point' used to 'shift' the integer range.
58 |         from_dtype (str): The data type 'value' represents. Supported values: 'uint8', 'int8', 'int16', 'int32'.
59 | 
60 |     Returns:
61 |         float: A dequantized 32-bit floating-point value.
62 |     """
63 | 
64 |     # specifies which function to use with given datatype and the value range for that data type.
65 |     if from_dtype not in __DTYPE_TO_DEQUANTIZE_FUNCTION:
66 |         raise ValueError("""Unexpected value datatype {} given.
67 |                          Armnn currently supports dequantization from {} values.""".format(from_dtype, list(__DTYPE_TO_DEQUANTIZE_FUNCTION.keys())))
68 | 
69 |     input_range = __DTYPE_TO_DEQUANTIZE_FUNCTION[from_dtype][0]
70 | 
71 |     if not input_range[0] <= value <= input_range[1]:
72 |         raise ValueError('Value is not within range of the given datatype {}'.format(from_dtype))
73 | 
74 |     return __DTYPE_TO_DEQUANTIZE_FUNCTION[from_dtype][1](int(value), scale, offset)
75 | 
--------------------------------------------------------------------------------
/python/pyarmnn/src/pyarmnn/_tensor/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT
3 | 
4 | from .const_tensor import ConstTensor
5 | from .tensor import Tensor
6 | from .workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray
7 | 
--------------------------------------------------------------------------------
/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT
3 | """
4 | This file contains the custom python implementation for Arm NN Const Tensor objects.
5 | """
6 | import numpy as np
7 | 
8 | from .._generated.pyarmnn import DataType_QAsymmU8, DataType_QSymmS8, DataType_QSymmS16, DataType_Signed32, \
9 |     DataType_QAsymmS8, DataType_Float32, DataType_Float16
10 | from .._generated.pyarmnn import ConstTensor as AnnConstTensor, TensorInfo, Tensor
11 | 
12 | 
13 | class ConstTensor(AnnConstTensor):
14 |     """Creates a PyArmNN ConstTensor object.
15 | 16 | A ConstTensor is a Tensor with an immutable data store. Typically, a ConstTensor 17 | is used to input data into a network when running inference. 18 | 19 | This class overrides the swig generated Tensor class. The aim of 20 | this is to have an easy to use public API for the ConstTensor objects. 21 | 22 | """ 23 | 24 | def __init__(self, *args): 25 | """ 26 | Supported tensor data types: 27 | `DataType_QAsymmU8`, 28 | `DataType_QAsymmS8`, 29 | `DataType_QSymmS16`, 30 | `DataType_QSymmS8`, 31 | `DataType_Signed32`, 32 | `DataType_Float32`, 33 | `DataType_Float16` 34 | 35 | Examples: 36 | Create empty ConstTensor 37 | >>> import pyarmnn as ann 38 | >>> import numpy as np 39 | >>> ann.ConstTensor() 40 | 41 | Create ConstTensor given tensor info and input data 42 | >>> input_data = np.array(...) 43 | >>> ann.ConstTensor(ann.TensorInfo(...), input_data) 44 | 45 | Create ConstTensor from another ConstTensor i.e. copy ConstTensor 46 | >>> ann.ConstTensor(ann.ConstTensor()) 47 | 48 | Create ConstTensor from tensor 49 | >>> ann.ConstTensor(ann.Tensor()) 50 | 51 | Args: 52 | tensor (Tensor, optional): Create a ConstTensor from a Tensor. 53 | const_tensor (ConstTensor, optional): Create a ConstTensor from a ConstTensor i.e. copy. 54 | tensor_info (TensorInfo, optional): Tensor information. 55 | input_data (ndarray): The numpy array will be transformed to a 56 | buffer according to type returned by `TensorInfo.GetDataType`. 57 | Input data values type must correspond to data type returned by 58 | `TensorInfo.GetDataType`. 59 | 60 | Raises: 61 | TypeError: Unsupported input data type. 62 | ValueError: Unsupported tensor data type and incorrect input data size. 63 | """ 64 | self.__memory_area = None 65 | 66 | # TensorInfo as first argument and numpy array as second 67 | if len(args) > 1 and isinstance(args[0], TensorInfo): 68 | if isinstance(args[1], np.ndarray): 69 | self.__create_memory_area(args[0].GetDataType(), args[0].GetNumBytes(), args[0].GetNumElements(), 70 | args[1]) 71 | super().__init__(args[0], self.__memory_area.data) 72 | else: 73 | raise TypeError('Data must be provided as a numpy array.') 74 | 75 | # copy constructor - reference to memory area is passed from copied const 76 | # tensor and armnn's copy constructor is called 77 | elif len(args) > 0 and isinstance(args[0], (ConstTensor, Tensor)): 78 | self.__memory_area = args[0].get_memory_area() 79 | super().__init__(args[0]) 80 | 81 | # empty tensor 82 | elif len(args) == 0: 83 | super().__init__() 84 | 85 | else: 86 | raise ValueError('Incorrect number of arguments or type of arguments provided to create Const Tensor.') 87 | 88 | def __copy__(self) -> 'ConstTensor': 89 | """ Make copy of a const tensor. 90 | 91 | Make const tensor copyable using the python copy operation. 92 | 93 | Note: 94 | The tensor memory area is NOT copied. Instead, the new tensor maintains a 95 | reference to the same memory area as the old tensor. 96 | 97 | Example: 98 | Copy empty tensor 99 | >>> from copy import copy 100 | >>> import pyarmnn as ann 101 | >>> tensor = ann.ConstTensor() 102 | >>> copied_tensor = copy(tensor) 103 | 104 | Returns: 105 | Tensor: a copy of the tensor object provided. 106 | 107 | """ 108 | return ConstTensor(self) 109 | 110 | @staticmethod 111 | def __check_size(data: np.ndarray, num_bytes: int, num_elements: int): 112 | """ Check the size of the input data against the number of bytes provided by tensor info. 113 | 114 | Args: 115 | data (ndarray): Input data. 
116 | num_bytes (int): Number of bytes required by tensor info. 117 | num_elements: Number of elements required by tensor info. 118 | 119 | Raises: 120 | ValueError: number of bytes in input data does not match tensor info. 121 | 122 | """ 123 | size_in_bytes = data.nbytes 124 | elements = data.size 125 | 126 | if size_in_bytes != num_bytes: 127 | raise ValueError( 128 | "ConstTensor requires {} bytes, {} provided. " 129 | "Is your input array data type ({}) aligned with TensorInfo?".format(num_bytes, size_in_bytes, 130 | data.dtype)) 131 | if elements != num_elements: 132 | raise ValueError("ConstTensor requires {} elements, {} provided.".format(num_elements, elements)) 133 | 134 | def __create_memory_area(self, data_type: int, num_bytes: int, num_elements: int, data: np.ndarray): 135 | """ Create the memory area used by the tensor to output its results. 136 | 137 | Args: 138 | data_type (int): The type of data that will be stored in the memory area. 139 | See DataType_*. 140 | num_bytes (int): Determines the size of the memory area that will be created. 141 | num_elements (int): Determines number of elements in memory area. 142 | data (ndarray): Input data as numpy array. 143 | 144 | """ 145 | np_data_type_mapping = {DataType_QAsymmU8: np.uint8, 146 | DataType_QAsymmS8: np.int8, 147 | DataType_QSymmS8: np.int8, 148 | DataType_Float32: np.float32, 149 | DataType_QSymmS16: np.int16, 150 | DataType_Signed32: np.int32, 151 | DataType_Float16: np.float16} 152 | 153 | if data_type not in np_data_type_mapping: 154 | raise ValueError("The data type provided for this Tensor is not supported: {}".format(data_type)) 155 | 156 | if np_data_type_mapping[data_type] != data.dtype: 157 | raise TypeError("Expected data to have type {} for type {} but instead got numpy.{}".format(np_data_type_mapping[data_type], data_type, data.dtype)) 158 | 159 | self.__check_size(data, num_bytes, num_elements) 160 | 161 | self.__memory_area = data 162 | self.__memory_area.flags.writeable = False 163 | 164 | def get_memory_area(self) -> np.ndarray: 165 | """ Get values that are stored by the tensor. 166 | 167 | Returns: 168 | ndarray: Tensor data (as numpy array). 169 | 170 | """ 171 | return self.__memory_area 172 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_tensor/tensor.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | """ 4 | This file contains the custom python implementation for Arm NN Tensor objects. 5 | """ 6 | import numpy as np 7 | 8 | from .._generated.pyarmnn import Tensor as annTensor, TensorInfo, DataType_QAsymmU8, DataType_QSymmS8, \ 9 | DataType_QAsymmS8, DataType_Float32, DataType_QSymmS16, DataType_Signed32, DataType_Float16 10 | 11 | 12 | class Tensor(annTensor): 13 | """Creates a PyArmNN Tensor object. 14 | 15 | This class overrides the swig generated Tensor class. The aim of 16 | this is to create an easy to use public api for the Tensor object. 17 | 18 | Memory is allocated and managed by this class, avoiding the need to manage 19 | a separate memory area for the tensor compared to the swig generated api. 20 | 21 | """ 22 | 23 | def __init__(self, *args): 24 | """ Create Tensor object. 
25 | 
26 |         Supported tensor data types:
27 |             `DataType_QAsymmU8`,
28 |             `DataType_QAsymmS8`,
29 |             `DataType_QSymmS16`,
30 |             `DataType_QSymmS8`,
31 |             `DataType_Signed32`,
32 |             `DataType_Float32`,
33 |             `DataType_Float16`
34 | 
35 |         Examples:
36 |             Create an empty tensor
37 |             >>> import pyarmnn as ann
38 |             >>> ann.Tensor()
39 | 
40 |             Create tensor given tensor information
41 |             >>> ann.Tensor(ann.TensorInfo(...))
42 | 
43 |             Create tensor from another tensor i.e. copy a tensor
44 |             >>> ann.Tensor(ann.Tensor())
45 | 
46 |         Args:
47 |             tensor (Tensor, optional): Create Tensor from a Tensor i.e. copy.
48 |             tensor_info (TensorInfo, optional): Tensor information.
49 | 
50 |         Raises:
51 |             TypeError: Unsupported input data type.
52 |             ValueError: An appropriate constructor could not be found with the provided arguments.
53 | 
54 |         """
55 |         self.__memory_area = None
56 | 
57 |         # TensorInfo as first argument, we need to create memory area manually
58 |         if len(args) > 0 and isinstance(args[0], TensorInfo):
59 |             self.__create_memory_area(args[0].GetDataType(), args[0].GetNumElements())
60 |             super().__init__(args[0], self.__memory_area.data)
61 | 
62 |         # copy constructor - reference to memory area is passed from copied tensor
63 |         # and armnn's copy constructor is called
64 |         elif len(args) > 0 and isinstance(args[0], Tensor):
65 |             self.__memory_area = args[0].get_memory_area()
66 |             super().__init__(args[0])
67 | 
68 |         # empty constructor
69 |         elif len(args) == 0:
70 |             super().__init__()
71 | 
72 |         else:
73 |             raise ValueError('Incorrect number of arguments or type of arguments provided to create Tensor.')
74 | 
75 |     def __copy__(self) -> 'Tensor':
76 |         """ Make copy of a tensor.
77 | 
78 |         Make tensor copyable using the python copy operation.
79 | 
80 |         Note:
81 |             The tensor memory area is NOT copied. Instead, the new tensor maintains a
82 |             reference to the same memory area as the old tensor.
83 | 
84 |         Example:
85 |             Copy empty tensor
86 |             >>> from copy import copy
87 |             >>> import pyarmnn as ann
88 |             >>> tensor = ann.Tensor()
89 |             >>> copied_tensor = copy(tensor)
90 | 
91 |         Returns:
92 |             Tensor: a copy of the tensor object provided.
93 | 
94 |         """
95 |         return Tensor(self)
96 | 
97 |     def __create_memory_area(self, data_type: int, num_elements: int):
98 |         """ Create the memory area used by the tensor to output its results.
99 | 
100 |         Args:
101 |             data_type (int): The type of data that will be stored in the memory area.
102 |                              See DataType_*.
103 |             num_elements (int): Determines the size of the memory area that will be created.
104 | 
105 |         """
106 |         np_data_type_mapping = {DataType_QAsymmU8: np.uint8,
107 |                                 DataType_QAsymmS8: np.int8,
108 |                                 DataType_QSymmS8: np.int8,
109 |                                 DataType_Float32: np.float32,
110 |                                 DataType_QSymmS16: np.int16,
111 |                                 DataType_Signed32: np.int32,
112 |                                 DataType_Float16: np.float16}
113 | 
114 |         if data_type not in np_data_type_mapping:
115 |             raise ValueError("The data type provided for this Tensor is not supported.")
116 | 
117 |         self.__memory_area = np.empty(shape=(num_elements,), dtype=np_data_type_mapping[data_type])
118 | 
119 |     def get_memory_area(self) -> np.ndarray:
120 |         """ Get values that are stored by the tensor.
121 | 
122 |         Returns:
123 |             ndarray: Tensor data (as numpy array).
124 | 
125 |         """
126 |         return self.__memory_area
127 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT 3 | """ 4 | This file contains functions relating to WorkloadTensors. 5 | WorkloadTensors are the inputTensors and outputTensors that are consumed by IRuntime.EnqueueWorkload. 6 | """ 7 | from typing import Union, List, Tuple 8 | 9 | import numpy as np 10 | 11 | from .tensor import Tensor 12 | from .const_tensor import ConstTensor 13 | 14 | 15 | def make_input_tensors(inputs_binding_info: List[Tuple], 16 | input_data: List[np.ndarray]) -> List[Tuple[int, ConstTensor]]: 17 | """Returns `inputTensors` to be used with `IRuntime.EnqueueWorkload`. 18 | 19 | This is the primary function to call when you want to produce `inputTensors` for `IRuntime.EnqueueWorkload`. 20 | The output is a list of tuples containing ConstTensors with a corresponding input tensor id. 21 | The output should be used directly with `IRuntime.EnqueueWorkload`. 22 | This function works for single or multiple input data and binding information. 23 | 24 | Examples: 25 | Creating inputTensors. 26 | >>> import pyarmnn as ann 27 | >>> import numpy as np 28 | >>> 29 | >>> parser = ann.ITfLiteParser() 30 | >>> ... 31 | >>> example_image = np.array(...) 32 | >>> input_binding_info = parser.GetNetworkInputBindingInfo(...) 33 | >>> 34 | >>> input_tensors = ann.make_input_tensors([input_binding_info], [example_image]) 35 | 36 | Args: 37 | inputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for input tensors obtained from 38 | `GetNetworkInputBindingInfo`. 39 | input_data (list ndarrays): Tensor data to be used for inference. 40 | 41 | Returns: 42 | list: `inputTensors` - A list of tuples (`int` , `ConstTensor`). 43 | 44 | 45 | Raises: 46 | ValueError: If length of `inputs_binding_info` and `input_data` are not the same. 47 | """ 48 | if len(inputs_binding_info) != len(input_data): 49 | raise ValueError("Length of 'inputs_binding_info' does not match length of 'input_data'") 50 | 51 | input_tensors = [] 52 | 53 | for in_bind_info, in_data in zip(inputs_binding_info, input_data): 54 | in_tensor_id = in_bind_info[0] 55 | in_tensor_info = in_bind_info[1] 56 | input_tensors.append((in_tensor_id, ConstTensor(in_tensor_info, in_data))) 57 | 58 | return input_tensors 59 | 60 | 61 | def make_output_tensors(outputs_binding_info: List[Tuple]) -> List[Tuple[int, Tensor]]: 62 | """Returns `outputTensors` to be used with `IRuntime.EnqueueWorkload`. 63 | 64 | This is the primary function to call when you want to produce `outputTensors` for `IRuntime.EnqueueWorkload`. 65 | The output is a list of tuples containing Tensors with a corresponding output tensor id. 66 | The output should be used directly with `IRuntime.EnqueueWorkload`. 67 | 68 | Examples: 69 | Creating outputTensors. 70 | >>> import pyarmnn as ann 71 | >>> 72 | >>> parser = ann.ITfLiteParser() 73 | >>> ... 74 | >>> output_binding_info = parser.GetNetworkOutputBindingInfo(...) 75 | >>> 76 | >>> output_tensors = ann.make_output_tensors([output_binding_info]) 77 | 78 | Args: 79 | outputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for output tensors obtained from 80 | `GetNetworkOutputBindingInfo`. 81 | 82 | Returns: 83 | list: `outputTensors` - A list of tuples (`int`, `Tensor`). 
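
        The sketch below shows how the result pairs with `make_input_tensors`
        and `workload_tensors_to_ndarray` for a full inference round trip
        (`runtime`, `net_id`, `input_tensors` and `output_binding_info` are
        assumed to already exist, as in the examples above):

        >>> output_tensors = ann.make_output_tensors([output_binding_info])
        >>> runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
        >>> results = ann.workload_tensors_to_ndarray(output_tensors)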
84 |     """
85 |     output_tensors = []
86 | 
87 |     for out_bind_info in outputs_binding_info:
88 |         out_tensor_id = out_bind_info[0]
89 |         out_tensor_info = out_bind_info[1]
90 |         output_tensors.append((out_tensor_id, Tensor(out_tensor_info)))
91 | 
92 |     return output_tensors
93 | 
94 | 
95 | def workload_tensors_to_ndarray(workload_tensors: List[Tuple[int, Union[Tensor, ConstTensor]]]) -> List[np.ndarray]:
96 |     """Returns a list of the underlying tensor data as ndarrays from `inputTensors` or `outputTensors`.
97 | 
98 |     We refer to `inputTensors` and `outputTensors` as workload tensors because
99 |     they are used with `IRuntime.EnqueueWorkload`.
100 |     Although this function can be used on either `inputTensors` or `outputTensors`, the main use of this function
101 |     is to collect results from `outputTensors` after `IRuntime.EnqueueWorkload` has been called.
102 | 
103 |     Examples:
104 |         Getting results after inference.
105 |         >>> import pyarmnn as ann
106 |         >>>
107 |         >>> ...
108 |         >>> runtime = ann.IRuntime(...)
109 |         >>> ...
110 |         >>> runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
111 |         >>>
112 |         >>> inference_results = ann.workload_tensors_to_ndarray(output_tensors)
113 | 
114 |     Args:
115 |         workload_tensors (inputTensors or outputTensors): `inputTensors` or `outputTensors` to get data from. See
116 |             `make_input_tensors` and `make_output_tensors`.
117 | 
118 |     Returns:
119 |         list: List of `ndarrays` for the underlying tensor data from given `inputTensors` or `outputTensors`.
120 |     """
121 |     arrays = []
122 |     for _, tensor in workload_tensors:
123 |         # reshape the flat memory area to the tensor's actual shape
124 |         arrays.append(tensor.get_memory_area().reshape(list(tensor.GetShape())))
125 | 
126 |     return arrays
127 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_utilities/__init__.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT
3 | 
4 | from .profiling_helper import ProfilerData, get_profiling_data
5 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT
3 | """
4 | This file contains functions relating to the use of the Arm NN profiler within PyArmNN.
5 | """
6 | import json
7 | from collections import namedtuple
8 | 
9 | ProfilerData = namedtuple('ProfilerData', ['inference_data', 'per_workload_execution_data'])
10 | ProfilerData.__doc__ = """Container to hold the profiling inference data, and the profiling data per workload.
11 | 
12 | Contains:
13 |     inference_data (dict): holds end-to-end inference performance data. Keys:
14 |         'time_unit' - timer units.
15 |         'execution_time' - list of total inference execution times for each inference run.
16 |     per_workload_execution_data (dict): holds per-operation performance data, keyed by operation name.
17 |         Each operation entry has:
18 |         'time_unit' - timer units.
19 |         'execution_time' - list of total execution times for each inference run.
20 |         'backend' - backend used for this operation.
21 | 
22 | Examples:
23 | 
24 |     >>> data = get_profiling_data(profiler)
25 |     >>> print(data)
26 |     ProfilerData(inference_data={'time_unit': 'us',
27 |                                  'execution_time': [8901372.972]},
28 |                  per_workload_execution_data={'CopyMemGeneric_Execute_#3': {'time_unit': 'us',
29 |                                                                             'execution_time': [28.941],
30 |                                                                             'backend': 'Unknown'},
31 |                                               'RefConvolution2dWorkload_Execute_#5': {'time_unit': 'us',
32 |                                                                                       'execution_time': [126838.071],
33 |                                                                                       'backend': 'CpuRef'},
34 |                                               'RefDepthwiseConvolution2dWorkload_Execute_#6': {'time_unit': 'us',
35 |                                                                                                'execution_time': [49886.208],
36 |                                                                                                'backend': 'CpuRef'}
37 |                                               ...etc
38 |                                              }
39 |                 )
40 | """
41 | 
42 | 
43 | def get_profiling_data(profiler: 'IProfiler') -> ProfilerData:
44 |     """Reads the IProfiler object passed in, extracts the relevant data
45 |     and returns it in a ProfilerData container.
46 | 
47 |     Args:
48 |         profiler (IProfiler): The IProfiler object to be parsed.
49 | 
50 |     Returns:
51 |         ProfilerData: A container holding the relevant data extracted from the profiler output.
52 |     """
53 | 
54 |     top_level_dict = json.loads(profiler.as_json())
55 |     armnn_data = top_level_dict["ArmNN"]
56 |     inference_measurements = armnn_data["inference_measurements_#1"]
57 |     execution_data = inference_measurements["Execute_#2"]
58 | 
59 |     workload_data = {}
60 |     inference_data = {}
61 |     for exec_key, exec_value in execution_data.items():
62 |         # Check all items with a type.
63 |         if "type" in exec_value and exec_value["type"] == "Event":
64 |             for event_key, event_value in exec_value.items():
65 |                 if event_key.startswith("Wall clock time_#") and event_value["type"] == "Measurement":
66 |                     time_data = __get_wall_clock_times__(event_value)
67 |                     time_data["backend"] = __get_backend(exec_key)
68 |                     workload_data[exec_key] = time_data
69 |         # This is the total inference time map
70 |         if exec_key.startswith("Wall clock time_#") and exec_value["type"] == "Measurement":
71 |             time_data = __get_wall_clock_times__(exec_value)
72 |             inference_data.update(time_data)
73 |     return ProfilerData(inference_data=inference_data, per_workload_execution_data=workload_data)
74 | 
75 | 
76 | def __get_wall_clock_times__(wall_clock_item):
77 |     execution_times = wall_clock_item["raw"]
78 |     time_data = {}
79 |     raw_data = []
80 |     for time in execution_times:
81 |         raw_data.append(time)
82 |     time_data["time_unit"] = wall_clock_item["unit"]
83 |     time_data["execution_time"] = raw_data
84 |     return time_data
85 | 
86 | 
87 | def __get_backend(exec_key):
88 |     if "ref" in exec_key.lower():
89 |         return "CpuRef"
90 |     elif "neon" in exec_key.lower():
91 |         return "CpuAcc"
92 |     elif "cl" in exec_key.lower():
93 |         return "GpuAcc"
94 |     elif "ethos" in exec_key.lower():
95 |         return "EthosNAcc"
96 |     else:
97 |         return "Unknown"
98 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/_version.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # Copyright 2020 NXP 3 | # SPDX-License-Identifier: MIT 4 | import os 5 | 6 | version_info = (20, 2, 0) 7 | 8 | __dev_version_env = os.getenv("PYARMNN_DEV_VER", "") 9 | 10 | if __dev_version_env: 11 | __dev_version = "dev0" 12 | try: 13 | __dev_version = "dev{}".format(int(__dev_version_env)) 14 | except ValueError: 15 | __dev_version = str(__dev_version_env) 16 | 17 | version_info = (*version_info, __dev_version) 18 | 19 | __version__ = '.'.join(str(c) for c in version_info) 20 | __arm_ml_version__ = '2{:03d}{:02d}{:02d}'.format(version_info[0], version_info[1], version_info[2]) 21 | 22 | 23 | def check_armnn_version(installed_armnn_version, expected_armnn_version=__arm_ml_version__): 24 | expected_armnn_version = expected_armnn_version[:-2] # cut off minor patch version 25 | installed_armnn_version = installed_armnn_version[:-2] # cut off minor patch version 26 | assert expected_armnn_version == installed_armnn_version, \ 27 | "Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version) 28 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %module pyarmnn 6 | %{ 7 | #define SWIG_FILE_WITH_INIT 8 | #include "armnn/Types.hpp" 9 | %} 10 | 11 | //typemap definitions and other common stuff 12 | %include "standard_header.i" 13 | 14 | //armnn api submodules 15 | %include "modules/armnn_backend.i" 16 | %include "modules/armnn_types.i" 17 | %include "modules/armnn_descriptors.i" 18 | %include "modules/armnn_lstmparam.i" 19 | %include "modules/armnn_network.i" 20 | %include "modules/armnn_profiler.i" 21 | %include "modules/armnn_runtime.i" 22 | %include "modules/armnn_tensor.i" 23 | %include "modules/armnn_types_utils.i" 24 | 25 | // Clear exception typemap. 26 | %exception; 27 | 28 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %module pyarmnn_caffeparser 6 | %{ 7 | #define SWIG_FILE_WITH_INIT 8 | #include "armnnCaffeParser/ICaffeParser.hpp" 9 | #include "armnn/INetwork.hpp" 10 | %} 11 | 12 | //typemap definitions and other common stuff 13 | %include "standard_header.i" 14 | 15 | namespace std { 16 | %template(BindingPointInfo) pair; 17 | %template(MapStringTensorShape) map; 18 | %template(StringVector) vector; 19 | } 20 | 21 | namespace armnnCaffeParser 22 | { 23 | 24 | %feature("docstring", 25 | " 26 | Interface for creating a parser object using Caffe (http://caffe.berkeleyvision.org/) caffemodel files. 27 | 28 | Parsers are used to automatically construct Arm NN graphs from model files. 29 | 30 | ") ICaffeParser; 31 | 32 | %nodefaultctor ICaffeParser; 33 | class ICaffeParser 34 | { 35 | public: 36 | // Documentation 37 | %feature("docstring", 38 | " 39 | Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name. 40 | 41 | Args: 42 | name (str): Name of the input. 
43 | 
44 |     Returns:
45 |         tuple: (`int`, `TensorInfo`)
46 |     ") GetNetworkInputBindingInfo;
47 | 
48 |     %feature("docstring",
49 |     "
50 |     Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name.
51 | 
52 |     Args:
53 |         name (str): Name of the output.
54 | 
55 |     Returns:
56 |         tuple: (`int`, `TensorInfo`)
57 |     ") GetNetworkOutputBindingInfo;
58 | 
59 |     std::pair GetNetworkInputBindingInfo(const std::string& name);
60 |     std::pair GetNetworkOutputBindingInfo(const std::string& name);
61 | };
62 | 
63 | %extend ICaffeParser {
64 |     // This is not a substitute for the default constructor of the Arm NN class. It tells swig to create a custom __init__
65 |     // method for the ICaffeParser python object that will use the static factory method to do the job.
66 | 
67 |     ICaffeParser() {
68 |         return armnnCaffeParser::ICaffeParser::CreateRaw();
69 |     }
70 | 
71 |     // The following does not replace the real destructor of the Arm NN class.
72 |     // It creates a function that will be called when the swig object goes out of scope to clean up resources,
73 |     // so the user doesn't need to call ICaffeParser::Destroy manually.
74 |     // `$self` is a pointer to the extracted Arm NN ICaffeParser object.
75 | 
76 |     ~ICaffeParser() {
77 |         armnnCaffeParser::ICaffeParser::Destroy($self);
78 |     }
79 | 
80 |     %feature("docstring",
81 |     "
82 |     Create the network from a Caffe caffemodel binary file on disk.
83 | 
84 |     Args:
85 |         graphFile (str): Path to the caffe model to be parsed.
86 |         inputShapes (dict): A dict mapping the input name (`str`) to its `TensorShape` information for the network.
87 |         requestedOutputs (list): A list of the output tensor names.
88 | 
89 |     Returns:
90 |         INetwork: INetwork object for the parsed Caffe model.
91 |     ") CreateNetworkFromBinaryFile;
92 | 
93 |     %newobject CreateNetworkFromBinaryFile;
94 |     armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile,
95 |                                                  const std::map& inputShapes,
96 |                                                  const std::vector& requestedOutputs) {
97 |         return $self->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs).release();
98 |     }
99 | }
100 | }
101 | 
102 | // Clear exception typemap.
103 | %exception;
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn_onnxparser.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %module pyarmnn_onnxparser
6 | %{
7 | #define SWIG_FILE_WITH_INIT
8 | #include "armnnOnnxParser/IOnnxParser.hpp"
9 | #include "armnn/INetwork.hpp"
10 | %}
11 | 
12 | //typemap definitions and other common stuff
13 | %include "standard_header.i"
14 | 
15 | namespace std {
16 |     %template(BindingPointInfo) pair;
17 |     %template(MapStringTensorShape) map;
18 |     %template(StringVector) vector;
19 | }
20 | 
21 | namespace armnnOnnxParser
22 | {
23 | %feature("docstring",
24 | "
25 | Interface for creating a parser object using ONNX (https://onnx.ai/) onnx files.
26 | 
27 | Parsers are used to automatically construct Arm NN graphs from model files.
28 | 
29 | ") IOnnxParser;
30 | 
31 | %nodefaultctor IOnnxParser;
32 | class IOnnxParser
33 | {
34 | public:
35 |     %feature("docstring",
36 |     "
37 |     Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
38 | 
39 |     Args:
40 |         name (str): Name of the input node.
41 | 
42 |     Returns:
43 |         tuple: (`int`, `TensorInfo`)
44 |     ") GetNetworkInputBindingInfo;
45 |     std::pair GetNetworkInputBindingInfo(const std::string& name);
46 | 
47 |     %feature("docstring",
48 |     "
49 |     Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name.
50 | 
51 |     Args:
52 |         name (str): Name of the output node.
53 | 
54 |     Returns:
55 |         tuple: (`int`, `TensorInfo`)
56 |     ") GetNetworkOutputBindingInfo;
57 |     std::pair GetNetworkOutputBindingInfo(const std::string& name);
58 | };
59 | 
60 | %extend IOnnxParser {
61 |     // This is not a substitute for the default constructor of the Arm NN class. It tells swig to create a custom __init__
62 |     // method for the IOnnxParser python object that will use the static factory method to do the job.
63 |     IOnnxParser() {
64 |         return armnnOnnxParser::IOnnxParser::CreateRaw();
65 |     }
66 | 
67 |     // The following does not replace the real destructor of the Arm NN class.
68 |     // It creates a function that will be called when the swig object goes out of scope to clean up resources,
69 |     // so the user doesn't need to call IOnnxParser::Destroy manually.
70 |     // `$self` is a pointer to the extracted Arm NN IOnnxParser object.
71 |     ~IOnnxParser() {
72 |         armnnOnnxParser::IOnnxParser::Destroy($self);
73 |     }
74 | 
75 |     %feature("docstring",
76 |     "
77 |     Create the network from a binary file on disk.
78 | 
79 |     Args:
80 |         graphFile (str): Path to the onnx model to be parsed.
81 | 
82 |     Returns:
83 |         INetwork: Parsed network.
84 | 
85 |     Raises:
86 |         RuntimeError: If the model file was not found.
87 |     ") CreateNetworkFromBinaryFile;
88 |     %newobject CreateNetworkFromBinaryFile;
89 |     armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
90 |         return $self->CreateNetworkFromBinaryFile(graphFile).release();
91 |     }
92 | }
93 | 
94 | }
95 | // Clear exception typemap.
96 | %exception;
97 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %module pyarmnn_tfliteparser
6 | %{
7 | #include "armnnTfLiteParser/ITfLiteParser.hpp"
8 | #include "armnn/Types.hpp"
9 | #include "armnn/INetwork.hpp"
10 | %}
11 | 
12 | //typemap definitions and other common stuff
13 | %include "standard_header.i"
14 | 
15 | namespace std {
16 |     %template(BindingPointInfo) pair;
17 |     %template(MapStringTensorShape) map;
18 |     %template(StringVector) vector;
19 | }
20 | 
21 | namespace armnnTfLiteParser
22 | {
23 | %feature("docstring",
24 | "
25 | Interface for creating a parser object using TfLite (https://www.tensorflow.org/lite) tflite files.
26 | 
27 | Parsers are used to automatically construct Arm NN graphs from model files.
28 | 
29 | ") ITfLiteParser;
30 | %nodefaultctor ITfLiteParser;
31 | class ITfLiteParser
32 | {
33 | public:
34 |     %feature("docstring",
35 |     "
36 |     Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
37 |     Args:
38 |         subgraphId (int): The subgraph id.
39 |         name (str): Name of the input.
40 | 
41 |     Returns:
42 |         tuple: (`int`, `TensorInfo`).
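
    A minimal usage sketch (the model path and input name below are
    hypothetical placeholders, not files from this repository):

    >>> parser = ITfLiteParser()
    >>> network = parser.CreateNetworkFromBinaryFile('./model.tflite')
    >>> input_binding_info = parser.GetNetworkInputBindingInfo(0, 'input')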
43 |     ") GetNetworkInputBindingInfo;
44 |     std::pair GetNetworkInputBindingInfo(size_t subgraphId, const std::string& name);
45 | 
46 |     %feature("docstring",
47 |     "
48 |     Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name and subgraph id.
49 | 
50 |     Args:
51 |         subgraphId (int): The subgraph id.
52 |         name (str): Name of the output.
53 | 
54 |     Returns:
55 |         tuple: (`int`, `TensorInfo`).
56 |     ") GetNetworkOutputBindingInfo;
57 |     std::pair GetNetworkOutputBindingInfo(size_t subgraphId, const std::string& name);
58 | 
59 |     %feature("docstring",
60 |     "
61 |     Return the number of subgraphs in the parsed model.
62 |     Returns:
63 |         int: The number of subgraphs.
64 |     ") GetSubgraphCount;
65 |     size_t GetSubgraphCount();
66 | 
67 |     %feature("docstring",
68 |     "
69 |     Return the input tensor names for a given subgraph.
70 | 
71 |     Args:
72 |         subgraphId (int): The subgraph id.
73 | 
74 |     Returns:
75 |         list: A list of the input tensor names for the given subgraph.
76 |     ") GetSubgraphInputTensorNames;
77 |     std::vector GetSubgraphInputTensorNames(size_t subgraphId);
78 | 
79 |     %feature("docstring",
80 |     "
81 |     Return the output tensor names for a given subgraph.
82 | 
83 |     Args:
84 |         subgraphId (int): The subgraph id.
85 | 
86 |     Returns:
87 |         list: A list of the output tensor names for the given subgraph.
88 |     ") GetSubgraphOutputTensorNames;
89 |     std::vector GetSubgraphOutputTensorNames(size_t subgraphId);
90 | };
91 | 
92 | %extend ITfLiteParser {
93 |     // This is not a substitute for the default constructor of the Arm NN class. It tells swig to create a custom __init__
94 |     // method for the ITfLiteParser python object that will use the static factory method to do the job.
95 | 
96 |     ITfLiteParser() {
97 |         return armnnTfLiteParser::ITfLiteParser::CreateRaw();
98 |     }
99 | 
100 |     // The following does not replace the real destructor of the Arm NN class.
101 |     // It creates a function that will be called when the swig object goes out of scope to clean up resources,
102 |     // so the user doesn't need to call ITfLiteParser::Destroy manually.
103 |     // `$self` is a pointer to the extracted Arm NN ITfLiteParser object.
104 | 
105 |     ~ITfLiteParser() {
106 |         armnnTfLiteParser::ITfLiteParser::Destroy($self);
107 |     }
108 | 
109 |     %feature("docstring",
110 |     "
111 |     Create the network from a flatbuffers binary file.
112 | 
113 |     Args:
114 |         graphFile (str): Path to the tflite model to be parsed.
115 | 
116 |     Returns:
117 |         INetwork: Parsed network.
118 | 
119 |     Raises:
120 |         RuntimeError: If the model file was not found.
121 |     ") CreateNetworkFromBinaryFile;
122 | 
123 |     %newobject CreateNetworkFromBinaryFile;
124 |     armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
125 |         return $self->CreateNetworkFromBinaryFile(graphFile).release();
126 |     }
127 | 
128 | }
129 | 
130 | }
131 | // Clear exception typemap.
132 | %exception;
133 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn_tfparser.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %module pyarmnn_tfparser
6 | %{
7 | #define SWIG_FILE_WITH_INIT
8 | #include "armnnTfParser/ITfParser.hpp"
9 | #include "armnn/INetwork.hpp"
10 | %}
11 | 
12 | //typemap definitions and other common stuff
13 | %include "standard_header.i"
14 | 
15 | namespace std {
16 |     %template(BindingPointInfo) pair;
17 |     %template(MapStringTensorShape) map;
18 |     %template(StringVector) vector;
19 | }
20 | 
21 | namespace armnnTfParser
22 | {
23 | %feature("docstring",
24 | "
25 | Interface for creating a parser object using TensorFlow (https://www.tensorflow.org/) frozen pb files.
26 | 
27 | Parsers are used to automatically construct Arm NN graphs from model files.
28 | 
29 | ") ITfParser;
30 | %nodefaultctor ITfParser;
31 | class ITfParser
32 | {
33 | public:
34 |     %feature("docstring",
35 |     "
36 |     Retrieve binding info (layer id and `TensorInfo`) for the network input identified by the given layer name.
37 | 
38 |     Args:
39 |         name (str): Name of the input.
40 | 
41 |     Returns:
42 |         tuple: (`int`, `TensorInfo`).
43 |     ") GetNetworkInputBindingInfo;
44 |     std::pair GetNetworkInputBindingInfo(const std::string& name);
45 | 
46 |     %feature("docstring",
47 |     "
48 |     Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name.
49 | 
50 |     Args:
51 |         name (str): Name of the output.
52 | 
53 |     Returns:
54 |         tuple: (`int`, `TensorInfo`).
55 |     ") GetNetworkOutputBindingInfo;
56 |     std::pair GetNetworkOutputBindingInfo(const std::string& name);
57 | };
58 | 
59 | %extend ITfParser {
60 |     // This is not a substitute for the default constructor of the Arm NN class. It tells swig to create a custom __init__
61 |     // method for the ITfParser python object that will use the static factory method to do the job.
62 | 
63 |     ITfParser() {
64 |         return armnnTfParser::ITfParser::CreateRaw();
65 |     }
66 | 
67 |     // The following does not replace the real destructor of the Arm NN class.
68 |     // It creates a function that will be called when the swig object goes out of scope to clean up resources,
69 |     // so the user doesn't need to call ITfParser::Destroy manually.
70 |     // `$self` is a pointer to the extracted Arm NN ITfParser object.
71 | 
72 |     ~ITfParser() {
73 |         armnnTfParser::ITfParser::Destroy($self);
74 |     }
75 | 
76 |     %feature("docstring",
77 |     "
78 |     Create the network from a frozen TensorFlow protobuf (.pb) file.
79 | 
80 |     Args:
81 |         graphFile (str): Path to the tf model to be parsed.
82 |         inputShapes (dict): A dict containing the input name as a key and `TensorShape` as a value.
83 |         requestedOutputs (list of str): A list of the output tensor names.
84 | 
85 |     Returns:
86 |         INetwork: Parsed network.
87 | 
88 |     Raises:
89 |         RuntimeError: If the model file was not found.
90 |     ") CreateNetworkFromBinaryFile;
91 |     %newobject CreateNetworkFromBinaryFile;
92 |     armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile,
93 |                                                  const std::map& inputShapes,
94 |                                                  const std::vector& requestedOutputs) {
95 |         return $self->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs).release();
96 |     }
97 | 
98 | }
99 | 
100 | }
101 | // Clear exception typemap.
102 | %exception;
103 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/armnn_version.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %module pyarmnn_version
6 | 
7 | %include "std_string.i"
8 | 
9 | %{
10 | #define SWIG_FILE_WITH_INIT
11 | #include "armnn/Version.hpp"
12 | %}
13 | 
14 | %{
15 | std::string GetVersion()
16 | {
17 |     return ARMNN_VERSION;
18 | };
19 | 
20 | std::string GetMajorVersion()
21 | {
22 |     return STRINGIFY_VALUE(ARMNN_MAJOR_VERSION);
23 | };
24 | 
25 | std::string GetMinorVersion()
26 | {
27 |     return STRINGIFY_VALUE(ARMNN_MINOR_VERSION);
28 | };
29 | %}
30 | %feature("docstring",
31 | "
32 | Returns the Arm NN library full version: MAJOR + MINOR + INCREMENTAL.
33 | 
34 | Returns:
35 |     str: Full version of Arm NN installed.
36 | 
37 | ") GetVersion;
38 | std::string GetVersion();
39 | 
40 | %feature("docstring",
41 | "
42 | Returns the Arm NN library major version: the year of the release.
43 | 
44 | Returns:
45 |     str: Major version of Arm NN installed.
46 | 
47 | ") GetMajorVersion;
48 | std::string GetMajorVersion();
49 | 
50 | %feature("docstring",
51 | "
52 | Returns the Arm NN library minor version: the month of the year of the release.
53 | 
54 | Returns:
55 |     str: Minor version of Arm NN installed.
56 | 
57 | ") GetMinorVersion;
58 | std::string GetMinorVersion();
59 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_backend.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %{
6 | #include "armnn/BackendId.hpp"
7 | %}
8 | 
9 | namespace std {
10 |     %template(BackendIdVector) vector;
11 |     %template(BackendIdSet) unordered_set;
12 | }
13 | 
14 | namespace armnn
15 | {
16 | 
17 | class BackendId
18 | {
19 | public:
20 |     %feature("docstring",
21 |     "
22 |     Creates a backend id instance.
23 |     Supported backend ids: 'CpuRef', 'CpuAcc', 'GpuAcc', 'EthosNAcc'.
24 | 
25 |     Args:
26 |         id (str): Computation backend identification.
27 |     ") BackendId;
28 | 
29 |     BackendId(const std::string& id);
30 | 
31 |     %feature("docstring",
32 |     "
33 |     Checks if this backend id is the CPU reference implementation ('CpuRef').
34 |     Returns:
35 |         bool: True if the backend is the CPU reference implementation, False otherwise.
36 | 
37 |     ") IsCpuRef;
38 |     bool IsCpuRef();
39 | 
40 |     %feature("docstring",
41 |     "
42 |     Returns the backend identification.
43 | 
44 |     >>> backendId = BackendId('CpuRef')
45 |     >>> assert 'CpuRef' == str(backendId)
46 |     >>> assert 'CpuRef' == backendId.Get()
47 | 
48 |     Returns:
49 |         str: Backend identification.
50 | 
51 |     ") Get;
52 |     const std::string& Get();
53 | };
54 | 
55 | %extend BackendId {
56 | 
57 |     std::string __str__() {
58 |         return $self->Get();
59 |     }
60 | 
61 | }
62 | 
63 | using BackendIdVector = std::vector;
64 | using BackendIdSet = std::unordered_set;
65 | }
66 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_lstmparam.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %{
6 | #include "armnn/LstmParams.hpp"
7 | #include "armnn/QuantizedLstmParams.hpp"
8 | %}
9 | 
10 | namespace armnn
11 | {
12 | 
13 | %feature("docstring",
14 | "
15 | Long Short-Term Memory layer input parameters.
16 | 
17 | See `INetwork.AddLstmLayer()`.
18 | Operation described by the following equations: 19 | 20 | \[i_t=\sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) \\\\ 21 | f_t=\sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) \\\\ 22 | C_t=clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) \\\\ 23 | o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) \\\\ 24 | h_t = clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})\ if\ there\ is\ a\ projection; \\\\ 25 | h_t = o_t \odot g(C_t)\ otherwise. \] 26 | Where: 27 | \(x_t\) - input; 28 | \(i_t\) - input gate; 29 | \(f_t\) - forget gate; 30 | \(C_t\) - cell state; 31 | \(o_t\) - output; 32 | \(h_t\) - output state; 33 | \(\sigma\) - logistic sigmoid function; 34 | \(g\) - cell input and cell output activation function, see `LstmDescriptor.m_ActivationFunc`; 35 | \(t_{cell}\) - threshold for clipping the cell state, see `LstmDescriptor.m_ClippingThresCell`; 36 | \(t_{proj}\) - threshold for clipping the projected output, see `LstmDescriptor.m_ClippingThresProj`; 37 | 38 | Contains: 39 | m_InputToInputWeights (ConstTensor): \(W_{xi}\), input-to-input weight matrix. 40 | m_InputToForgetWeights (ConstTensor): \(W_{xf}\), input-to-forget weight matrix. 41 | m_InputToCellWeights (ConstTensor): \(W_{xc}\), input-to-cell weight matrix. 42 | m_InputToOutputWeights (ConstTensor): \(W_{xo}\), input-to-output weight matrix. 43 | 44 | m_RecurrentToInputWeights (ConstTensor): \(W_{hi}\), recurrent-to-input weight matrix. 45 | m_RecurrentToForgetWeights (ConstTensor): \(W_{hf}\), recurrent-to-forget weight matrix. 46 | m_RecurrentToCellWeights (ConstTensor): \(W_{hc}\), recurrent-to-cell weight matrix. 47 | m_RecurrentToOutputWeights (ConstTensor): \(W_{ho}\), recurrent-to-output weight matrix. 48 | 49 | m_CellToInputWeights (ConstTensor): \(W_{ci}\), cell-to-input weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`. 50 | m_CellToForgetWeights (ConstTensor): \(W_{cf}\), cell-to-forget weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`. 51 | m_CellToOutputWeights (ConstTensor): \(W_{co}\), cell-to-output weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`. 52 | 53 | m_InputGateBias (ConstTensor): \(b_i\), input gate bias. 54 | m_ForgetGateBias (ConstTensor): \(b_f\), forget gate bias. 55 | m_CellBias (ConstTensor): \(b_c\), cell bias. 56 | m_OutputGateBias (ConstTensor): \(b_o\), output gate bias. 57 | 58 | m_ProjectionWeights (ConstTensor): \(W_{proj}\), projection weight matrix. 59 | Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True. 60 | m_ProjectionBias (ConstTensor): \(b_{proj}\), projection bias. 61 | Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True. 62 | m_InputLayerNormWeights (ConstTensor): normalisation weights for input, 63 | has effect if `LstmDescriptor.m_LayerNormEnabled` set to True. 64 | m_ForgetLayerNormWeights (ConstTensor): normalisation weights for forget gate, 65 | has effect if `LstmDescriptor.m_LayerNormEnabled` set to True. 66 | m_CellLayerNormWeights (ConstTensor): normalisation weights for current cell, 67 | has effect if `LstmDescriptor.m_LayerNormEnabled` set to True. 68 | m_OutputLayerNormWeights (ConstTensor): normalisation weights for output gate, 69 | has effect if `LstmDescriptor.m_LayerNormEnabled` set to True. 
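
A hedged sketch of populating the struct from Python; `weights_info`,
`weights_np`, `bias_info` and `bias_np` are placeholder tensor info objects
and numpy arrays, not values from this codebase:

>>> params = LstmInputParams()
>>> params.m_InputToForgetWeights = ConstTensor(weights_info, weights_np)
>>> params.m_ForgetGateBias = ConstTensor(bias_info, bias_np)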
70 | 71 | ") LstmInputParams; 72 | struct LstmInputParams 73 | { 74 | LstmInputParams(); 75 | 76 | const armnn::ConstTensor* m_InputToInputWeights; 77 | const armnn::ConstTensor* m_InputToForgetWeights; 78 | const armnn::ConstTensor* m_InputToCellWeights; 79 | const armnn::ConstTensor* m_InputToOutputWeights; 80 | const armnn::ConstTensor* m_RecurrentToInputWeights; 81 | const armnn::ConstTensor* m_RecurrentToForgetWeights; 82 | const armnn::ConstTensor* m_RecurrentToCellWeights; 83 | const armnn::ConstTensor* m_RecurrentToOutputWeights; 84 | const armnn::ConstTensor* m_CellToInputWeights; 85 | const armnn::ConstTensor* m_CellToForgetWeights; 86 | const armnn::ConstTensor* m_CellToOutputWeights; 87 | const armnn::ConstTensor* m_InputGateBias; 88 | const armnn::ConstTensor* m_ForgetGateBias; 89 | const armnn::ConstTensor* m_CellBias; 90 | const armnn::ConstTensor* m_OutputGateBias; 91 | const armnn::ConstTensor* m_ProjectionWeights; 92 | const armnn::ConstTensor* m_ProjectionBias; 93 | const armnn::ConstTensor* m_InputLayerNormWeights; 94 | const armnn::ConstTensor* m_ForgetLayerNormWeights; 95 | const armnn::ConstTensor* m_CellLayerNormWeights; 96 | const armnn::ConstTensor* m_OutputLayerNormWeights; 97 | }; 98 | 99 | %feature("docstring", 100 | " 101 | Quantized Long Short-Term Memory layer input parameters. 102 | 103 | See `INetwork.AddQuantizedLstmLayer()`. 104 | Operation described by the following equations: 105 | 106 | \[i_t=\sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) \\\\ 107 | f_t=\sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) \\\\ 108 | C_t=clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) \\\\ 109 | o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) \\\\ 110 | h_t = clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})\ if\ there\ is\ a\ projection; \\\\ 111 | h_t = o_t \odot g(C_t)\ otherwise. \] 112 | Where: 113 | \(x_t\) - input; 114 | \(i_t\) - input gate; 115 | \(f_t\) - forget gate; 116 | \(C_t\) - cell state; 117 | \(o_t\) - output; 118 | \(h_t\) - output state; 119 | \(\sigma\) - logistic sigmoid function; 120 | \(g\) - cell input and cell output activation function, see `LstmDescriptor.m_ActivationFunc`; 121 | \(t_{cell}\) - threshold for clipping the cell state, see `LstmDescriptor.m_ClippingThresCell`; 122 | \(t_{proj}\) - threshold for clipping the projected output, see `LstmDescriptor.m_ClippingThresProj`; 123 | 124 | Contains: 125 | m_InputToInputWeights (ConstTensor): \(W_{xi}\), input-to-input weight matrix. 126 | m_InputToForgetWeights (ConstTensor): \(W_{xf}\), input-to-forget weight matrix. 127 | m_InputToCellWeights (ConstTensor): \(W_{xc}\), input-to-cell weight matrix. 128 | m_InputToOutputWeights (ConstTensor): \(W_{xo}\), input-to-output weight matrix. 129 | 130 | m_RecurrentToInputWeights (ConstTensor): \(W_{hi}\), recurrent-to-input weight matrix. 131 | m_RecurrentToForgetWeights (ConstTensor): \(W_{hf}\), recurrent-to-forget weight matrix. 132 | m_RecurrentToCellWeights (ConstTensor): \(W_{hc}\), recurrent-to-cell weight matrix. 133 | m_RecurrentToOutputWeights (ConstTensor): \(W_{ho}\), recurrent-to-output weight matrix. 134 | 135 | m_InputGateBias (ConstTensor): \(b_i\), input gate bias. 136 | m_ForgetGateBias (ConstTensor): \(b_f\), forget gate bias. 137 | m_CellBias (ConstTensor): \(b_c\), cell bias. 138 | m_OutputGateBias (ConstTensor): \(b_o\), output gate bias. 
139 | ") QuantizedLstmInputParams; 140 | struct QuantizedLstmInputParams 141 | { 142 | QuantizedLstmInputParams(); 143 | 144 | const armnn::ConstTensor* m_InputToInputWeights; 145 | const armnn::ConstTensor* m_InputToForgetWeights; 146 | const armnn::ConstTensor* m_InputToCellWeights; 147 | const armnn::ConstTensor* m_InputToOutputWeights; 148 | const armnn::ConstTensor* m_RecurrentToInputWeights; 149 | const armnn::ConstTensor* m_RecurrentToForgetWeights; 150 | const armnn::ConstTensor* m_RecurrentToCellWeights; 151 | const armnn::ConstTensor* m_RecurrentToOutputWeights; 152 | const armnn::ConstTensor* m_InputGateBias; 153 | const armnn::ConstTensor* m_ForgetGateBias; 154 | const armnn::ConstTensor* m_CellBias; 155 | const armnn::ConstTensor* m_OutputGateBias; 156 | }; 157 | 158 | 159 | } 160 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_profiler.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %{ 6 | #include "armnn/IProfiler.hpp" 7 | %} 8 | 9 | namespace armnn 10 | { 11 | 12 | %feature("docstring", 13 | " 14 | Interface for profiling Arm NN. See `IRuntime.GetProfiler`. 15 | 16 | IProfiler object allows you to enable profiling and get various profiling results. 17 | 18 | ") IProfiler; 19 | %nodefaultctor IProfiler; 20 | %nodefaultdtor IProfiler; 21 | class IProfiler 22 | { 23 | public: 24 | 25 | %feature("docstring", 26 | " 27 | Sets the profiler to start/stop profiling. 28 | 29 | Args: 30 | enableProfiling (bool): Flag to enable/disable profiling. 31 | 32 | ") EnableProfiling; 33 | 34 | void EnableProfiling(bool enableProfiling); 35 | 36 | %feature("docstring", 37 | " 38 | Checks if profiling is enabled. 39 | 40 | Returns: 41 | bool: If profiling is enabled or not. 42 | 43 | ") IsProfilingEnabled; 44 | 45 | bool IsProfilingEnabled(); 46 | }; 47 | 48 | %extend IProfiler { 49 | 50 | %feature("docstring", 51 | " 52 | Gets the string value of the profiling events analysis log. 53 | 54 | Returns: 55 | str: The profiling events analysis log. 56 | 57 | ") event_log; 58 | 59 | std::string event_log() 60 | { 61 | std::ostringstream oss; 62 | $self->AnalyzeEventsAndWriteResults(oss); 63 | return oss.str(); 64 | } 65 | 66 | %feature("docstring", 67 | " 68 | Gets the profiling log as the JSON string. 69 | 70 | Returns: 71 | str: Profiling log as JSON formatted string. 72 | 73 | ") as_json; 74 | 75 | std::string as_json() 76 | { 77 | std::ostringstream oss; 78 | $self->Print(oss); 79 | return oss.str(); 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %{
6 | #include "armnn/IRuntime.hpp"
7 | #include 
8 | #include 
9 | #include 
10 | %}
11 | 
12 | namespace std {
13 |     %template() pair;
14 |     %template(IntPair) pair;
15 |     %template(ConstTensorPair) pair;
16 |     %template(TensorPair) pair;
17 | 
18 |     %template(InputTensorsVector) vector>;
19 |     %template(OutputTensorsVector) vector>;
20 | }
21 | 
22 | %include 
23 | 
24 | %shared_ptr(IGpuAccTunedParameters);
25 | 
26 | #pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS
27 | 
28 | %{
29 | typedef armnn::IRuntime::CreationOptions CreationOptions;
30 | %}
31 | 
32 | struct CreationOptions
33 | {
34 |     %feature("docstring",
35 |     "
36 |     Structure for holding creation options. For the majority of cases it is fine to leave the values at their defaults.
37 | 
38 |     Contains:
39 |         m_GpuAccTunedParameters (IGpuAccTunedParameters): If set, uses the GpuAcc tuned parameters from the given object
40 |                                                           when executing GPU workloads. It will also be updated with new
41 |                                                           tuned parameters if it is configured to do so.
42 | 
43 |         m_EnableGpuProfiling (bool): Setting this flag will allow the user to obtain GPU profiling information from
44 |                                      the runtime.
45 | 
46 |         m_DynamicBackendsPath (string): Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS
47 |                                         compiler directive. Only a single path is allowed for the override.
48 | 
49 |     ") CreationOptions;
50 | 
51 |     CreationOptions();
52 |     std::shared_ptr m_GpuAccTunedParameters;
53 |     bool m_EnableGpuProfiling;
54 |     std::string m_DynamicBackendsPath;
55 | };
56 | 
57 | namespace armnn
58 | {
59 | 
60 | struct INetworkProperties
61 | {
62 |     %feature("docstring",
63 |     "
64 |     Structure for holding network properties.
65 | 
66 |     Contains:
67 |         m_ImportEnabled (bool): Enable import.
68 | 
69 |         m_ExportEnabled (bool): Enable export.
70 | 
71 |     ") INetworkProperties;
72 |     INetworkProperties(bool importEnabled = false, bool exportEnabled = false);
73 | 
74 |     const bool m_ImportEnabled;
75 |     const bool m_ExportEnabled;
76 | };
77 | 
78 | %feature("docstring",
79 | "
80 | Interface for runtime objects.
81 | 
82 | Runtime objects are responsible for performing inference on an `IOptimizedNetwork`.
83 | 
84 | Args:
85 |     options (CreationOptions): CreationOptions data struct.
86 | 
87 | ") IRuntime;
88 | %nodefaultctor IRuntime;
89 | class IRuntime
90 | {
91 | public:
92 | 
93 |     %ignore
94 |     armnn::IRuntime::UnloadNetwork(NetworkId networkId);
95 | 
96 |     %ignore
97 |     armnn::IRuntime::EnqueueWorkload(NetworkId networkId,
98 |                                      const std::vector>& inputTensors,
99 |                                      const std::vector>& outputTensors);
100 | 
101 |     %feature("docstring",
102 |     "
103 |     Get information relating to a network's input tensor.
104 | 
105 |     Args:
106 |         networkId (int): Unique ID of the network being run.
107 |         layerId (int): Unique ID of the input layer.
108 | 
109 |     Returns:
110 |         TensorInfo: Information relating to the input tensor of a network.
111 |     ") GetInputTensorInfo;
112 |     armnn::TensorInfo GetInputTensorInfo(int networkId, int layerId);
113 | 
114 |     %feature("docstring",
115 |     "
116 |     Get information relating to a network's output tensor.
117 | 
118 |     Args:
119 |         networkId (int): Unique ID of the network being run.
120 |         layerId (int): Unique ID of the output layer.
121 | 
122 |     Returns:
123 |         TensorInfo: Information relating to the output tensor of a network.
124 |     ") GetOutputTensorInfo;
125 |     armnn::TensorInfo GetOutputTensorInfo(int networkId, int layerId);
126 | 
127 |     %feature("docstring",
128 |     "
129 |     Get information relating to the supported compute backends on the current device.
130 | 
131 |     Returns:
132 |         IDeviceSpec: Device spec information detailing all supported backends on the current platform.
133 |     ") GetDeviceSpec;
134 |     const IDeviceSpec& GetDeviceSpec();
135 | };
136 | 
137 | %extend IRuntime {
138 |     // tell python to disown the IOptimizedNetwork pointer
139 |     // because IRuntime takes ownership
140 |     %typemap(in) armnn::IOptimizedNetwork* {
141 |         if (!SWIG_IsOK(SWIG_ConvertPtr($input, (void **) &$1, $1_descriptor, SWIG_POINTER_DISOWN))) {
142 |             SWIG_exception_fail(SWIG_TypeError, "in method '$symname', argument 2 of type armnn::IOptimizedNetwork*");
143 |         }
144 |     }
145 | 
146 |     %feature("docstring",
147 |     "
148 |     Loads a complete network into the IRuntime.
149 |     The runtime takes ownership of the network once it is passed in.
150 |     Args:
151 |         network (IOptimizedNetwork): An optimized network to load into the IRuntime.
152 |         networkProperties (INetworkProperties): Properties that allow the user to opt in to import/export behavior. Default: None.
153 |     Returns:
154 |         tuple: (int, str) Network id and non-fatal failure or warning messages.
155 |     Raises:
156 |         RuntimeError: If the process fails.
157 |     ") LoadNetwork;
158 | 
159 |     std::pair LoadNetwork(armnn::IOptimizedNetwork* network,
160 |                           const INetworkProperties* networkProperties = nullptr)
161 |     {
162 |         armnn::IOptimizedNetworkPtr netPtr(network, &armnn::IOptimizedNetwork::Destroy);
163 |         armnn::NetworkId networkIdOut;
164 |         std::string errorString;
165 |         armnn::Status status;
166 | 
167 |         if (networkProperties) {
168 |             status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString, *networkProperties);
169 |         } else {
170 |             status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString);
171 |         }
172 | 
173 |         if(status == armnn::Status::Failure)
174 |         {
175 |             throw armnn::Exception(errorString);
176 |         }
177 | 
178 |         auto net_id_int = static_cast(networkIdOut);
179 |         return std::make_pair(net_id_int, errorString);
180 |     };
181 | 
182 |     %typemap(in) armnn::IOptimizedNetwork*;
183 |     %feature("docstring",
184 |     "
185 |     Calling this function will perform an inference on your network.
186 | 
187 |     Args:
188 |         networkId (int): Unique ID of the network to run.
189 |         inputTensors (list): A list of tuples (int, `ConstTensor`), see `make_input_tensors`.
190 |         outputTensors (list): A list of tuples (int, `Tensor`), see `make_output_tensors`.
191 | 
192 |     ") EnqueueWorkload;
193 |     void EnqueueWorkload(int networkId, const std::vector>& inputTensors,
194 |                          const std::vector>& outputTensors) {
195 |         armnn::Status status = $self->EnqueueWorkload(networkId, inputTensors, outputTensors);
196 | 
197 |         if(status == armnn::Status::Failure)
198 |         {
199 |             throw armnn::Exception("Failed to enqueue workload for network.");
200 |         }
201 |     };
202 | 
203 |     %feature("docstring",
204 |     "
205 |     Unload a currently loaded network from the runtime.
206 | 
207 |     Args:
208 |         networkId (int): Unique ID of the network to unload.
209 | 
210 |     ") UnloadNetwork;
211 |     void UnloadNetwork(int networkId) {
212 |         armnn::Status status = $self->UnloadNetwork(networkId);
213 |         if(status == armnn::Status::Failure)
214 |         {
215 |             throw armnn::Exception("Failed to unload network.");
216 |         }
217 |     };
218 | 
219 |     %feature("docstring",
220 |     "
221 |     Returns the IProfiler instance registered against the working thread, and stored on the loaded network.
222 |     Be aware that if the runtime has unloaded the network, or if the runtime is destroyed,
223 |     the IProfiler instance will also be destroyed, and any further use of it will cause a segmentation fault.
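
    Example (a sketch; assumes a network was already loaded as `net_id` and
    that the workload tensors exist):

    >>> profiler = runtime.GetProfiler(net_id)
    >>> profiler.EnableProfiling(True)
    >>> runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    >>> print(profiler.as_json())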
224 | 225 | Args: 226 | networkId (int): The ID of the loaded network you want to profile. 227 | 228 | Returns: 229 | IProfiler: IProfiler instance the given loaded network has stored. 230 | 231 | Raises: 232 | RuntimeError: If no profiler is found. 233 | ") GetProfiler; 234 | 235 | armnn::IProfiler* GetProfiler(int networkId) { 236 | std::shared_ptr profiler = $self->GetProfiler(networkId); 237 | if (nullptr == profiler) { 238 | throw armnn::Exception("Failed to get profiler"); 239 | } 240 | return profiler.get(); 241 | }; 242 | 243 | ~IRuntime() { 244 | armnn::IRuntime::Destroy($self); 245 | } 246 | 247 | IRuntime(const CreationOptions& options) { 248 | return armnn::IRuntime::CreateRaw(options); 249 | } 250 | 251 | } 252 | 253 | } 254 | 255 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %{ 6 | #include "armnn/Tensor.hpp" 7 | %} 8 | 9 | %include 10 | %include 11 | 12 | namespace armnn 13 | { 14 | 15 | %feature("docstring", 16 | " 17 | Class for holding the shape information of an Arm NN tensor. 18 | 19 | This class is iterable. You can iterate over it to get each value of the Tensor shape. 20 | 21 | Examples: 22 | Obtain tensor shape information as a list. 23 | >>> import pyarmnn as ann 24 | >>> import numpy as np 25 | >>> 26 | >>> tensor_info = ann.TensorInfo(ann.TensorShape((4, 2, 1, 3)), ann.DataType_Float32) 27 | >>> tensor = ann.ConstTensor(tensor_info, np.ones([4, 2, 1, 3], dtype=np.float32)) 28 | >>> print(list(tensor.GetShape())) 29 | [4, 2, 1, 3] 30 | 31 | ") TensorShape; 32 | class TensorShape 33 | { 34 | // Make TensorShape iterable so we can return shape dims easily. 35 | %pythoncode %{ 36 | def __iter__(self): 37 | for dim in range(self.GetNumDimensions()): 38 | yield self[dim] 39 | %} 40 | 41 | 42 | public: 43 | %tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes); 44 | TensorShape(unsigned int numDimensions, const unsigned int* dimensionSizes); 45 | %clear_tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes); 46 | 47 | %feature("docstring", 48 | " 49 | Returns the number of dimensions in this TensorShape. 50 | 51 | Returns: 52 | int: The number of dimensions in this TensorShape. 53 | 54 | ") GetNumDimensions; 55 | unsigned int GetNumDimensions() const; 56 | 57 | %feature("docstring", 58 | " 59 | Returns the total number of elements for a tensor with this TensorShape. 60 | 61 | Returns: 62 | int: The total number of elements for a tensor with this TensorShape. 
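
        For example, reusing the shape from the class docstring above:

        >>> TensorShape((4, 2, 1, 3)).GetNumElements()
        24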
63 | 
64 |     ") GetNumElements;
65 |     unsigned int GetNumElements() const;
66 | 
67 | };
68 | 
69 | %extend TensorShape {
70 | 
71 |     unsigned int __getitem__(unsigned int i) const {
72 |         return $self->operator[](i);
73 |     }
74 |     void __setitem__(unsigned int i, unsigned int val) {
75 |         $self->operator[](i) = val;
76 |     }
77 | 
78 |     std::string __str__() {
79 |         std::string dim = "NumDimensions: " + std::to_string($self->GetNumDimensions());
80 |         std::string elm = "NumElements: " + std::to_string($self->GetNumElements());
81 | 
82 |         std::string shapeStr = "TensorShape{Shape(";
83 | 
84 |         auto numDimensions = $self->GetNumDimensions();
85 |         auto sizeDims = $self->GetNumDimensions();
86 |         for (unsigned int i = 0; i < numDimensions; i++) {
87 |             shapeStr += std::to_string($self->operator[](i));
88 | 
89 |             if (sizeDims - 1 > 0) { // append a separator after every dimension except the last
90 |                 shapeStr += ", ";
91 |             }
92 |             sizeDims--;
93 |         }
94 |         shapeStr = shapeStr + "), " + dim + ", " + elm + "}";
95 |         return shapeStr;
96 |     }
97 | 
98 | }
99 | 
100 | 
101 | %feature("docstring",
102 | "
103 | Class for holding the tensor information of an Arm NN tensor such as quantization, data type, shape etc.
104 | 
105 | ") TensorInfo;
106 | class TensorInfo
107 | {
108 | public:
109 |     TensorInfo();
110 | 
111 |     TensorInfo(const TensorInfo& other);
112 | 
113 |     TensorInfo(const TensorShape& shape, DataType dataType,
114 |                float quantizationScale = 0.0f, int32_t quantizationOffset = 0);
115 | 
116 |     %feature("docstring",
117 |     "
118 |     Get the tensor shape.
119 | 
120 |     Return:
121 |         TensorShape: Current shape of the tensor.
122 | 
123 |     ") GetShape;
124 |     TensorShape& GetShape();
125 | 
126 |     %feature("docstring",
127 |     "
128 |     Set the tensor shape. Must have the same number of elements as the current tensor.
129 | 
130 |     Args:
131 |         newShape (TensorShape): New tensor shape to reshape to.
132 | 
133 |     ") SetShape;
134 |     void SetShape(const TensorShape& newShape);
135 | 
136 |     %feature("docstring",
137 |     "
138 |     Returns the number of dimensions in this Tensor.
139 | 
140 |     Returns:
141 |         int: The number of dimensions in this Tensor.
142 | 
143 |     ") GetNumDimensions;
144 |     unsigned int GetNumDimensions() const;
145 | 
146 |     %feature("docstring",
147 |     "
148 |     Returns the total number of elements for this Tensor.
149 | 
150 |     Returns:
151 |         int: The total number of elements for this Tensor.
152 | 
153 |     ") GetNumElements;
154 |     unsigned int GetNumElements() const;
155 | 
156 |     %feature("docstring",
157 |     "
158 |     Get the tensor data type.
159 | 
160 |     Returns:
161 |         DataType: Current tensor DataType.
162 | 
163 |     ") GetDataType;
164 |     DataType GetDataType() const;
165 | 
166 |     %feature("docstring",
167 |     "
168 |     Set the tensor data type.
169 | 
170 |     Args:
171 |         type (DataType): DataType to set the tensor to.
172 | 
173 |     ") SetDataType;
174 |     void SetDataType(DataType type);
175 | 
176 |     %feature("docstring",
177 |     "
178 |     Get the value of the tensor's quantization scale.
179 | 
180 |     Returns:
181 |         float: Tensor quantization scale value.
182 | 
183 |     ") GetQuantizationScale;
184 |     float GetQuantizationScale() const;
185 | 
186 |     %feature("docstring",
187 |     "
188 |     Get the value of the tensor's quantization offset.
189 | 
190 |     Returns:
191 |         int: Tensor quantization offset value.
192 | 
193 |     ") GetQuantizationOffset;
194 |     int32_t GetQuantizationOffset() const;
195 | 
196 |     %feature("docstring",
197 |     "
198 |     Set the value of the tensor's quantization scale.
199 | 
200 |     Args:
201 |         scale (float): Scale value to set.
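
    For example (the scale value here is arbitrary):

    >>> info = TensorInfo(TensorShape((1, 2)), DataType_QAsymmU8)
    >>> info.SetQuantizationScale(0.5)
    >>> info.GetQuantizationScale()
    0.5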
202 | 
203 |     ") SetQuantizationScale;
204 |     void SetQuantizationScale(float scale);
205 | 
206 |     %feature("docstring",
207 |     "
208 |     Set the value of the tensor's quantization offset.
209 | 
210 |     Args:
211 |         offset (int): Offset value to set.
212 | 
213 |     ") SetQuantizationOffset;
214 |     void SetQuantizationOffset(int32_t offset);
215 | 
216 |     %feature("docstring",
217 |     "
218 |     Returns true if the tensor is a quantized data type.
219 | 
220 |     Returns:
221 |         bool: True if the tensor is a quantized data type.
222 | 
223 |     ") IsQuantized;
224 |     bool IsQuantized() const;
225 | 
226 | 
227 | 
228 |     %feature("docstring",
229 |     "
230 |     Check that the types are the same and, if quantized, that the quantization parameters are the same.
231 | 
232 |     Returns:
233 |         bool: True if matched, else False.
234 | 
235 |     ") IsTypeSpaceMatch;
236 |     bool IsTypeSpaceMatch(const TensorInfo& other) const;
237 | 
238 |     %feature("docstring",
239 |     "
240 |     Get the number of bytes needed for this tensor.
241 | 
242 |     Returns:
243 |         int: Number of bytes consumed by this tensor.
244 | 
245 |     ") GetNumBytes;
246 |     unsigned int GetNumBytes() const;
247 | 
248 | };
249 | 
250 | %extend TensorInfo {
251 | 
252 |     std::string __str__() {
253 |         const std::string tmp = "TensorInfo{DataType: " + std::to_string(static_cast($self->GetDataType()))
254 |                                 + ", IsQuantized: " + std::to_string($self->IsQuantized())
255 |                                 + ", QuantizationScale: " + std::to_string( $self->GetQuantizationScale())
256 |                                 + ", QuantizationOffset: " + std::to_string($self->GetQuantizationOffset())
257 |                                 + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
258 |                                 + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
259 |         return tmp;
260 |     }
261 | 
262 | }
263 | 
264 | class Tensor
265 | {
266 | public:
267 |     ~Tensor();
268 |     Tensor();
269 |     Tensor(const Tensor& other);
270 | 
271 |     %mutable_memory(void* memory);
272 |     Tensor(const TensorInfo& info, void* memory);
273 |     %clear_mutable_memory(void* memory);
274 | 
275 |     const TensorInfo& GetInfo() const;
276 |     const TensorShape& GetShape() const;
277 | 
278 |     DataType GetDataType() const;
279 |     unsigned int GetNumDimensions() const;
280 |     unsigned int GetNumBytes() const;
281 |     unsigned int GetNumElements() const;
282 | 
283 |     /* we want to disable getting the memory area from here - forcing use of get_memory_area() in the public api.
284 |     void* GetMemoryArea() const;*/
285 | };
286 | 
287 | %extend Tensor {
288 | 
289 |     std::string __str__() {
290 |         const std::string tmp = "Tensor{DataType: " + std::to_string(static_cast($self->GetDataType()))
291 |                                 + ", NumBytes: " + std::to_string($self->GetNumBytes())
292 |                                 + ", NumDimensions: " + std::to_string( $self->GetNumDimensions())
293 |                                 + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
294 |         return tmp;
295 |     }
296 | }
297 | 
298 | class ConstTensor
299 | {
300 | public:
301 |     ~ConstTensor();
302 |     ConstTensor();
303 |     ConstTensor(const Tensor& other);
304 |     ConstTensor(const ConstTensor& other);
305 | 
306 |     %const_memory(const void* memory);
307 |     ConstTensor(const TensorInfo& info, const void* memory);
308 |     %clear_const_memory(const void* memory);
309 | 
310 |     const TensorInfo& GetInfo() const;
311 |     const TensorShape& GetShape() const;
312 | 
313 |     DataType GetDataType() const;
314 |     unsigned int GetNumDimensions() const;
315 |     unsigned int GetNumBytes() const;
316 |     unsigned int GetNumElements() const;
317 | 
318 |     /* we want to disable getting the memory area from here - forcing use of get_memory_area() in the public api.
319 | void* GetMemoryArea() const;*/
320 | };
321 | 
322 | %extend ConstTensor {
323 | 
324 | std::string __str__() {
325 | const std::string tmp = "ConstTensor{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
326 | + ", NumBytes: " + std::to_string($self->GetNumBytes())
327 | + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
328 | + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
329 | return tmp;
330 | }
331 | }
332 | 
333 | }
334 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %{
6 | #include "armnn/Types.hpp"
7 | %}
8 | 
9 | %include <typemaps/permutation_vector.i>
10 | 
11 | 
12 | namespace armnn
13 | {
14 | 
15 | %feature("docstring",
16 | "
17 | Vector used to permute a tensor.
18 | 
19 | For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
20 | which is to be passed as an input to Arm NN, each source dimension is mapped to the corresponding
21 | Arm NN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
22 | to the location of the Arm NN Height dimension (1 -> 2). Similar arguments are made for the Width and
23 | Channels (2 -> 3 and 3 -> 1). This will lead to m_DimMappings pointing to the following array:
24 | [ 0, 2, 3, 1 ].
25 | 
26 | Note that the mapping should be reversed if considering the case of Arm NN 4-d outputs (Batch Element,
27 | Channels, Height, Width) being written to a destination with the format mentioned above. We now have
28 | 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following m_DimMappings contents:
29 | [ 0, 3, 1, 2 ].
30 | 
31 | Args:
32 | dimMappings (list): Indicates how to translate tensor elements from a given source into the target destination,
33 | when source and target potentially have different memory layouts.
34 | ") PermutationVector;
35 | 
36 | class PermutationVector
37 | {
38 | public:
39 | using ValueType = unsigned int;
40 | using SizeType = unsigned int;
41 | 
42 | %permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);
43 | PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);
44 | %clear_permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);
45 | 
46 | 
47 | %feature("docstring",
48 | "
49 | Get the PermutationVector size.
50 | 
51 | Return:
52 | SizeType: Current size of the PermutationVector.
53 | 
54 | ") GetSize;
55 | SizeType GetSize();
56 | 
57 | %feature("docstring",
58 | "
59 | Checks whether a specified permutation vector is its inverse.
60 | 
61 | Return:
62 | bool: True if the specified PermutationVector is its inverse.
63 | 
64 | ") IsInverse;
65 | bool IsInverse(const PermutationVector& other);
66 | };
67 | 
68 | %extend PermutationVector {
69 | 
70 | unsigned int __getitem__(unsigned int i) const {
71 | return $self->operator[](i);
72 | }
73 | 
74 | bool __eq__(PermutationVector other) {
75 | int size = $self->GetSize();
76 | int otherSize = other.GetSize();
77 | if(size != otherSize)
78 | {
79 | return false;
80 | }
81 | for(int i = 0; i < size; ++i){
82 | if($self->operator[](i) != other[i])
83 | {
84 | return false;
85 | }
86 | // keep scanning: every element must match, not just the first one
87 | }
88 | return true;
89 | }
90 | }
91 | 
92 | }
93 | %feature("docstring",
94 | "
95 | Interface for device specifications. Main use is to get information about the compute capabilities of the device being used.
96 | ") IDeviceSpec;
97 | 
98 | 
99 | %feature("docstring",
100 | "
101 | Returns the backends supported by this compute device.
102 | 
103 | Returns:
104 | set: This device's supported backends.
105 | 
106 | ") GetSupportedBackends;
107 | 
108 | %ignore ProfilingGuid;
109 | %ignore PermutationVector;
110 | #define ARMNN_DEPRECATED_ENUM // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
111 | #define ARMNN_DEPRECATED_ENUM_MSG(message) // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
112 | %include "armnn/Types.hpp"
113 | 
114 | 
115 | 
116 | %extend armnn::IDeviceSpec {
117 | 
118 | 
119 | std::string __str__() {
120 | 
121 | std::string deviceStr = "IDeviceSpec { supportedBackends: [";
122 | 
123 | auto bends = $self->GetSupportedBackends();
124 | auto sizeBends = $self->GetSupportedBackends().size();
125 | for (std::unordered_set<armnn::BackendId>::const_iterator p = bends.begin(); p != bends.end(); ++p) {
126 | 
127 | deviceStr += p->Get();
128 | 
129 | if (sizeBends - 1 > 0) {
130 | deviceStr += ", ";
131 | }
132 | sizeBends--;
133 | 
134 | }
135 | deviceStr = deviceStr + "]}";
136 | 
137 | return deviceStr;
138 | }
139 | 
140 | }
141 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/modules/armnn_types_utils.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %{
6 | #include "armnn/TypesUtils.hpp"
7 | %}
8 | 
9 | namespace armnn
10 | {
11 | constexpr unsigned int GetDataTypeSize(DataType dataType);
12 | 
13 | constexpr const char* GetDataTypeName(DataType dataType);
14 | 
15 | template<typename QuantizedType>
16 | QuantizedType Quantize(float value, float scale, int32_t offset);
17 | %template(Quantize_uint8_t) Quantize<uint8_t>;
18 | %template(Quantize_int8_t) Quantize<int8_t>;
19 | %template(Quantize_int16_t) Quantize<int16_t>;
20 | %template(Quantize_int32_t) Quantize<int32_t>;
21 | 
22 | template<typename QuantizedType>
23 | float Dequantize(QuantizedType value, float scale, int32_t offset);
24 | %template(Dequantize_uint8_t) Dequantize<uint8_t>;
25 | %template(Dequantize_int8_t) Dequantize<int8_t>;
26 | %template(Dequantize_int16_t) Dequantize<int16_t>;
27 | %template(Dequantize_int32_t) Dequantize<int32_t>;
28 | }
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/standard_header.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
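
A short Python sketch of the wrappers defined above; the Quantize_*/Dequantize_* instantiations surface through pyarmnn's public quantize/dequantize helpers, as the tests later in this package show (argument values are illustrative):

    import pyarmnn as ann

    # NHWC -> NCHW style mapping, passed as a tuple per the permutation_vector typemap.
    perm = ann.PermutationVector((0, 2, 3, 1))
    assert perm.GetSize() == 4
    assert perm[1] == 2                                  # __getitem__ from %extend
    assert perm == ann.PermutationVector((0, 2, 3, 1))   # __eq__ from %extend

    # Built on the Quantize<uint8_t>/Dequantize<uint8_t> instantiations above.
    q = ann.quantize(3.327, 0.026, 128, 'uint8')
    f = ann.dequantize(q, 0.026, 128, 'uint8')
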
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %include "stl.i"
6 | %include "cstring.i"
7 | %include "std_string.i"
8 | %include "std_vector.i"
9 | %include "std_unordered_set.i"
10 | %include "std_pair.i"
11 | %include "stdint.i"
12 | %include "carrays.i"
13 | %include "exception.i"
14 | %include "typemaps.i"
15 | %include "std_iostream.i"
16 | 
17 | %ignore *::operator=;
18 | %ignore *::operator[];
19 | 
20 | 
21 | // Define exception typemap to wrap armnn exception into python exception.
22 | 
23 | %exception{
24 | try {
25 | $action
26 | } catch (armnn::Exception &e) {
27 | SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
28 | }
29 | };
30 | 
31 | %exception __getitem__ {
32 | try {
33 | $action
34 | } catch (armnn::InvalidArgumentException &e) {
35 | SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
36 | } catch (const std::out_of_range &e) {
37 | SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
38 | } catch (const std::exception &e) {
39 | SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
40 | }
41 | };
42 | 
43 | %exception __setitem__ {
44 | try {
45 | $action
46 | } catch (armnn::InvalidArgumentException &e) {
47 | SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
48 | } catch (const std::out_of_range &e) {
49 | SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
50 | } catch (const std::exception &e) {
51 | SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
52 | }
53 | };
54 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/typemaps/network_optimize.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
3 | // SPDX-License-Identifier: MIT
4 | //
5 | %define %optimize_typemap_out
6 | %typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>) {
7 | PyObject * network = SWIG_NewPointerObj(SWIG_as_voidptr($1.first), SWIGTYPE_p_armnn__IOptimizedNetwork, SWIG_POINTER_OWN);
8 | $result = PyTuple_New(2);
9 | 
10 | // Convert vector to fixed-size tuple
11 | std::vector<std::string> strings = $1.second;
12 | Py_ssize_t size = strings.size();
13 | 
14 | // New reference. Need to Py_DECREF
15 | PyObject* errMsgTuple = PyTuple_New(size);
16 | 
17 | if (!errMsgTuple) {
18 | Py_XDECREF(errMsgTuple);
19 | return PyErr_NoMemory();
20 | }
21 | 
22 | for (Py_ssize_t i = 0; i < size; i++) {
23 | // New reference. Need to Py_DECREF
24 | PyObject *string = PyString_FromString(strings[i].c_str());
25 | 
26 | if (!string) {
27 | Py_XDECREF(string);
28 | return PyErr_NoMemory();
29 | }
30 | PyTuple_SetItem(errMsgTuple, i, string);
31 | }
32 | 
33 | // Create result tuple
34 | PyTuple_SetItem($result, 0, network);
35 | PyTuple_SetItem($result, 1, errMsgTuple);
36 | }
37 | %enddef
38 | 
39 | %define %clear_optimize_typemap_out
40 | %typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>)
41 | %enddef
42 | 
-------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/typemaps/permutation_vector.i: --------------------------------------------------------------------------------
1 | //
2 | // Copyright © 2020 Arm Ltd. All rights reserved.
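
The %optimize_typemap_out macro above is what lets armnn::Optimize come back to Python as a plain tuple; a sketch of the resulting Python-side contract, using the same calls as the tests later in this package (`network` is assumed to come from one of the parsers or to have been built by hand):

    import pyarmnn as ann

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)
    preferred_backends = [ann.BackendId('CpuRef')]

    # The std::pair<IOptimizedNetwork*, std::vector<std::string>> becomes
    # (opt_network, tuple_of_error_messages) via the typemap.
    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(), ann.OptimizerOptions())
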
3 | // SPDX-License-Identifier: MIT 4 | // 5 | %define %permutation_vector_typemap(TYPE1, TYPE2) 6 | %typemap(in) (TYPE1, TYPE2) { 7 | if (PyTuple_Check($input)) { 8 | PyObject* seq = $input; 9 | 10 | $2 = PySequence_Fast_GET_SIZE(seq); 11 | $1 = (unsigned int*)PyMem_RawMalloc($2*sizeof(unsigned int)); 12 | 13 | 14 | if(!$1) { 15 | PyErr_NoMemory(); 16 | SWIG_fail; 17 | } 18 | int size = (int)$2; 19 | for(int i=0; i < size; i++) { 20 | PyObject *longItem; 21 | // Borrowed reference. No need to Py_DECREF 22 | PyObject *item = PySequence_Fast_GET_ITEM(seq, i); 23 | if(!item) { 24 | PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple"); 25 | SWIG_fail; 26 | } 27 | // New reference. Need to Py_DECREF 28 | longItem = PyNumber_Long(item); 29 | if(!longItem) { 30 | Py_XDECREF(longItem); 31 | PyErr_SetString(PyExc_TypeError, "All elements must be numbers"); 32 | SWIG_fail; 33 | } 34 | $1[i] = (unsigned int)PyLong_AsUnsignedLong(longItem); 35 | Py_XDECREF(longItem); 36 | } 37 | 38 | } else { 39 | PyErr_SetString(PyExc_TypeError, "Argument is not a tuple"); 40 | SWIG_fail; 41 | } 42 | } 43 | 44 | %typemap(freearg) (TYPE1, TYPE2) { 45 | PyMem_RawFree($1); 46 | } 47 | %enddef 48 | 49 | %define %clear_permutation_vector_typemap(TYPE1, TYPE2) 50 | %typemap(in) (TYPE1, TYPE2); 51 | %typemap(freearg) (TYPE1, TYPE2); 52 | %enddef 53 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_memory.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %define %mutable_memory(TYPEMAP) 6 | %typemap(in) (TYPEMAP) { 7 | int res; void *buf = 0; 8 | Py_buffer view; 9 | res = PyObject_GetBuffer($input, &view, PyBUF_WRITABLE); 10 | buf = view.buf; 11 | PyBuffer_Release(&view); 12 | if (res < 0) { 13 | PyErr_Clear(); 14 | %argument_fail(res, "(TYPEMAP)", $symname, $argnum); 15 | } 16 | $1 = buf; 17 | } 18 | 19 | %typemap(typecheck) (TYPEMAP) { 20 | $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0; 21 | } 22 | %enddef 23 | 24 | %define %clear_mutable_memory(TYPEMAP) 25 | %typemap(in) (TYPEMAP); 26 | %typemap(typecheck) (TYPEMAP); 27 | %enddef 28 | 29 | %define %const_memory(TYPEMAP) 30 | %typemap(in) (TYPEMAP) { 31 | int res; void *buf = 0; 32 | Py_buffer view; 33 | res = PyObject_GetBuffer($input, &view, PyBUF_CONTIG_RO); 34 | buf = view.buf; 35 | PyBuffer_Release(&view); 36 | if (res < 0) { 37 | PyErr_Clear(); 38 | %argument_fail(res, "(TYPEMAP)", $symname, $argnum); 39 | } 40 | $1 = buf; 41 | } 42 | 43 | %typemap(typecheck) (TYPEMAP) { 44 | $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0; 45 | } 46 | %enddef 47 | 48 | %define %clear_const_memory(TYPEMAP) 49 | %typemap(in) (TYPEMAP); 50 | %typemap(typecheck) (TYPEMAP); 51 | %enddef 52 | 53 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_shape.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 
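
The %mutable_memory/%const_memory typemaps above accept any object implementing the Python buffer protocol, numpy arrays being the usual choice; a minimal sketch (shape and dtype are illustrative):

    import numpy as np
    import pyarmnn as ann

    info = ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32)

    # ConstTensor takes a read-only contiguous buffer (PyBUF_CONTIG_RO above).
    in_data = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
    const_tensor = ann.ConstTensor(info, in_data)

    # Tensor requires a writable buffer (PyBUF_WRITABLE above).
    out_data = np.zeros(4, dtype=np.float32)
    tensor = ann.Tensor(info, out_data)
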
3 | // SPDX-License-Identifier: MIT 4 | // 5 | %define %tensor_shape_typemap(TYPE1, TYPE2) 6 | %typemap(in) (TYPE1, TYPE2) { 7 | if (PyTuple_Check($input)) { 8 | PyObject* seq = $input; 9 | 10 | $1 = PySequence_Fast_GET_SIZE(seq); 11 | $2 = (unsigned int*)PyMem_RawMalloc($1*sizeof(unsigned int)); 12 | 13 | if(!$2) { 14 | PyErr_NoMemory(); 15 | SWIG_fail; 16 | } 17 | int size = (int)$1; 18 | for(int i=0; i < size; i++) { 19 | PyObject *longItem; 20 | // Borrowed reference. No need to Py_DECREF 21 | PyObject *item = PySequence_Fast_GET_ITEM(seq, i); 22 | if(!item) { 23 | PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple"); 24 | SWIG_fail; 25 | } 26 | // New reference. Need to Py_DECREF 27 | longItem = PyNumber_Long(item); 28 | if(!longItem) { 29 | Py_XDECREF(longItem); 30 | PyErr_SetString(PyExc_TypeError, "All elements must be numbers"); 31 | SWIG_fail; 32 | } 33 | $2[i] = (unsigned int)PyLong_AsUnsignedLong(longItem); 34 | Py_XDECREF(longItem); 35 | } 36 | 37 | } else { 38 | PyErr_SetString(PyExc_TypeError, "Argument is not a tuple"); 39 | SWIG_fail; 40 | } 41 | } 42 | 43 | %typemap(freearg) (TYPE1, TYPE2) { 44 | PyMem_RawFree($2); 45 | } 46 | %enddef 47 | 48 | %define %clear_tensor_shape_typemap(TYPE1, TYPE2) 49 | %typemap(in) (TYPE1, TYPE2); 50 | %typemap(freearg) (TYPE1, TYPE2); 51 | %enddef 52 | -------------------------------------------------------------------------------- /python/pyarmnn/src/pyarmnn/swig/typemaps/vectors.i: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2020 Arm Ltd. All rights reserved. 3 | // SPDX-License-Identifier: MIT 4 | // 5 | %inline %{ 6 | //-------------------------from_python_to_cpp----------------------------- 7 | int from_python_to_cpp(PyObject *obj, long* val) { 8 | return SWIG_AsVal_long(obj, val); 9 | } 10 | 11 | int from_python_to_cpp(PyObject *obj, int* val) { 12 | return SWIG_AsVal_int(obj, val); 13 | } 14 | 15 | int from_python_to_cpp(PyObject *obj, unsigned int* val) { 16 | return SWIG_AsVal_unsigned_SS_int(obj, val); 17 | } 18 | 19 | int from_python_to_cpp(PyObject *obj, unsigned short* val) { 20 | return SWIG_AsVal_unsigned_SS_short(obj, val); 21 | } 22 | 23 | int from_python_to_cpp(PyObject *obj, float* val) { 24 | return SWIG_AsVal_float(obj, val); 25 | } 26 | 27 | int from_python_to_cpp(PyObject *obj, double* val) { 28 | return SWIG_AsVal_double(obj, val); 29 | } 30 | #ifdef SWIG_LONG_LONG_AVAILABLE 31 | int from_python_to_cpp(PyObject *obj, unsigned long long* val) { 32 | return SWIG_AsVal_unsigned_SS_long_SS_long(obj, val); 33 | } 34 | 35 | int from_python_to_cpp(PyObject *obj, long long* val) { 36 | return SWIG_AsVal_long_SS_long(obj, val); 37 | } 38 | #endif 39 | 40 | int from_python_to_cpp(PyObject *obj, unsigned long* val) { 41 | return SWIG_AsVal_unsigned_SS_long(obj, val); 42 | } 43 | 44 | int from_python_to_cpp(PyObject *obj, short* val) { 45 | return SWIG_AsVal_short(obj, val); 46 | } 47 | //-------------------------from_cpp_to_python----------------------------- 48 | PyObject* from_cpp_to_python(long& val){ 49 | return PyLong_FromLong(val); 50 | } 51 | 52 | PyObject* from_cpp_to_python(unsigned long& val){ 53 | return PyLong_FromUnsignedLong(val); 54 | } 55 | #ifdef SWIG_LONG_LONG_AVAILABLE 56 | PyObject* from_cpp_to_python(long long& val){ 57 | return PyLong_FromLongLong(val); 58 | } 59 | 60 | PyObject* from_cpp_to_python(unsigned long long& val){ 61 | return PyLong_FromUnsignedLongLong(val); 62 | } 63 | #endif 64 | 65 | PyObject* from_cpp_to_python(int& 
val){
66 | return PyLong_FromLong(static_cast<long>(val));
67 | }
68 | 
69 | PyObject* from_cpp_to_python(unsigned int& val){
70 | return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
71 | }
72 | 
73 | PyObject* from_cpp_to_python(unsigned short& val){
74 | return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
75 | }
76 | 
77 | PyObject* from_cpp_to_python(float& val){
78 | return PyFloat_FromDouble(static_cast<double>(val));
79 | }
80 | 
81 | PyObject* from_cpp_to_python(double& val){
82 | return PyFloat_FromDouble(val);
83 | }
84 | 
85 | template<typename T, typename U>
86 | PyObject* from_cpp_to_python(std::pair<T, U>& pair){
87 | 
88 | PyObject* first = from_cpp_to_python(pair.first);
89 | PyObject* second = from_cpp_to_python(pair.second);
90 | 
91 | PyObject* localTuple = PyTuple_New(2);
92 | 
93 | if (!localTuple) {
94 | Py_XDECREF(localTuple);
95 | return PyErr_NoMemory();
96 | }
97 | 
98 | PyTuple_SetItem(localTuple, 0, first);
99 | PyTuple_SetItem(localTuple, 1, second);
100 | 
101 | return localTuple;
102 | }
103 | 
104 | template<typename T, typename U>
105 | static int from_python_to_cpp(PyObject* tuple, std::pair<T, U>* out) {
106 | 
107 | if (PyTuple_Check(tuple)) {
108 | 
109 | auto size = PyTuple_Size(tuple);
110 | 
111 | if (size != 2) {
112 | return SWIG_ValueError;
113 | }
114 | 
115 | PyObject* firstPy = PyTuple_GetItem(tuple, 0);
116 | PyObject* secondPy = PyTuple_GetItem(tuple, 1);
117 | 
118 | if (!SWIG_IsOK(from_python_to_cpp(firstPy, &out->first))) {
119 | return SWIG_TypeError;
120 | }
121 | 
122 | if (!SWIG_IsOK(from_python_to_cpp(secondPy, &out->second))) {
123 | return SWIG_TypeError;
124 | }
125 | 
126 | } else {
127 | return SWIG_TypeError;
128 | }
129 | 
130 | return SWIG_OK;
131 | }
132 | //---------------std::vector <-> python list ---------------------
133 | template<typename T>
134 | static PyObject* from_vector_to_python(std::vector<T>* input) {
135 | Py_ssize_t size = input->size();
136 | PyObject* localList = PyList_New(size);
137 | 
138 | if (!localList) {
139 | Py_XDECREF(localList);
140 | return PyErr_NoMemory();
141 | }
142 | 
143 | for(Py_ssize_t i = 0; i < size; ++i) {
144 | 
145 | PyObject* obj = from_cpp_to_python(input->at(i));
146 | 
147 | PyList_SET_ITEM(localList, i, obj);
148 | }
149 | return localList;
150 | }
151 | 
152 | template<typename T>
153 | int from_python_to_vector(PyObject* seq, std::vector<T>& out) {
154 | Py_ssize_t size = PySequence_Fast_GET_SIZE(seq);
155 | 
156 | for(Py_ssize_t i=0; i < size; i++) {
157 | PyObject *item = PySequence_Fast_GET_ITEM(seq, i);
158 | if(!item) {
159 | PyErr_SetString(PyExc_TypeError, "Failed to read data from given sequence");
160 | 
161 | return SWIG_NullReferenceError;
162 | }
163 | 
164 | T element;
165 | int res = from_python_to_cpp(item, &element);
166 | if (!SWIG_IsOK(res)) {
167 | PyObject* itemRepr = PyObject_Repr(item);
168 | PyObject* itemStrObj = PyUnicode_AsEncodedString(itemRepr, "utf-8", "replace");
169 | const char* itemStr = PyBytes_AS_STRING(itemStrObj);
170 | 
171 | auto pythonType = Py_TYPE(item)->tp_name;
172 | 
173 | PyErr_Format(PyExc_TypeError, "Failed to convert python input value %s of type '%s' to C type '%s'", itemStr, pythonType, typeid(T).name());
174 | Py_XDECREF(itemStrObj);
175 | Py_XDECREF(itemRepr);
176 | Py_DECREF(seq);
177 | return SWIG_TypeError;
178 | }
179 | out.push_back(element);
180 | }
181 | return SWIG_OK;
182 | }
183 | 
184 | %}
185 | 
186 | %define %list_to_vector(TYPEMAP...)
187 | 188 | // this typemap works for struct argument set 189 | %typemap(in) TYPEMAP* (TYPEMAP tmp) { 190 | if (PySequence_Check($input)) { 191 | 192 | if (from_python_to_vector($input, tmp) < 0) { 193 | SWIG_fail; 194 | } 195 | 196 | $1 = &tmp; 197 | 198 | } else { 199 | PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method."); 200 | SWIG_fail; 201 | } 202 | } 203 | 204 | // this typemap works for constructor 205 | %typemap(in) TYPEMAP { 206 | if (PySequence_Check($input)) { 207 | if (from_python_to_vector($input, $1) < 0){ 208 | SWIG_fail; 209 | } 210 | } else { 211 | PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method."); 212 | SWIG_fail; 213 | } 214 | } 215 | 216 | // this typemap works for struct argument get 217 | 218 | %typemap(out) TYPEMAP* { 219 | $result = from_vector_to_python($1); 220 | } 221 | 222 | // this typemap works for overloaded methods and ctors 223 | %typemap(typecheck) (TYPEMAP) { 224 | $1 = PySequence_Check($input) ? 1 : 0; 225 | } 226 | 227 | %enddef 228 | 229 | %define %list_to_vector_clear(TYPEMAP...) 230 | %typemap(in) (TYPEMAP); 231 | %typemap(in) TYPEMAP* (TYPEMAP tmp); 232 | %typemap(typecheck) (TYPEMAP); 233 | %typemap(out) TYPEMAP*; 234 | %enddef 235 | 236 | -------------------------------------------------------------------------------- /python/pyarmnn/swig_generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright © 2020 Arm Ltd. All rights reserved. 3 | # Copyright 2020 NXP 4 | # SPDX-License-Identifier: MIT 5 | """This script executes SWIG commands to generate armnn and armnn version wrappers. 6 | This script cannot be moved to ./script dir because it uses find_armnn function from setup.py script. 7 | Both scripts must be in the same folder. 8 | """ 9 | import os 10 | import re 11 | import subprocess 12 | import argparse 13 | 14 | from setup import find_includes 15 | 16 | __current_dir = os.path.dirname(os.path.realpath(__file__)) 17 | __swig_exec = None 18 | __verbose = False 19 | 20 | SWIG_EXEC_ENV = "SWIG_EXECUTABLE" 21 | 22 | 23 | def get_swig_exec(swig_exec_env: str = SWIG_EXEC_ENV): 24 | """Returns the swig command. Uses either an env variable or the `swig` command 25 | and verifies it works. 26 | 27 | Args: 28 | swig_exec_env(str): Env variable pointing to the swig executable. 29 | 30 | Returns: 31 | str: Path to swig executable. 32 | 33 | Raises: 34 | RuntimeError: If unable to execute any version of swig. 35 | """ 36 | swig_exec = os.getenv(swig_exec_env) 37 | if swig_exec is None: 38 | swig_exec = "swig" 39 | if subprocess.Popen([swig_exec, "-version"], stdout=subprocess.DEVNULL): 40 | return swig_exec 41 | else: 42 | raise RuntimeError("Unable to execute swig.") 43 | 44 | 45 | def check_swig_version(expected_version: str): 46 | """Checks version of swig. 47 | 48 | Args: 49 | expected_version(str): String containing expected version. 
50 | 51 | Returns: 52 | bool: True if version is correct, False otherwise 53 | """ 54 | cmd = subprocess.Popen([__swig_exec, "-version"], stdout=subprocess.PIPE) 55 | out, _ = cmd.communicate() 56 | 57 | pattern = re.compile(r"(?<=Version ).+(?=$)", re.MULTILINE) 58 | match = pattern.search(out.decode('utf-8')) 59 | 60 | if match: 61 | version_string = match.group(0).strip() 62 | if __verbose: 63 | print(f"SWIG version: {version_string}") 64 | return version_string.startswith(expected_version) 65 | else: 66 | return False 67 | 68 | 69 | def generate_wrap(name: str, extr_includes): 70 | """Generates the python wrapper using swig. 71 | 72 | Args: 73 | name(str): Name of the wrapper template. 74 | extr_includes(str): Include paths. 75 | 76 | Raises: 77 | RuntimeError: If wrapper fails to be generated. 78 | """ 79 | in_dir = os.path.join(__current_dir, "src", "pyarmnn", "swig") 80 | out_dir = os.path.join(__current_dir, "src", "pyarmnn", "_generated") 81 | if __verbose: 82 | print(f"Generating wrap for {name} ...") 83 | code = os.system(f"{__swig_exec} -c++ -python -Wall " 84 | + "-o {} ".format(os.path.join(out_dir, f"{name}_wrap.cpp")) 85 | + f"-outdir {out_dir} " 86 | + f"{extr_includes} " 87 | + f"-I{in_dir} " 88 | + os.path.join(in_dir, f"{name}.i")) 89 | if code != 0: 90 | raise RuntimeError(f"Failed to generate {name} ext.") 91 | 92 | 93 | if __name__ == "__main__": 94 | __swig_exec = get_swig_exec() 95 | 96 | # This check is redundant in case CMake is used, it's here for standalone use 97 | if not check_swig_version('4.'): 98 | raise RuntimeError("Wrong swig version was found. Expected SWIG version is 4.x.x") 99 | 100 | armnn_includes = find_includes() 101 | 102 | parser = argparse.ArgumentParser("Script to generate SWIG wrappers.") 103 | parser.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") 104 | args = parser.parse_args() 105 | 106 | __verbose = args.verbose 107 | 108 | wrap_names = ['armnn_version', 109 | 'armnn', 110 | 'armnn_caffeparser', 111 | 'armnn_onnxparser', 112 | 'armnn_tfparser', 113 | 'armnn_tfliteparser'] 114 | 115 | for n in wrap_names: 116 | generate_wrap(n, f"-I{'-I'.join(armnn_includes)} ") 117 | -------------------------------------------------------------------------------- /python/pyarmnn/test/requirements.txt: -------------------------------------------------------------------------------- 1 | requests>=2.24.0 2 | Pillow>=6.2.2 3 | pytest>=5.4.3 4 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_caffe_parser.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 
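
Standalone use of swig_generate.py above can be sketched as follows; the SWIG install path is a hypothetical example, and the script falls back to whatever `swig` is on PATH when SWIG_EXECUTABLE is unset:

    import os
    import subprocess

    env = dict(os.environ, SWIG_EXECUTABLE="/opt/swig-4.0/bin/swig")  # hypothetical path
    subprocess.run(["python3", "swig_generate.py", "--verbose"], env=env, check=True)
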
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import pyarmnn as ann 7 | import numpy as np 8 | 9 | 10 | @pytest.fixture() 11 | def parser(shared_data_folder): 12 | """ 13 | Parse and setup the test network to be used for the tests below 14 | """ 15 | 16 | # Create caffe parser 17 | parser = ann.ICaffeParser() 18 | 19 | # Specify path to model 20 | path_to_model = os.path.join(shared_data_folder, 'mock_model.caffemodel') 21 | 22 | # Specify the tensor shape relative to the input [1, 1, 28, 28] 23 | tensor_shape = {'Placeholder': ann.TensorShape((1, 1, 28, 28))} 24 | 25 | # Specify the requested_outputs 26 | requested_outputs = ["output"] 27 | 28 | # Parse caffe binary & create network 29 | parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs) 30 | 31 | yield parser 32 | 33 | 34 | def test_caffe_parser_swig_destroy(): 35 | assert ann.ICaffeParser.__swig_destroy__, "There is a swig python destructor defined" 36 | assert ann.ICaffeParser.__swig_destroy__.__name__ == "delete_ICaffeParser" 37 | 38 | 39 | def test_check_caffe_parser_swig_ownership(parser): 40 | # Check to see that SWIG has ownership for parser. This instructs SWIG to take 41 | # ownership of the return value. This allows the value to be automatically 42 | # garbage-collected when it is no longer in use 43 | assert parser.thisown 44 | 45 | 46 | def test_get_network_input_binding_info(parser): 47 | input_binding_info = parser.GetNetworkInputBindingInfo("Placeholder") 48 | 49 | tensor = input_binding_info[1] 50 | assert tensor.GetDataType() == 1 51 | assert tensor.GetNumDimensions() == 4 52 | assert tensor.GetNumElements() == 784 53 | 54 | 55 | def test_get_network_output_binding_info(parser): 56 | output_binding_info1 = parser.GetNetworkOutputBindingInfo("output") 57 | 58 | # Check the tensor info retrieved from GetNetworkOutputBindingInfo 59 | tensor1 = output_binding_info1[1] 60 | 61 | assert tensor1.GetDataType() == 1 62 | assert tensor1.GetNumDimensions() == 2 63 | assert tensor1.GetNumElements() == 10 64 | 65 | 66 | def test_filenotfound_exception(shared_data_folder): 67 | parser = ann.ICaffeParser() 68 | 69 | # path to model 70 | path_to_model = os.path.join(shared_data_folder, 'some_unknown_network.caffemodel') 71 | 72 | # generic tensor shape [1, 1, 1, 1] 73 | tensor_shape = {'data': ann.TensorShape((1, 1, 1, 1))} 74 | 75 | # requested_outputs 76 | requested_outputs = [""] 77 | 78 | with pytest.raises(RuntimeError) as err: 79 | parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs) 80 | 81 | # Only check for part of the exception since the exception returns 82 | # absolute path which will change on different machines. 
83 | assert 'Failed to open graph file' in str(err.value)
84 | 
85 | 
86 | def test_caffe_parser_end_to_end(shared_data_folder):
87 | parser = ann.ICaffeParser()
88 | 
89 | # Load the network specifying the inputs and outputs
90 | input_name = "Placeholder"
91 | tensor_shape = {input_name: ann.TensorShape((1, 1, 28, 28))}
92 | requested_outputs = ["output"]
93 | 
94 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.caffemodel'),
95 | tensor_shape, requested_outputs)
96 | 
97 | # Specify preferred backend
98 | preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
99 | 
100 | input_binding_info = parser.GetNetworkInputBindingInfo(input_name)
101 | 
102 | options = ann.CreationOptions()
103 | runtime = ann.IRuntime(options)
104 | 
105 | opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
106 | 
107 | assert 0 == len(messages)
108 | 
109 | net_id, messages = runtime.LoadNetwork(opt_network)
110 | 
111 | assert "" == messages
112 | 
113 | # Load test image data stored in input_caffe.npy
114 | input_tensor_data = np.load(os.path.join(shared_data_folder, 'caffe_parser/input_caffe.npy')).astype(np.float32)
115 | input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
116 | 
117 | # Load output binding info and create the output tensors
118 | outputs_binding_info = []
119 | for output_name in requested_outputs:
120 | outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(output_name))
121 | output_tensors = ann.make_output_tensors(outputs_binding_info)
122 | 
123 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
124 | 
125 | output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
126 | 
127 | # Load golden output file for result comparison.
128 | expected_output = np.load(os.path.join(shared_data_folder, 'caffe_parser/golden_output_caffe.npy'))
129 | 
130 | # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
131 | np.testing.assert_almost_equal(output_vectors[0], expected_output, 4)
132 | 
-------------------------------------------------------------------------------- /python/pyarmnn/test/test_generated.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT
3 | import inspect
4 | from typing import Tuple
5 | 
6 | import pytest
7 | 
8 | import pyarmnn._generated.pyarmnn as generated_armnn
9 | import pyarmnn._generated.pyarmnn_caffeparser as generated_caffe
10 | import pyarmnn._generated.pyarmnn_onnxparser as generated_onnx
11 | import pyarmnn._generated.pyarmnn_tfliteparser as generated_tflite
12 | import pyarmnn._generated.pyarmnn_tfparser as generated_tf
13 | 
14 | swig_independent_classes = ('IBackend',
15 | 'IDeviceSpec',
16 | 'IConnectableLayer',
17 | 'IInputSlot',
18 | 'IOutputSlot',
19 | 'IProfiler')
20 | 
21 | 
22 | def get_classes(swig_independent_classes: Tuple):
23 | # We need to ignore some swig generated_armnn classes. This is because some are abstract classes
24 | # They cannot be created with the swig generated_armnn wrapper, therefore they don't need a destructor.
25 | # Swig also generates its own meta class - this needs to be ignored.
26 | ignored_class_names = (*swig_independent_classes, '_SwigNonDynamicMeta') 27 | return list(filter(lambda x: x[0] not in ignored_class_names, 28 | inspect.getmembers(generated_armnn, inspect.isclass) + 29 | inspect.getmembers(generated_caffe, inspect.isclass) + 30 | inspect.getmembers(generated_tflite, inspect.isclass) + 31 | inspect.getmembers(generated_onnx, inspect.isclass) + 32 | inspect.getmembers(generated_tf, inspect.isclass))) 33 | 34 | 35 | @pytest.mark.parametrize("class_instance", get_classes(swig_independent_classes), ids=lambda x: 'class={}'.format(x[0])) 36 | class TestPyOwnedClasses: 37 | 38 | def test_destructors_exist_per_class(self, class_instance): 39 | assert getattr(class_instance[1], '__swig_destroy__', None) 40 | 41 | def test_owned(self, class_instance): 42 | assert getattr(class_instance[1], 'thisown', None) 43 | 44 | 45 | @pytest.mark.parametrize("class_instance", swig_independent_classes) 46 | class TestPyIndependentClasses: 47 | 48 | def test_destructors_does_not_exist_per_class(self, class_instance): 49 | assert not getattr(class_instance[1], '__swig_destroy__', None) 50 | 51 | def test_not_owned(self, class_instance): 52 | assert not getattr(class_instance[1], 'thisown', None) 53 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_iconnectable.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import pytest 4 | 5 | import pyarmnn as ann 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def network(): 10 | return ann.INetwork() 11 | 12 | 13 | class TestIInputIOutputIConnectable: 14 | 15 | def test_input_slot(self, network): 16 | # Create input, addition & output layer 17 | input1 = network.AddInputLayer(0, "input1") 18 | input2 = network.AddInputLayer(1, "input2") 19 | add = network.AddAdditionLayer("addition") 20 | output = network.AddOutputLayer(0, "output") 21 | 22 | # Connect the input/output slots for each layer 23 | input1.GetOutputSlot(0).Connect(add.GetInputSlot(0)) 24 | input2.GetOutputSlot(0).Connect(add.GetInputSlot(1)) 25 | add.GetOutputSlot(0).Connect(output.GetInputSlot(0)) 26 | 27 | # Check IInputSlot GetConnection() 28 | input_slot = add.GetInputSlot(0) 29 | input_slot_connection = input_slot.GetConnection() 30 | 31 | assert isinstance(input_slot_connection, ann.IOutputSlot) 32 | 33 | del input_slot_connection 34 | 35 | assert input_slot.GetConnection() 36 | assert isinstance(input_slot.GetConnection(), ann.IOutputSlot) 37 | 38 | del input_slot 39 | 40 | assert add.GetInputSlot(0) 41 | 42 | def test_output_slot(self, network): 43 | 44 | # Create input, addition & output layer 45 | input1 = network.AddInputLayer(0, "input1") 46 | input2 = network.AddInputLayer(1, "input2") 47 | add = network.AddAdditionLayer("addition") 48 | output = network.AddOutputLayer(0, "output") 49 | 50 | # Connect the input/output slots for each layer 51 | input1.GetOutputSlot(0).Connect(add.GetInputSlot(0)) 52 | input2.GetOutputSlot(0).Connect(add.GetInputSlot(1)) 53 | add.GetOutputSlot(0).Connect(output.GetInputSlot(0)) 54 | 55 | # Check IInputSlot GetConnection() 56 | add_get_input_connection = add.GetInputSlot(0).GetConnection() 57 | output_get_input_connection = output.GetInputSlot(0).GetConnection() 58 | 59 | # Check IOutputSlot GetConnection() 60 | add_get_output_connect = add.GetOutputSlot(0).GetConnection(0) 61 | assert isinstance(add_get_output_connect.GetConnection(), 
ann.IOutputSlot) 62 | 63 | # Test IOutputSlot GetNumConnections() & CalculateIndexOnOwner() 64 | assert add_get_input_connection.GetNumConnections() == 1 65 | assert len(add_get_input_connection) == 1 66 | assert add_get_input_connection[0] 67 | assert add_get_input_connection.CalculateIndexOnOwner() == 0 68 | 69 | # Check GetOwningLayerGuid(). Check that it is different for add and output layer 70 | assert add_get_input_connection.GetOwningLayerGuid() != output_get_input_connection.GetOwningLayerGuid() 71 | 72 | # Set TensorInfo 73 | test_tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32) 74 | 75 | # Check IsTensorInfoSet() 76 | assert not add_get_input_connection.IsTensorInfoSet() 77 | add_get_input_connection.SetTensorInfo(test_tensor_info) 78 | assert add_get_input_connection.IsTensorInfoSet() 79 | 80 | # Check GetTensorInfo() 81 | output_tensor_info = add_get_input_connection.GetTensorInfo() 82 | assert 2 == output_tensor_info.GetNumDimensions() 83 | assert 6 == output_tensor_info.GetNumElements() 84 | 85 | # Check Disconnect() 86 | assert output_get_input_connection.GetNumConnections() == 1 # 1 connection to Outputslot0 from input1 87 | add.GetOutputSlot(0).Disconnect(output.GetInputSlot(0)) # disconnect add.OutputSlot0 from Output.InputSlot0 88 | assert output_get_input_connection.GetNumConnections() == 0 89 | 90 | def test_output_slot__out_of_range(self, network): 91 | # Create input layer to check output slot get item handling 92 | input1 = network.AddInputLayer(0, "input1") 93 | 94 | outputSlot = input1.GetOutputSlot(0) 95 | with pytest.raises(ValueError) as err: 96 | outputSlot[1] 97 | 98 | assert "Invalid index 1 provided" in str(err.value) 99 | 100 | def test_iconnectable_guid(self, network): 101 | 102 | # Check IConnectable GetGuid() 103 | # Note Guid can change based on which tests are run so 104 | # checking here that each layer does not have the same guid 105 | add_id = network.AddAdditionLayer().GetGuid() 106 | output_id = network.AddOutputLayer(0).GetGuid() 107 | assert add_id != output_id 108 | 109 | def test_iconnectable_layer_functions(self, network): 110 | 111 | # Create input, addition & output layer 112 | input1 = network.AddInputLayer(0, "input1") 113 | input2 = network.AddInputLayer(1, "input2") 114 | add = network.AddAdditionLayer("addition") 115 | output = network.AddOutputLayer(0, "output") 116 | 117 | # Check GetNumInputSlots(), GetName() & GetNumOutputSlots() 118 | assert input1.GetNumInputSlots() == 0 119 | assert input1.GetName() == "input1" 120 | assert input1.GetNumOutputSlots() == 1 121 | 122 | assert input2.GetNumInputSlots() == 0 123 | assert input2.GetName() == "input2" 124 | assert input2.GetNumOutputSlots() == 1 125 | 126 | assert add.GetNumInputSlots() == 2 127 | assert add.GetName() == "addition" 128 | assert add.GetNumOutputSlots() == 1 129 | 130 | assert output.GetNumInputSlots() == 1 131 | assert output.GetName() == "output" 132 | assert output.GetNumOutputSlots() == 0 133 | 134 | # Check GetOutputSlot() 135 | input1_get_output = input1.GetOutputSlot(0) 136 | assert input1_get_output.GetNumConnections() == 0 137 | assert len(input1_get_output) == 0 138 | 139 | # Check GetInputSlot() 140 | add_get_input = add.GetInputSlot(0) 141 | add_get_input.GetConnection() 142 | assert isinstance(add_get_input, ann.IInputSlot) 143 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_onnx_parser.py: -------------------------------------------------------------------------------- 
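
The connection pattern exercised by test_iconnectable.py above condenses to this minimal sketch (layer names and shape are illustrative):

    import pyarmnn as ann

    net = ann.INetwork()
    inp = net.AddInputLayer(0, "input")
    out = net.AddOutputLayer(0, "output")

    # Wire input -> output and give the link a concrete TensorInfo.
    inp.GetOutputSlot(0).Connect(out.GetInputSlot(0))
    inp.GetOutputSlot(0).SetTensorInfo(
        ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32))
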
1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import pyarmnn as ann 7 | import numpy as np 8 | from typing import List 9 | 10 | 11 | @pytest.fixture() 12 | def parser(shared_data_folder): 13 | """ 14 | Parse and setup the test network to be used for the tests below 15 | """ 16 | 17 | # create onnx parser 18 | parser = ann.IOnnxParser() 19 | 20 | # path to model 21 | path_to_model = os.path.join(shared_data_folder, 'mock_model.onnx') 22 | 23 | # parse onnx binary & create network 24 | parser.CreateNetworkFromBinaryFile(path_to_model) 25 | 26 | yield parser 27 | 28 | 29 | def test_onnx_parser_swig_destroy(): 30 | assert ann.IOnnxParser.__swig_destroy__, "There is a swig python destructor defined" 31 | assert ann.IOnnxParser.__swig_destroy__.__name__ == "delete_IOnnxParser" 32 | 33 | 34 | def test_check_onnx_parser_swig_ownership(parser): 35 | # Check to see that SWIG has ownership for parser. This instructs SWIG to take 36 | # ownership of the return value. This allows the value to be automatically 37 | # garbage-collected when it is no longer in use 38 | assert parser.thisown 39 | 40 | 41 | def test_onnx_parser_get_network_input_binding_info(parser): 42 | input_binding_info = parser.GetNetworkInputBindingInfo("input") 43 | 44 | tensor = input_binding_info[1] 45 | assert tensor.GetDataType() == 1 46 | assert tensor.GetNumDimensions() == 4 47 | assert tensor.GetNumElements() == 784 48 | assert tensor.GetQuantizationOffset() == 0 49 | assert tensor.GetQuantizationScale() == 0 50 | 51 | 52 | def test_onnx_parser_get_network_output_binding_info(parser): 53 | output_binding_info = parser.GetNetworkOutputBindingInfo("output") 54 | 55 | tensor = output_binding_info[1] 56 | assert tensor.GetDataType() == 1 57 | assert tensor.GetNumDimensions() == 4 58 | assert tensor.GetNumElements() == 10 59 | assert tensor.GetQuantizationOffset() == 0 60 | assert tensor.GetQuantizationScale() == 0 61 | 62 | 63 | def test_onnx_filenotfound_exception(shared_data_folder): 64 | parser = ann.IOnnxParser() 65 | 66 | # path to model 67 | path_to_model = os.path.join(shared_data_folder, 'some_unknown_model.onnx') 68 | 69 | # parse onnx binary & create network 70 | 71 | with pytest.raises(RuntimeError) as err: 72 | parser.CreateNetworkFromBinaryFile(path_to_model) 73 | 74 | # Only check for part of the exception since the exception returns 75 | # absolute path which will change on different machines. 
76 | assert 'Invalid (null) filename' in str(err.value)
77 | 
78 | 
79 | def test_onnx_parser_end_to_end(shared_data_folder):
80 | parser = ann.IOnnxParser()
81 | 
82 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.onnx'))
83 | 
84 | # load test image data stored in input_onnx.npy
85 | input_binding_info = parser.GetNetworkInputBindingInfo("input")
86 | input_tensor_data = np.load(os.path.join(shared_data_folder, 'onnx_parser/input_onnx.npy')).astype(np.float32)
87 | 
88 | options = ann.CreationOptions()
89 | runtime = ann.IRuntime(options)
90 | 
91 | preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
92 | opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
93 | 
94 | assert 0 == len(messages)
95 | 
96 | net_id, messages = runtime.LoadNetwork(opt_network)
97 | 
98 | assert "" == messages
99 | 
100 | input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
101 | output_tensors = ann.make_output_tensors([parser.GetNetworkOutputBindingInfo("output")])
102 | 
103 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
104 | 
105 | output = ann.workload_tensors_to_ndarray(output_tensors)
106 | 
107 | # Load golden output file for result comparison.
108 | golden_output = np.load(os.path.join(shared_data_folder, 'onnx_parser/golden_output_onnx.npy'))
109 | 
110 | # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
111 | np.testing.assert_almost_equal(output[0], golden_output, decimal=4)
112 | 
-------------------------------------------------------------------------------- /python/pyarmnn/test/test_profiling_utilities.py: --------------------------------------------------------------------------------
1 | # Copyright © 2020 Arm Ltd. All rights reserved.
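
A note on the (layer_id, TensorInfo) pairs used throughout the parser tests above: the binding info tuple feeds directly into the make_*_tensors helpers. A sketch assuming `parser` has already loaded a model and `input_data` is a matching numpy array:

    binding_info = parser.GetNetworkInputBindingInfo("input")   # name is model-specific
    layer_id, tensor_info = binding_info
    input_tensors = ann.make_input_tensors([binding_info], [input_data])
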
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | 7 | import pyarmnn as ann 8 | 9 | 10 | class MockIProfiler: 11 | def __init__(self, json_string): 12 | self._profile_json = json_string 13 | 14 | def as_json(self): 15 | return self._profile_json 16 | 17 | 18 | @pytest.fixture() 19 | def mock_profiler(shared_data_folder): 20 | path_to_file = os.path.join(shared_data_folder, 'mock_profile_out.json') 21 | with open(path_to_file, 'r') as file: 22 | profiler_output = file.read() 23 | return MockIProfiler(profiler_output) 24 | 25 | 26 | def test_inference_exec(mock_profiler): 27 | profiling_data_obj = ann.get_profiling_data(mock_profiler) 28 | 29 | assert (len(profiling_data_obj.inference_data) > 0) 30 | assert (len(profiling_data_obj.per_workload_execution_data) > 0) 31 | 32 | # Check each total execution time 33 | assert (profiling_data_obj.inference_data["execution_time"] == [1.1, 2.2, 3.3, 4.4, 5.5, 6.6]) 34 | assert (profiling_data_obj.inference_data["time_unit"] == "us") 35 | 36 | 37 | @pytest.mark.parametrize("exec_times, unit, backend, workload", [([2, 2, 38 | 2, 2, 39 | 2, 2], 40 | 'us', 41 | 'CpuRef', 42 | 'RefSomeMock1dWorkload_Execute_#5'), 43 | ([2, 2, 44 | 2, 2, 45 | 2, 2], 46 | 'us', 47 | 'CpuAcc', 48 | 'NeonSomeMock2Workload_Execute_#6'), 49 | ([2, 2, 50 | 2, 2, 51 | 2, 2], 52 | 'us', 53 | 'GpuAcc', 54 | 'ClSomeMock3dWorkload_Execute_#7'), 55 | ([2, 2, 56 | 2, 2, 57 | 2, 2], 58 | 'us', 59 | 'EthosNAcc', 60 | 'EthosNSomeMock4dWorkload_Execute_#8') 61 | ]) 62 | def test_profiler_workloads(mock_profiler, exec_times, unit, backend, workload): 63 | profiling_data_obj = ann.get_profiling_data(mock_profiler) 64 | 65 | work_load_exec = profiling_data_obj.per_workload_execution_data[workload] 66 | assert work_load_exec["execution_time"] == exec_times 67 | assert work_load_exec["time_unit"] == unit 68 | assert work_load_exec["backend"] == backend 69 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_quantize_and_dequantize.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import pytest 4 | import numpy as np 5 | 6 | import pyarmnn as ann 7 | 8 | # import generated so we can test for Dequantize_* and Quantize_* 9 | # functions not available in the public API. 
10 | import pyarmnn._generated.pyarmnn as gen_ann 11 | 12 | 13 | @pytest.mark.parametrize('method', ['Quantize_int8_t', 14 | 'Quantize_uint8_t', 15 | 'Quantize_int16_t', 16 | 'Quantize_int32_t', 17 | 'Dequantize_int8_t', 18 | 'Dequantize_uint8_t', 19 | 'Dequantize_int16_t', 20 | 'Dequantize_int32_t']) 21 | def test_quantize_exists(method): 22 | assert method in dir(gen_ann) and callable(getattr(gen_ann, method)) 23 | 24 | 25 | @pytest.mark.parametrize('dt, min, max', [('uint8', 0, 255), 26 | ('int8', -128, 127), 27 | ('int16', -32768, 32767), 28 | ('int32', -2147483648, 2147483647)]) 29 | def test_quantize_uint8_output(dt, min, max): 30 | result = ann.quantize(3.3274056911468506, 0.02620004490017891, 128, dt) 31 | assert type(result) is int and min <= result <= max 32 | 33 | 34 | @pytest.mark.parametrize('dt', ['uint8', 35 | 'int8', 36 | 'int16', 37 | 'int32']) 38 | def test_dequantize_uint8_output(dt): 39 | result = ann.dequantize(3, 0.02620004490017891, 128, dt) 40 | assert type(result) is float 41 | 42 | 43 | def test_quantize_unsupported_dtype(): 44 | with pytest.raises(ValueError) as err: 45 | ann.quantize(3.3274056911468506, 0.02620004490017891, 128, 'uint16') 46 | 47 | assert 'Unexpected target datatype uint16 given.' in str(err.value) 48 | 49 | 50 | def test_dequantize_unsupported_dtype(): 51 | with pytest.raises(ValueError) as err: 52 | ann.dequantize(3, 0.02620004490017891, 128, 'uint16') 53 | 54 | assert 'Unexpected value datatype uint16 given.' in str(err.value) 55 | 56 | 57 | def test_dequantize_value_range(): 58 | with pytest.raises(ValueError) as err: 59 | ann.dequantize(-1, 0.02620004490017891, 128, 'uint8') 60 | 61 | assert 'Value is not within range of the given datatype uint8' in str(err.value) 62 | 63 | 64 | @pytest.mark.parametrize('dt, data', [('uint8', np.uint8(255)), 65 | ('int8', np.int8(127)), 66 | ('int16', np.int16(32767)), 67 | ('int32', np.int32(2147483647)), 68 | 69 | ('uint8', np.int8(127)), 70 | ('uint8', np.int16(255)), 71 | ('uint8', np.int32(255)), 72 | 73 | ('int8', np.uint8(127)), 74 | ('int8', np.int16(127)), 75 | ('int8', np.int32(127)), 76 | 77 | ('int16', np.int8(127)), 78 | ('int16', np.uint8(255)), 79 | ('int16', np.int32(32767)), 80 | 81 | ('int32', np.uint8(255)), 82 | ('int16', np.int8(127)), 83 | ('int32', np.int16(32767)) 84 | 85 | ]) 86 | def test_dequantize_numpy_dt(dt, data): 87 | result = ann.dequantize(data, 1, 0, dt) 88 | 89 | assert type(result) is float 90 | 91 | assert np.float32(data) == result 92 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_runtime.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 
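
The quantize/dequantize helpers exercised above pair naturally into a round trip; scale and offset here are illustrative:

    import pyarmnn as ann

    scale, offset = 0.02620004490017891, 128
    q = ann.quantize(3.3274056911468506, scale, offset, 'uint8')  # int in [0, 255]
    f = ann.dequantize(q, scale, offset, 'uint8')                 # float near the input
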
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import numpy as np 7 | 8 | import pyarmnn as ann 9 | 10 | 11 | @pytest.fixture(scope="function") 12 | def random_runtime(shared_data_folder): 13 | parser = ann.ITfLiteParser() 14 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite')) 15 | preferred_backends = [ann.BackendId('CpuRef')] 16 | options = ann.CreationOptions() 17 | runtime = ann.IRuntime(options) 18 | 19 | graphs_count = parser.GetSubgraphCount() 20 | 21 | graph_id = graphs_count - 1 22 | input_names = parser.GetSubgraphInputTensorNames(graph_id) 23 | 24 | input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0]) 25 | input_tensor_id = input_binding_info[0] 26 | 27 | input_tensor_info = input_binding_info[1] 28 | 29 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 30 | 31 | input_data = np.random.randint(255, size=input_tensor_info.GetNumElements(), dtype=np.uint8) 32 | 33 | const_tensor_pair = (input_tensor_id, ann.ConstTensor(input_tensor_info, input_data)) 34 | 35 | input_tensors = [const_tensor_pair] 36 | 37 | output_tensors = [] 38 | 39 | for index, output_name in enumerate(output_names): 40 | out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name) 41 | 42 | out_tensor_info = out_bind_info[1] 43 | out_tensor_id = out_bind_info[0] 44 | 45 | output_tensors.append((out_tensor_id, 46 | ann.Tensor(out_tensor_info))) 47 | 48 | yield preferred_backends, network, runtime, input_tensors, output_tensors 49 | 50 | 51 | @pytest.fixture(scope='function') 52 | def mock_model_runtime(shared_data_folder): 53 | parser = ann.ITfLiteParser() 54 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite')) 55 | graph_id = 0 56 | 57 | input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, "input_1") 58 | 59 | input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy')) 60 | 61 | preferred_backends = [ann.BackendId('CpuRef')] 62 | 63 | options = ann.CreationOptions() 64 | runtime = ann.IRuntime(options) 65 | 66 | opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions()) 67 | 68 | print(messages) 69 | 70 | net_id, messages = runtime.LoadNetwork(opt_network) 71 | 72 | print(messages) 73 | 74 | input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data]) 75 | 76 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 77 | outputs_binding_info = [] 78 | 79 | for output_name in output_names: 80 | outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name)) 81 | 82 | output_tensors = ann.make_output_tensors(outputs_binding_info) 83 | 84 | yield runtime, net_id, input_tensors, output_tensors 85 | 86 | 87 | def test_python_disowns_network(random_runtime): 88 | preferred_backends = random_runtime[0] 89 | network = random_runtime[1] 90 | runtime = random_runtime[2] 91 | opt_network, _ = ann.Optimize(network, preferred_backends, 92 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 93 | 94 | runtime.LoadNetwork(opt_network) 95 | 96 | assert not opt_network.thisown 97 | 98 | 99 | def test_load_network(random_runtime): 100 | preferred_backends = random_runtime[0] 101 | network = random_runtime[1] 102 | runtime = random_runtime[2] 103 | 104 | opt_network, _ = ann.Optimize(network, preferred_backends, 105 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 106 | 107 | net_id, messages = 
runtime.LoadNetwork(opt_network) 108 | assert "" == messages 109 | assert net_id == 0 110 | 111 | 112 | def test_load_network_properties_provided(random_runtime): 113 | preferred_backends = random_runtime[0] 114 | network = random_runtime[1] 115 | runtime = random_runtime[2] 116 | 117 | opt_network, _ = ann.Optimize(network, preferred_backends, 118 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 119 | 120 | properties = ann.INetworkProperties(True, True) 121 | net_id, messages = runtime.LoadNetwork(opt_network, properties) 122 | assert "" == messages 123 | assert net_id == 0 124 | 125 | 126 | def test_unload_network_fails_for_invalid_net_id(random_runtime): 127 | preferred_backends = random_runtime[0] 128 | network = random_runtime[1] 129 | runtime = random_runtime[2] 130 | 131 | ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions()) 132 | 133 | with pytest.raises(RuntimeError) as err: 134 | runtime.UnloadNetwork(9) 135 | 136 | expected_error_message = "Failed to unload network." 137 | assert expected_error_message in str(err.value) 138 | 139 | 140 | def test_enqueue_workload(random_runtime): 141 | preferred_backends = random_runtime[0] 142 | network = random_runtime[1] 143 | runtime = random_runtime[2] 144 | input_tensors = random_runtime[3] 145 | output_tensors = random_runtime[4] 146 | 147 | opt_network, _ = ann.Optimize(network, preferred_backends, 148 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 149 | 150 | net_id, _ = runtime.LoadNetwork(opt_network) 151 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 152 | 153 | 154 | def test_enqueue_workload_fails_with_empty_input_tensors(random_runtime): 155 | preferred_backends = random_runtime[0] 156 | network = random_runtime[1] 157 | runtime = random_runtime[2] 158 | input_tensors = [] 159 | output_tensors = random_runtime[4] 160 | 161 | opt_network, _ = ann.Optimize(network, preferred_backends, 162 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 163 | 164 | net_id, _ = runtime.LoadNetwork(opt_network) 165 | with pytest.raises(RuntimeError) as err: 166 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 167 | 168 | expected_error_message = "Number of inputs provided does not match network." 169 | assert expected_error_message in str(err.value) 170 | 171 | 172 | @pytest.mark.x86_64 173 | @pytest.mark.parametrize('count', [5]) 174 | def test_multiple_inference_runs_yield_same_result(count, mock_model_runtime): 175 | """ 176 | Test that results remain consistent among multiple runs of the same inference. 
177 | """ 178 | runtime = mock_model_runtime[0] 179 | net_id = mock_model_runtime[1] 180 | input_tensors = mock_model_runtime[2] 181 | output_tensors = mock_model_runtime[3] 182 | 183 | expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]]) 184 | 185 | for _ in range(count): 186 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 187 | 188 | output_vectors = ann.workload_tensors_to_ndarray(output_tensors) 189 | 190 | for i in range(len(expected_results)): 191 | assert output_vectors[i].all() == expected_results[i].all() 192 | 193 | 194 | @pytest.mark.aarch64 195 | def test_aarch64_inference_results(mock_model_runtime): 196 | 197 | runtime = mock_model_runtime[0] 198 | net_id = mock_model_runtime[1] 199 | input_tensors = mock_model_runtime[2] 200 | output_tensors = mock_model_runtime[3] 201 | 202 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 203 | 204 | output_vectors = ann.workload_tensors_to_ndarray(output_tensors) 205 | 206 | expected_outputs = expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]]) 207 | 208 | for i in range(len(expected_outputs)): 209 | assert output_vectors[i].all() == expected_results[i].all() 210 | 211 | 212 | def test_enqueue_workload_with_profiler(random_runtime): 213 | """ 214 | Tests ArmNN's profiling extension 215 | """ 216 | preferred_backends = random_runtime[0] 217 | network = random_runtime[1] 218 | runtime = random_runtime[2] 219 | input_tensors = random_runtime[3] 220 | output_tensors = random_runtime[4] 221 | 222 | opt_network, _ = ann.Optimize(network, preferred_backends, 223 | runtime.GetDeviceSpec(), ann.OptimizerOptions()) 224 | net_id, _ = runtime.LoadNetwork(opt_network) 225 | 226 | profiler = runtime.GetProfiler(net_id) 227 | # By default profiling should be turned off: 228 | assert profiler.IsProfilingEnabled() is False 229 | 230 | # Enable profiling: 231 | profiler.EnableProfiling(True) 232 | assert profiler.IsProfilingEnabled() is True 233 | 234 | # Run the inference: 235 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 236 | 237 | # Get profile output as a string: 238 | str_profile = profiler.as_json() 239 | 240 | # Verify that certain markers are present: 241 | assert len(str_profile) != 0 242 | assert str_profile.find('\"ArmNN\": {') > 0 243 | 244 | # Get events analysis output as a string: 245 | str_events_analysis = profiler.event_log() 246 | 247 | assert "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" in str_events_analysis 248 | 249 | assert profiler.thisown == 0 250 | 251 | 252 | def test_check_runtime_swig_ownership(random_runtime): 253 | # Check to see that SWIG has ownership for runtime. This instructs SWIG to take 254 | # ownership of the return value. This allows the value to be automatically 255 | # garbage-collected when it is no longer in use 256 | runtime = random_runtime[2] 257 | assert runtime.thisown 258 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_setup.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | import sys 5 | import shutil 6 | 7 | import pytest 8 | 9 | sys.path.append(os.path.abspath('..')) 10 | from setup import find_armnn, find_includes, linux_gcc_lib_search, check_armnn_version 11 | 12 | 13 | @pytest.fixture(autouse=True) 14 | def _setup_armnn(tmpdir): 15 | includes = str(os.path.join(tmpdir, 'include')) 16 | libs = str(os.path.join(tmpdir, 'lib')) 17 | os.environ["TEST_ARMNN_INCLUDE"] = includes 18 | os.environ["TEST_ARMNN_LIB"] = libs 19 | os.environ["EMPTY_ARMNN_INCLUDE"] = '' 20 | 21 | os.mkdir(includes) 22 | os.mkdir(libs) 23 | 24 | with open(os.path.join(libs, "libarmnn.so"), "w"): 25 | pass 26 | 27 | with open(os.path.join(libs, "libarmnnSomeThing1.so"), "w"): 28 | pass 29 | with open(os.path.join(libs, "libarmnnSomeThing1.so.1"), "w"): 30 | pass 31 | with open(os.path.join(libs, "libarmnnSomeThing1.so.1.2"), "w"): 32 | pass 33 | 34 | with open(os.path.join(libs, "libarmnnSomeThing2.so"), "w"): 35 | pass 36 | 37 | with open(os.path.join(libs, "libSomeThing3.so"), "w"): 38 | pass 39 | 40 | yield 41 | 42 | del os.environ["TEST_ARMNN_INCLUDE"] 43 | del os.environ["TEST_ARMNN_LIB"] 44 | del os.environ["EMPTY_ARMNN_INCLUDE"] 45 | shutil.rmtree(includes) 46 | shutil.rmtree(libs) 47 | 48 | 49 | def test_find_armnn(tmpdir): 50 | lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so', 51 | armnn_libs_env="TEST_ARMNN_LIB", 52 | default_lib_search=("/lib",)) 53 | armnn_includes = find_includes(armnn_include_env="TEST_ARMNN_INCLUDE") 54 | 55 | assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names) 56 | assert [os.path.join(tmpdir, 'lib')] == lib_paths 57 | assert [os.path.join(tmpdir, 'include')] == armnn_includes 58 | 59 | 60 | def test_find_armnn_default_path(tmpdir): 61 | lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so', 62 | armnn_libs_env="RUBBISH_LIB", 63 | default_lib_search=(os.environ["TEST_ARMNN_LIB"],)) 64 | armnn_includes = find_includes('TEST_ARMNN_INCLUDE') 65 | assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names) 66 | assert [os.path.join(tmpdir, 'lib')] == lib_paths 67 | assert [os.path.join(tmpdir, 'include')] == armnn_includes 68 | 69 | 70 | def test_not_find_armnn(tmpdir): 71 | with pytest.raises(RuntimeError) as err: 72 | find_armnn(lib_name='libarmnn*.so', armnn_libs_env="RUBBISH_LIB", 73 | default_lib_search=("/lib",)) 74 | 75 | assert 'ArmNN library libarmnn*.so was not found in (\'/lib\',)' in str(err.value) 76 | 77 | 78 | @pytest.mark.parametrize("env", ["RUBBISH_INCLUDE", "EMPTY_ARMNN_INCLUDE"]) 79 | def test_rubbish_armnn_include(tmpdir, env): 80 | includes = find_includes(armnn_include_env=env) 81 | assert includes == ['/usr/local/include', '/usr/include'] 82 | 83 | 84 | def test_gcc_search_path(): 85 | assert linux_gcc_lib_search() 86 | 87 | 88 | def test_armnn_version(): 89 | check_armnn_version('20190800', '20190800') 90 | 91 | 92 | def test_incorrect_armnn_version(): 93 | with pytest.raises(AssertionError) as err: 94 | check_armnn_version('20190800', '20190500') 95 | 96 | assert 'Expected ArmNN version is 201905 but installed ArmNN version is 201908' in str(err.value) 97 | 98 | 99 | def test_armnn_version_patch_does_not_matter(): 100 | check_armnn_version('20190800', '20190801') 101 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_supported_backends.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | import platform 5 | import pytest 6 | import pyarmnn as ann 7 | 8 | 9 | @pytest.fixture() 10 | def get_supported_backends_setup(shared_data_folder): 11 | options = ann.CreationOptions() 12 | runtime = ann.IRuntime(options) 13 | 14 | get_device_spec = runtime.GetDeviceSpec() 15 | supported_backends = get_device_spec.GetSupportedBackends() 16 | 17 | yield supported_backends 18 | 19 | 20 | def test_ownership(): 21 | options = ann.CreationOptions() 22 | runtime = ann.IRuntime(options) 23 | 24 | device_spec = runtime.GetDeviceSpec() 25 | 26 | assert not device_spec.thisown 27 | 28 | 29 | def test_to_string(): 30 | options = ann.CreationOptions() 31 | runtime = ann.IRuntime(options) 32 | 33 | device_spec = runtime.GetDeviceSpec() 34 | expected_str = "IDeviceSpec {{ supportedBackends: [" \ 35 | "{}" \ 36 | "]}}".format(', '.join(map(lambda b: str(b), device_spec.GetSupportedBackends()))) 37 | 38 | assert expected_str == str(device_spec) 39 | 40 | 41 | def test_get_supported_backends_cpu_ref(get_supported_backends_setup): 42 | assert "CpuRef" in map(lambda b: str(b), get_supported_backends_setup) 43 | 44 | 45 | @pytest.mark.aarch64 46 | class TestNonCpuRefBackends: 47 | 48 | @pytest.mark.parametrize("backend", ["CpuAcc"]) 49 | def test_get_supported_backends_cpu_acc(self, get_supported_backends_setup, backend): 50 | assert backend in map(lambda b: str(b), get_supported_backends_setup) 51 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tensor.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | from copy import copy 4 | 5 | import pytest 6 | import numpy as np 7 | import pyarmnn as ann 8 | 9 | 10 | def __get_tensor_info(dt): 11 | tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt) 12 | 13 | return tensor_info 14 | 15 | 16 | @pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, 17 | ann.DataType_QAsymmU8, ann.DataType_QSymmS8, 18 | ann.DataType_QAsymmS8]) 19 | def test_create_tensor_with_info(dt): 20 | tensor_info = __get_tensor_info(dt) 21 | elements = tensor_info.GetNumElements() 22 | num_bytes = tensor_info.GetNumBytes() 23 | d_type = dt 24 | 25 | tensor = ann.Tensor(tensor_info) 26 | 27 | assert tensor_info != tensor.GetInfo(), "Different objects" 28 | assert elements == tensor.GetNumElements() 29 | assert num_bytes == tensor.GetNumBytes() 30 | assert d_type == tensor.GetDataType() 31 | 32 | 33 | def test_create_tensor_undefined_datatype(): 34 | tensor_info = ann.TensorInfo() 35 | tensor_info.SetDataType(99) 36 | 37 | with pytest.raises(ValueError) as err: 38 | ann.Tensor(tensor_info) 39 | 40 | assert 'The data type provided for this Tensor is not supported.' in str(err.value) 41 | 42 | 43 | @pytest.mark.parametrize("dt", [ann.DataType_Float32]) 44 | def test_tensor_memory_output(dt): 45 | tensor_info = __get_tensor_info(dt) 46 | tensor = ann.Tensor(tensor_info) 47 | 48 | # The memory area is uninitialized because inference has not yet been run:
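# (if deterministic contents were needed, something like tensor.get_memory_area()[:] = 0 would zero-fill the writable view)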
49 | assert tensor.get_memory_area().tolist() # readable, but holds arbitrary values 50 | assert 4 == tensor.get_memory_area().itemsize, "float32 elements are 4 bytes" 51 | 52 | 53 | @pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, 54 | ann.DataType_QAsymmU8, ann.DataType_QSymmS8, 55 | ann.DataType_QAsymmS8]) 56 | def test_tensor__str__(dt): 57 | tensor_info = __get_tensor_info(dt) 58 | elements = tensor_info.GetNumElements() 59 | num_bytes = tensor_info.GetNumBytes() 60 | d_type = dt 61 | dimensions = tensor_info.GetNumDimensions() 62 | 63 | tensor = ann.Tensor(tensor_info) 64 | 65 | assert str(tensor) == "Tensor{{DataType: {}, NumBytes: {}, NumDimensions: " \ 66 | "{}, NumElements: {}}}".format(d_type, num_bytes, dimensions, elements) 67 | 68 | 69 | def test_create_empty_tensor(): 70 | tensor = ann.Tensor() 71 | 72 | assert 0 == tensor.GetNumElements() 73 | assert 0 == tensor.GetNumBytes() 74 | assert tensor.get_memory_area() is None 75 | 76 | 77 | @pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, 78 | ann.DataType_QAsymmU8, ann.DataType_QSymmS8, 79 | ann.DataType_QAsymmS8]) 80 | def test_create_tensor_from_tensor(dt): 81 | tensor_info = __get_tensor_info(dt) 82 | tensor = ann.Tensor(tensor_info) 83 | copied_tensor = ann.Tensor(tensor) 84 | 85 | assert copied_tensor != tensor, "Different objects" 86 | assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects" 87 | assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area" 88 | assert copied_tensor.GetNumElements() == tensor.GetNumElements() 89 | assert copied_tensor.GetNumBytes() == tensor.GetNumBytes() 90 | assert copied_tensor.GetDataType() == tensor.GetDataType() 91 | 92 | 93 | @pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, 94 | ann.DataType_QAsymmU8, ann.DataType_QSymmS8, 95 | ann.DataType_QAsymmS8]) 96 | def test_copy_tensor(dt): 97 | tensor = ann.Tensor(__get_tensor_info(dt)) 98 | copied_tensor = copy(tensor) 99 | 100 | assert copied_tensor != tensor, "Different objects" 101 | assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects" 102 | assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area" 103 | assert copied_tensor.GetNumElements() == tensor.GetNumElements() 104 | assert copied_tensor.GetNumBytes() == tensor.GetNumBytes() 105 | assert copied_tensor.GetDataType() == tensor.GetDataType() 106 | 107 | 108 | @pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, 109 | ann.DataType_QAsymmU8, ann.DataType_QSymmS8, 110 | ann.DataType_QAsymmS8]) 111 | def test_copied_tensor_has_memory_area_access_after_deletion_of_original_tensor(dt): 112 | 113 | tensor = ann.Tensor(__get_tensor_info(dt)) 114 | 115 | tensor.get_memory_area()[0] = 100 116 | 117 | initial_mem_copy = np.array(tensor.get_memory_area()) 118 | 119 | assert 100 == initial_mem_copy[0] 120 | 121 | copied_tensor = ann.Tensor(tensor) 122 | 123 | del tensor 124 | np.testing.assert_array_equal(copied_tensor.get_memory_area(), initial_mem_copy) 125 | assert 100 == copied_tensor.get_memory_area()[0] 126 | 127 | 128 | def test_create_tensor_incorrect_args(): 129 | with pytest.raises(ValueError) as err: 130 | ann.Tensor('something', 'something') 131 | 132 | expected_error_message = "Incorrect number of arguments or type of arguments provided to create Tensor."
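# (ann.Tensor accepts no arguments, a TensorInfo, or another Tensor, as exercised above; two strings match none of those overloads)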
133 | assert expected_error_message in str(err.value) 134 | 135 | 136 | @pytest.mark.parametrize("dt", [ann.DataType_Float16]) 137 | def test_tensor_memory_output_fp16(dt): 138 | # Check Tensor with float16 139 | tensor_info = __get_tensor_info(dt) 140 | tensor = ann.Tensor(tensor_info) 141 | 142 | assert tensor.GetNumElements() == 6 143 | assert tensor.GetNumBytes() == 12 144 | assert tensor.GetDataType() == ann.DataType_Float16 145 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tensor_conversion.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import pyarmnn as ann 7 | import numpy as np 8 | 9 | 10 | @pytest.fixture(scope="function") 11 | def get_tensor_info_input(shared_data_folder): 12 | """ 13 | Sample input tensor information. 14 | """ 15 | parser = ann.ITfLiteParser() 16 | parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite')) 17 | graph_id = 0 18 | 19 | input_binding_info = [parser.GetNetworkInputBindingInfo(graph_id, 'input_1')] 20 | 21 | yield input_binding_info 22 | 23 | 24 | @pytest.fixture(scope="function") 25 | def get_tensor_info_output(shared_data_folder): 26 | """ 27 | Sample output tensor information. 28 | """ 29 | parser = ann.ITfLiteParser() 30 | parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite')) 31 | graph_id = 0 32 | 33 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 34 | outputs_binding_info = [] 35 | 36 | for output_name in output_names: 37 | outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name)) 38 | 39 | yield outputs_binding_info 40 | 41 | 42 | def test_make_input_tensors(get_tensor_info_input): 43 | input_tensor_info = get_tensor_info_input 44 | input_data = [] 45 | 46 | for tensor_id, tensor_info in input_tensor_info: 47 | input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.uint8)) 48 | 49 | input_tensors = ann.make_input_tensors(input_tensor_info, input_data) 50 | assert len(input_tensors) == 1 51 | 52 | for tensor, tensor_info in zip(input_tensors, input_tensor_info): 53 | # ConstTensor is created through a factory function, so the type cannot be checked directly; compare the class name instead. 54 | assert type(tensor[1]).__name__ == 'ConstTensor' 55 | assert str(tensor[1].GetInfo()) == str(tensor_info[1]) 56 | 57 | 58 | def test_make_output_tensors(get_tensor_info_output): 59 | output_binding_info = get_tensor_info_output 60 | 61 | output_tensors = ann.make_output_tensors(output_binding_info) 62 | assert len(output_tensors) == 1 63 | 64 | for tensor, tensor_info in zip(output_tensors, output_binding_info): 65 | assert type(tensor[1]) == ann.Tensor 66 | assert str(tensor[1].GetInfo()) == str(tensor_info[1]) 67 | 68 | 69 | def test_workload_tensors_to_ndarray(get_tensor_info_output): 70 | # Check that the shape and size of the output from workload_tensors_to_ndarray match the expected tensor info.
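# (each returned ndarray should mirror the TensorShape and element count recorded in the output binding info)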
71 | output_binding_info = get_tensor_info_output 72 | output_tensors = ann.make_output_tensors(output_binding_info) 73 | 74 | data = ann.workload_tensors_to_ndarray(output_tensors) 75 | 76 | for i in range(0, len(output_tensors)): 77 | assert data[i].shape == tuple(output_tensors[i][1].GetShape()) 78 | assert data[i].size == output_tensors[i][1].GetNumElements() 79 | 80 | 81 | def test_make_input_tensors_fp16(get_tensor_info_input): 82 | # Check ConstTensor with float16 83 | input_tensor_info = get_tensor_info_input 84 | input_data = [] 85 | 86 | for tensor_id, tensor_info in input_tensor_info: 87 | input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.float16)) 88 | tensor_info.SetDataType(ann.DataType_Float16) # set datatype to float16 89 | 90 | input_tensors = ann.make_input_tensors(input_tensor_info, input_data) 91 | assert len(input_tensors) == 1 92 | 93 | for tensor, tensor_info in zip(input_tensors, input_tensor_info): 94 | # ConstTensor is created through a factory function, so the type cannot be checked directly; compare the class name instead. 95 | assert type(tensor[1]).__name__ == 'ConstTensor' 96 | assert str(tensor[1].GetInfo()) == str(tensor_info[1]) 97 | assert tensor[1].GetDataType() == ann.DataType_Float16 98 | assert tensor[1].GetNumElements() == 28*28*1 99 | assert tensor[1].GetNumBytes() == (28*28*1)*2 # each float16 element is two bytes 100 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tensor_info.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import pyarmnn as ann 4 | 5 | 6 | def test_tensor_info_ctor_shape(): 7 | tensor_shape = ann.TensorShape((1, 1, 2)) 8 | 9 | tensor_info = ann.TensorInfo(tensor_shape, ann.DataType_QAsymmU8, 0.5, 1) 10 | 11 | assert 2 == tensor_info.GetNumElements() 12 | assert 3 == tensor_info.GetNumDimensions() 13 | assert ann.DataType_QAsymmU8 == tensor_info.GetDataType() 14 | assert 0.5 == tensor_info.GetQuantizationScale() 15 | assert 1 == tensor_info.GetQuantizationOffset() 16 | 17 | shape = tensor_info.GetShape() 18 | 19 | assert 2 == shape.GetNumElements() 20 | assert 3 == shape.GetNumDimensions() 21 | 22 | 23 | def test_tensor_info__str__(): 24 | tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1) 25 | 26 | assert tensor_info.__str__() == "TensorInfo{DataType: 2, IsQuantized: 1, QuantizationScale: 0.500000, " \ 27 | "QuantizationOffset: 1, NumDimensions: 2, NumElements: 6}" 28 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tensor_shape.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved.
2 | # SPDX-License-Identifier: MIT 3 | import pytest 4 | import pyarmnn as ann 5 | 6 | 7 | def test_tensor_shape_tuple(): 8 | tensor_shape = ann.TensorShape((1, 2, 3)) 9 | 10 | assert 3 == tensor_shape.GetNumDimensions() 11 | assert 6 == tensor_shape.GetNumElements() 12 | 13 | 14 | def test_tensor_shape_one(): 15 | tensor_shape = ann.TensorShape((10,)) 16 | assert 1 == tensor_shape.GetNumDimensions() 17 | assert 10 == tensor_shape.GetNumElements() 18 | 19 | 20 | def test_tensor_shape_empty(): 21 | with pytest.raises(RuntimeError) as err: 22 | ann.TensorShape(()) 23 | 24 | assert "Tensor numDimensions must be greater than 0" in str(err.value) 25 | 26 | 27 | def test_tensor_shape_tuple_mess(): 28 | tensor_shape = ann.TensorShape((1, "2", 3.0)) 29 | 30 | assert 3 == tensor_shape.GetNumDimensions() 31 | assert 6 == tensor_shape.GetNumElements() 32 | 33 | 34 | def test_tensor_shape_list(): 35 | 36 | with pytest.raises(TypeError) as err: 37 | ann.TensorShape([1, 2, 3]) 38 | 39 | assert "Argument is not a tuple" in str(err.value) 40 | 41 | 42 | def test_tensor_shape_tuple_mess_fail(): 43 | 44 | with pytest.raises(TypeError) as err: 45 | ann.TensorShape((1, "two", 3.0)) 46 | 47 | assert "All elements must be numbers" in str(err.value) 48 | 49 | 50 | def test_tensor_shape_varags(): 51 | with pytest.raises(TypeError) as err: 52 | ann.TensorShape(1, 2, 3) 53 | 54 | assert "__init__() takes 2 positional arguments but 4 were given" in str(err.value) 55 | 56 | 57 | def test_tensor_shape__get_item_out_of_bounds(): 58 | tensor_shape = ann.TensorShape((1, 2, 3)) 59 | with pytest.raises(ValueError) as err: 60 | for i in range(4): 61 | tensor_shape[i] 62 | 63 | assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value) 64 | 65 | 66 | def test_tensor_shape__set_item_out_of_bounds(): 67 | tensor_shape = ann.TensorShape((1, 2, 3)) 68 | with pytest.raises(ValueError) as err: 69 | for i in range(4): 70 | tensor_shape[i] = 1 71 | 72 | assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value) 73 | 74 | 75 | def test_tensor_shape___str__(): 76 | tensor_shape = ann.TensorShape((1, 2, 3)) 77 | 78 | assert str(tensor_shape) == "TensorShape{Shape(1, 2, 3), NumDimensions: 3, NumElements: 6}" 79 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tf_parser.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import pyarmnn as ann 7 | import numpy as np 8 | 9 | 10 | @pytest.fixture() 11 | def parser(shared_data_folder): 12 | """ 13 | Parse and setup the test network to be used for the tests below 14 | """ 15 | 16 | # create tf parser 17 | parser = ann.ITfParser() 18 | 19 | # path to model 20 | path_to_model = os.path.join(shared_data_folder, 'mock_model.pb') 21 | 22 | # tensor shape [1, 28, 28, 1] 23 | tensorshape = {'input': ann.TensorShape((1, 28, 28, 1))} 24 | 25 | # requested_outputs 26 | requested_outputs = ["output"] 27 | 28 | # parse tf binary & create network 29 | parser.CreateNetworkFromBinaryFile(path_to_model, tensorshape, requested_outputs) 30 | 31 | yield parser 32 | 33 | 34 | def test_tf_parser_swig_destroy(): 35 | assert ann.ITfParser.__swig_destroy__, "There is a swig python destructor defined" 36 | assert ann.ITfParser.__swig_destroy__.__name__ == "delete_ITfParser" 37 | 38 | 39 | def test_check_tf_parser_swig_ownership(parser): 40 | # Check to see that SWIG has ownership for parser. This instructs SWIG to take 41 | # ownership of the return value. This allows the value to be automatically 42 | # garbage-collected when it is no longer in use 43 | assert parser.thisown 44 | 45 | 46 | def test_tf_parser_get_network_input_binding_info(parser): 47 | input_binding_info = parser.GetNetworkInputBindingInfo("input") 48 | 49 | tensor = input_binding_info[1] 50 | assert tensor.GetDataType() == 1 51 | assert tensor.GetNumDimensions() == 4 52 | assert tensor.GetNumElements() == 28*28*1 53 | assert tensor.GetQuantizationOffset() == 0 54 | assert tensor.GetQuantizationScale() == 0 55 | 56 | 57 | def test_tf_parser_get_network_output_binding_info(parser): 58 | output_binding_info = parser.GetNetworkOutputBindingInfo("output") 59 | 60 | tensor = output_binding_info[1] 61 | assert tensor.GetDataType() == 1 62 | assert tensor.GetNumDimensions() == 2 63 | assert tensor.GetNumElements() == 10 64 | assert tensor.GetQuantizationOffset() == 0 65 | assert tensor.GetQuantizationScale() == 0 66 | 67 | 68 | def test_tf_filenotfound_exception(shared_data_folder): 69 | parser = ann.ITfParser() 70 | 71 | # path to model 72 | path_to_model = os.path.join(shared_data_folder, 'some_unknown_model.pb') 73 | 74 | # tensor shape [1, 1, 1, 1] 75 | tensorshape = {'input': ann.TensorShape((1, 1, 1, 1))} 76 | 77 | # requested_outputs 78 | requested_outputs = [""] 79 | 80 | # parse tf binary & create network 81 | 82 | with pytest.raises(RuntimeError) as err: 83 | parser.CreateNetworkFromBinaryFile(path_to_model, tensorshape, requested_outputs) 84 | 85 | # Only check for part of the exception since the exception returns 86 | # absolute path which will change on different machines. 
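# (a missing .pb surfaces as a RuntimeError, so only the stable 'failed to open' fragment is asserted)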
87 | assert 'failed to open' in str(err.value) 88 | 89 | 90 | def test_tf_parser_end_to_end(shared_data_folder): 91 | parser = ann.ITfParser() 92 | 93 | tensorshape = {'input': ann.TensorShape((1, 28, 28, 1))} 94 | requested_outputs = ["output"] 95 | 96 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.pb'), 97 | tensorshape, requested_outputs) 98 | 99 | input_binding_info = parser.GetNetworkInputBindingInfo("input") 100 | 101 | # Load test image data stored in input_tf.npy 102 | input_tensor_data = np.load(os.path.join(shared_data_folder, 'tf_parser/input_tf.npy')).astype(np.float32) 103 | 104 | preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')] 105 | 106 | options = ann.CreationOptions() 107 | runtime = ann.IRuntime(options) 108 | 109 | opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions()) 110 | 111 | assert 0 == len(messages) 112 | 113 | net_id, messages = runtime.LoadNetwork(opt_network) 114 | 115 | assert "" == messages 116 | 117 | input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data]) 118 | 119 | outputs_binding_info = [] 120 | 121 | for output_name in requested_outputs: 122 | outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(output_name)) 123 | 124 | output_tensors = ann.make_output_tensors(outputs_binding_info) 125 | 126 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 127 | output_vectors = ann.workload_tensors_to_ndarray(output_tensors) 128 | 129 | # Load golden output file for result comparison. 130 | golden_output = np.load(os.path.join(shared_data_folder, 'tf_parser/golden_output_tf.npy')) 131 | 132 | # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this) 133 | np.testing.assert_almost_equal(output_vectors[0], golden_output, decimal=4) 134 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_tflite_parser.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import os 4 | 5 | import pytest 6 | import pyarmnn as ann 7 | import numpy as np 8 | 9 | 10 | @pytest.fixture() 11 | def parser(shared_data_folder): 12 | """ 13 | Parse and set up the test network to be used for the tests below 14 | """ 15 | parser = ann.ITfLiteParser() 16 | parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite')) 17 | 18 | yield parser 19 | 20 | 21 | def test_tflite_parser_swig_destroy(): 22 | assert ann.ITfLiteParser.__swig_destroy__, "There is a swig python destructor defined" 23 | assert ann.ITfLiteParser.__swig_destroy__.__name__ == "delete_ITfLiteParser" 24 | 25 | 26 | def test_check_tflite_parser_swig_ownership(parser): 27 | # Check to see that SWIG has ownership for parser. This instructs SWIG to take 28 | # ownership of the return value.
This allows the value to be automatically 29 | # garbage-collected when it is no longer in use 30 | assert parser.thisown 31 | 32 | 33 | def test_tflite_get_sub_graph_count(parser): 34 | graphs_count = parser.GetSubgraphCount() 35 | assert graphs_count == 1 36 | 37 | 38 | def test_tflite_get_network_input_binding_info(parser): 39 | graphs_count = parser.GetSubgraphCount() 40 | graph_id = graphs_count - 1 41 | 42 | input_names = parser.GetSubgraphInputTensorNames(graph_id) 43 | 44 | input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0]) 45 | 46 | tensor = input_binding_info[1] 47 | assert tensor.GetDataType() == 2 48 | assert tensor.GetNumDimensions() == 4 49 | assert tensor.GetNumElements() == 784 50 | assert tensor.GetQuantizationOffset() == 128 51 | assert tensor.GetQuantizationScale() == 0.007843137718737125 52 | 53 | 54 | def test_tflite_get_network_output_binding_info(parser): 55 | graphs_count = parser.GetSubgraphCount() 56 | graph_id = graphs_count - 1 57 | 58 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 59 | 60 | output_binding_info1 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[0]) 61 | 62 | # Check the tensor info retrieved from GetNetworkOutputBindingInfo 63 | tensor1 = output_binding_info1[1] 64 | 65 | assert tensor1.GetDataType() == 2 66 | assert tensor1.GetNumDimensions() == 2 67 | assert tensor1.GetNumElements() == 10 68 | assert tensor1.GetQuantizationOffset() == 0 69 | assert tensor1.GetQuantizationScale() == 0.00390625 70 | 71 | 72 | def test_tflite_get_subgraph_input_tensor_names(parser): 73 | graphs_count = parser.GetSubgraphCount() 74 | graph_id = graphs_count - 1 75 | 76 | input_names = parser.GetSubgraphInputTensorNames(graph_id) 77 | 78 | assert input_names == ('input_1',) 79 | 80 | 81 | def test_tflite_get_subgraph_output_tensor_names(parser): 82 | graphs_count = parser.GetSubgraphCount() 83 | graph_id = graphs_count - 1 84 | 85 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 86 | 87 | assert output_names[0] == 'dense/Softmax' 88 | 89 | 90 | def test_tflite_filenotfound_exception(shared_data_folder): 91 | parser = ann.ITfLiteParser() 92 | 93 | with pytest.raises(RuntimeError) as err: 94 | parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'some_unknown_network.tflite')) 95 | 96 | # Only check for part of the exception since the exception returns 97 | # absolute path which will change on different machines. 
98 | assert 'Cannot find the file' in str(err.value) 99 | 100 | 101 | def test_tflite_parser_end_to_end(shared_data_folder): 102 | parser = ann.ITfLiteParser() 103 | 104 | network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, "mock_model.tflite")) 105 | 106 | graphs_count = parser.GetSubgraphCount() 107 | graph_id = graphs_count - 1 108 | 109 | input_names = parser.GetSubgraphInputTensorNames(graph_id) 110 | input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0]) 111 | 112 | output_names = parser.GetSubgraphOutputTensorNames(graph_id) 113 | 114 | preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')] 115 | 116 | options = ann.CreationOptions() 117 | runtime = ann.IRuntime(options) 118 | 119 | opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions()) 120 | assert 0 == len(messages) 121 | 122 | net_id, messages = runtime.LoadNetwork(opt_network) 123 | assert "" == messages 124 | 125 | # Load test image data stored in input_lite.npy 126 | input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy')) 127 | input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data]) 128 | 129 | output_tensors = [] 130 | for index, output_name in enumerate(output_names): 131 | out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name) 132 | out_tensor_info = out_bind_info[1] 133 | out_tensor_id = out_bind_info[0] 134 | output_tensors.append((out_tensor_id, 135 | ann.Tensor(out_tensor_info))) 136 | 137 | runtime.EnqueueWorkload(net_id, input_tensors, output_tensors) 138 | 139 | output_vectors = [] 140 | for index, out_tensor in enumerate(output_tensors): 141 | output_vectors.append(out_tensor[1].get_memory_area()) 142 | 143 | # Load golden output file for result comparison. 144 | expected_outputs = np.load(os.path.join(shared_data_folder, 'tflite_parser/golden_output_lite.npy')) 145 | 146 | # Check that output matches golden output 147 | assert (expected_outputs == output_vectors[0]).all() 148 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_types.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 2 | # SPDX-License-Identifier: MIT 3 | import pytest 4 | import pyarmnn as ann 5 | 6 | 7 | def test_activation_function(): 8 | assert 0 == ann.ActivationFunction_Sigmoid 9 | assert 1 == ann.ActivationFunction_TanH 10 | assert 2 == ann.ActivationFunction_Linear 11 | assert 3 == ann.ActivationFunction_ReLu 12 | assert 4 == ann.ActivationFunction_BoundedReLu 13 | assert 5 == ann.ActivationFunction_SoftReLu 14 | assert 6 == ann.ActivationFunction_LeakyReLu 15 | assert 7 == ann.ActivationFunction_Abs 16 | assert 8 == ann.ActivationFunction_Sqrt 17 | assert 9 == ann.ActivationFunction_Square 18 | 19 | 20 | def test_permutation_vector(): 21 | pv = ann.PermutationVector((0, 2, 3, 1)) 22 | assert pv[0] == 0 23 | assert pv[2] == 3 24 | 25 | pv2 = ann.PermutationVector((0, 2, 3, 1)) 26 | assert pv == pv2 27 | 28 | pv4 = ann.PermutationVector((0, 3, 1, 2)) 29 | assert pv.IsInverse(pv4) 30 | -------------------------------------------------------------------------------- /python/pyarmnn/test/test_version.py: -------------------------------------------------------------------------------- 1 | # Copyright © 2020 Arm Ltd. All rights reserved. 
2 | # SPDX-License-Identifier: MIT 3 | import os 4 | import importlib 5 | 6 | 7 | def test_rel_version(): 8 | import pyarmnn._version as v 9 | importlib.reload(v) 10 | assert "dev" not in v.__version__ 11 | del v 12 | 13 | 14 | def test_dev_version(): 15 | import pyarmnn._version as v 16 | os.environ["PYARMNN_DEV_VER"] = "1" 17 | 18 | importlib.reload(v) 19 | 20 | assert "20.2.0.dev1" == v.__version__ 21 | 22 | del os.environ["PYARMNN_DEV_VER"] 23 | del v 24 | 25 | 26 | def test_arm_version_not_affected(): 27 | import pyarmnn._version as v 28 | os.environ["PYARMNN_DEV_VER"] = "1" 29 | 30 | importlib.reload(v) 31 | 32 | assert "20200200" == v.__arm_ml_version__ 33 | 34 | del os.environ["PYARMNN_DEV_VER"] 35 | del v 36 | -------------------------------------------------------------------------------- /python/pyarmnn/tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | skip_missing_interpreters=true 3 | envlist = 4 | py35 5 | py36 6 | py37 7 | 8 | [testenv] 9 | deps = pytest==5.2.0 10 | pytest-cov==2.8.1 11 | attrs==19.3.0 12 | setuptools==41.6.0 13 | numpy==1.17.2 14 | pillow==6.1.0 15 | requests==2.23.0 16 | 17 | recreate = True 18 | whitelist_externals = /bin/sh 19 | commands = 20 | python ./scripts/download_test_resources.py 21 | python -m pytest test/ -v {posargs} --junit-xml=test_report_junit-{envname}.xml --cov=pyarmnn --cov-report xml:coverage-{envname}.xml 22 | 23 | [testenv:devenv] 24 | envdir = env 25 | basepython = python3 26 | usedevelop = True 27 | deps = {[testenv]deps} 28 | tox 29 | skip_install = True 30 | commands = python -c "import sys; print('Dev environment created: ' + sys.executable)" 31 | 32 | [testenv:gen] 33 | basepython = python3 34 | skip_install = True 35 | usedevelop = True 36 | passenv = 37 | ARMNN_LIB 38 | ARMNN_INCLUDE 39 | commands = 40 | python setup.py clean --all 41 | python ./swig_generate.py -v 42 | python setup.py build_ext --inplace 43 | 44 | [testenv:doc] 45 | basepython = python3 46 | deps = pdoc3==0.6.3 47 | passenv = 48 | PYARMNN_DEV_VER 49 | commands = 50 | python ./scripts/generate_docs.py --html --output-dir docs pyarmnn --force --template-dir=./docs_conf 51 | 52 | [testenv:pylint] 53 | basepython = python3 54 | deps = pylint==2.3.1 55 | numpy==1.17.2 56 | recreate = False 57 | skip_install = True 58 | usedevelop = True 59 | setenv = 60 | PYTHONPATH = src 61 | commands = 62 | sh -c "pylint --rcfile=pylintconfig src --output-format=parseable --reports=no > pylint_results.txt || true" 63 | -------------------------------------------------------------------------------- /whl/pyarmnn-19.8.0-cp37-cp37m-linux_aarch64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nxp-imx/pyarmnn-release/d9a445af6c85019552437a7454aff64e31fa45b2/whl/pyarmnn-19.8.0-cp37-cp37m-linux_aarch64.whl -------------------------------------------------------------------------------- /whl/pyarmnn-19.8.1-cp37-cp37m-linux_aarch64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nxp-imx/pyarmnn-release/d9a445af6c85019552437a7454aff64e31fa45b2/whl/pyarmnn-19.8.1-cp37-cp37m-linux_aarch64.whl -------------------------------------------------------------------------------- /whl/pyarmnn-20.2.0-cp37-cp37m-linux_aarch64.whl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nxp-imx/pyarmnn-release/d9a445af6c85019552437a7454aff64e31fa45b2/whl/pyarmnn-20.2.0-cp37-cp37m-linux_aarch64.whl -------------------------------------------------------------------------------- /whl/pyarmnn-20.2.0-cp38-cp38-linux_aarch64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nxp-imx/pyarmnn-release/d9a445af6c85019552437a7454aff64e31fa45b2/whl/pyarmnn-20.2.0-cp38-cp38-linux_aarch64.whl --------------------------------------------------------------------------------