├── LICENSE ├── README.md ├── Rockchip ├── cpp │ └── yolov5 │ │ ├── .clang-format │ │ ├── 3rdparty │ │ └── librknn_api │ │ │ ├── include │ │ │ ├── rknn_api.h │ │ │ └── rknn_matmul_api.h │ │ │ └── lib │ │ │ └── aarch64 │ │ │ ├── librknn_api.so │ │ │ └── librknnrt.so │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ ├── csrc │ │ ├── multi_thread │ │ │ └── detect-mt.cpp │ │ └── single_thread │ │ │ └── detect.cpp │ │ ├── include │ │ ├── ThreadPool.h │ │ ├── checker.h │ │ ├── decoder.hpp │ │ ├── det_data.h │ │ ├── rk_utils.hpp │ │ └── rknet_cv.hpp │ │ └── pysrc │ │ ├── bus.jpg │ │ ├── dataset.txt │ │ ├── onnx2rknn.py │ │ ├── rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl │ │ ├── run.sh │ │ └── zidane.jpg └── python │ └── yolov8 │ ├── README.md │ ├── export.py │ ├── imagelist.txt │ ├── onnx2rknn.py │ ├── rknn_infer.py │ ├── rknn_infer_mt.py │ ├── rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl │ ├── rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl │ ├── triplemu-0.0.1-cp38-cp38-linux_aarch64.whl │ └── zidane.jpg ├── images └── zidane.jpg └── packages ├── rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl ├── rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl └── triplemu-0.0.1-cp38-cp38-linux_aarch64.whl /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 triple Mu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI-on-Board 2 | 3 | ## yolov5 on RK3588 4 | 5 | See more in [`README.md`](Rockchip/cpp/yolov5/README.md) 6 | 7 | ## yolov8 on RK3588 8 | 9 | See more in [`README.md`](Rockchip/python/yolov8/README.md) 10 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/.clang-format: -------------------------------------------------------------------------------- 1 | Language: Cpp 2 | AccessModifierOffset: -4 3 | AlignAfterOpenBracket: Align 4 | AllowShortEnumsOnASingleLine: false 5 | AlignConsecutiveAssignments: true 6 | AlignConsecutiveDeclarations: true 7 | AlignEscapedNewlines: Right 8 | AlignOperands: true 9 | AlignTrailingComments: true 10 | AllowAllParametersOfDeclarationOnNextLine: true 11 | AllowAllArgumentsOnNextLine: true 12 | AllowShortBlocksOnASingleLine: Empty 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: Empty 15 | AllowShortIfStatementsOnASingleLine: Never 16 | AllowShortLoopsOnASingleLine: false 17 | AlwaysBreakAfterReturnType: None 18 | AlwaysBreakBeforeMultilineStrings: false 19 | AlwaysBreakTemplateDeclarations: true 20 | BinPackArguments: false 21 | BinPackParameters: false 22 | BreakBeforeBinaryOperators: NonAssignment 23 | BreakBeforeBraces: Stroustrup 24 | BreakBeforeTernaryOperators: false 25 
| BreakConstructorInitializers: AfterColon 26 | BreakInheritanceList: AfterColon 27 | BreakStringLiterals: false 28 | ColumnLimit: 120 29 | CompactNamespaces: false 30 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 31 | ConstructorInitializerIndentWidth: 4 32 | ContinuationIndentWidth: 4 33 | Cpp11BracedListStyle: true 34 | DerivePointerAlignment: false 35 | FixNamespaceComments: true 36 | IndentCaseLabels: true 37 | IndentPPDirectives: None 38 | IndentWidth: 4 39 | IndentWrappedFunctionNames: false 40 | KeepEmptyLinesAtTheStartOfBlocks: true 41 | MaxEmptyLinesToKeep: 1 42 | NamespaceIndentation: None 43 | PointerAlignment: Left 44 | ReflowComments: true 45 | SortIncludes: true 46 | SortUsingDeclarations: false 47 | SpaceAfterCStyleCast: false 48 | SpaceAfterTemplateKeyword: false 49 | SpaceBeforeAssignmentOperators: true 50 | SpaceBeforeCtorInitializerColon: false 51 | SpaceBeforeInheritanceColon: false 52 | SpaceBeforeParens: ControlStatements 53 | SpaceInEmptyParentheses: false 54 | SpacesBeforeTrailingComments: 2 55 | SpacesInAngles: false 56 | SpacesInCStyleCastParentheses: false 57 | SpacesInContainerLiterals: false 58 | SpacesInParentheses: false 59 | SpacesInSquareBrackets: false 60 | Standard: c++11 61 | TabWidth: 4 62 | UseTab: Never -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/3rdparty/librknn_api/include/rknn_api.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * 3 | * Copyright (c) 2017 - 2022 by Rockchip Corp. All rights reserved. 4 | * 5 | * The material in this file is confidential and contains trade secrets 6 | * of Rockchip Corporation. This is proprietary information owned by 7 | * Rockchip Corporation. 
No part of this work may be disclosed, 8 | * reproduced, copied, transmitted, or used in any way for any purpose, 9 | * without the express written permission of Rockchip Corporation. 10 | * 11 | *****************************************************************************/ 12 | 13 | 14 | #ifndef _RKNN_API_H 15 | #define _RKNN_API_H 16 | 17 | #ifdef __cplusplus 18 | extern "C" { 19 | #endif 20 | 21 | #include 22 | 23 | /* 24 | Definition of extended flag for rknn_init. 25 | */ 26 | /* set high priority context. */ 27 | #define RKNN_FLAG_PRIOR_HIGH 0x00000000 28 | 29 | /* set medium priority context */ 30 | #define RKNN_FLAG_PRIOR_MEDIUM 0x00000001 31 | 32 | /* set low priority context. */ 33 | #define RKNN_FLAG_PRIOR_LOW 0x00000002 34 | 35 | /* asynchronous mode. 36 | when enable, rknn_outputs_get will not block for too long because it directly retrieves the result of 37 | the previous frame which can increase the frame rate on single-threaded mode, but at the cost of 38 | rknn_outputs_get not retrieves the result of the current frame. 39 | in multi-threaded mode you do not need to turn this mode on. */ 40 | #define RKNN_FLAG_ASYNC_MASK 0x00000004 41 | 42 | /* collect performance mode. 43 | when enable, you can get detailed performance reports via rknn_query(ctx, RKNN_QUERY_PERF_DETAIL, ...), 44 | but it will reduce the frame rate. 
*/ 45 | #define RKNN_FLAG_COLLECT_PERF_MASK 0x00000008 46 | 47 | /* allocate all memory in outside, includes weight/internal/inputs/outputs */ 48 | #define RKNN_FLAG_MEM_ALLOC_OUTSIDE 0x00000010 49 | 50 | /* weight sharing with the same network structure */ 51 | #define RKNN_FLAG_SHARE_WEIGHT_MEM 0x00000020 52 | 53 | /* send fence fd from outside */ 54 | #define RKNN_FLAG_FENCE_IN_OUTSIDE 0x00000040 55 | 56 | /* get fence fd from inside */ 57 | #define RKNN_FLAG_FENCE_OUT_OUTSIDE 0x00000080 58 | 59 | /* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/ 60 | #define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100 61 | 62 | /* allocate internal memory in outside */ 63 | #define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200 64 | 65 | /* 66 | Error code returned by the RKNN API. 67 | */ 68 | #define RKNN_SUCC 0 /* execute succeed. */ 69 | #define RKNN_ERR_FAIL -1 /* execute failed. */ 70 | #define RKNN_ERR_TIMEOUT -2 /* execute timeout. */ 71 | #define RKNN_ERR_DEVICE_UNAVAILABLE -3 /* device is unavailable. */ 72 | #define RKNN_ERR_MALLOC_FAIL -4 /* memory malloc fail. */ 73 | #define RKNN_ERR_PARAM_INVALID -5 /* parameter is invalid. */ 74 | #define RKNN_ERR_MODEL_INVALID -6 /* model is invalid. */ 75 | #define RKNN_ERR_CTX_INVALID -7 /* context is invalid. */ 76 | #define RKNN_ERR_INPUT_INVALID -8 /* input is invalid. */ 77 | #define RKNN_ERR_OUTPUT_INVALID -9 /* output is invalid. */ 78 | #define RKNN_ERR_DEVICE_UNMATCH -10 /* the device is unmatch, please update rknn sdk 79 | and npu driver/firmware. */ 80 | #define RKNN_ERR_INCOMPATILE_PRE_COMPILE_MODEL -11 /* This RKNN model use pre_compile mode, but not compatible with current driver. */ 81 | #define RKNN_ERR_INCOMPATILE_OPTIMIZATION_LEVEL_VERSION -12 /* This RKNN model set optimization level, but not compatible with current driver. */ 82 | #define RKNN_ERR_TARGET_PLATFORM_UNMATCH -13 /* This RKNN model set target platform, but not compatible with current platform. 
*/ 83 | 84 | /* 85 | Definition for tensor 86 | */ 87 | #define RKNN_MAX_DIMS 16 /* maximum dimension of tensor. */ 88 | #define RKNN_MAX_NUM_CHANNEL 15 /* maximum channel number of input tensor. */ 89 | #define RKNN_MAX_NAME_LEN 256 /* maximum name lenth of tensor. */ 90 | #define RKNN_MAX_DYNAMIC_SHAPE_NUM 512 /* maximum number of dynamic shape for each input. */ 91 | 92 | #ifdef __arm__ 93 | typedef uint32_t rknn_context; 94 | #else 95 | typedef uint64_t rknn_context; 96 | #endif 97 | 98 | 99 | /* 100 | The query command for rknn_query 101 | */ 102 | typedef enum _rknn_query_cmd { 103 | RKNN_QUERY_IN_OUT_NUM = 0, /* query the number of input & output tensor. */ 104 | RKNN_QUERY_INPUT_ATTR = 1, /* query the attribute of input tensor. */ 105 | RKNN_QUERY_OUTPUT_ATTR = 2, /* query the attribute of output tensor. */ 106 | RKNN_QUERY_PERF_DETAIL = 3, /* query the detail performance, need set 107 | RKNN_FLAG_COLLECT_PERF_MASK when call rknn_init, 108 | this query needs to be valid after rknn_outputs_get. */ 109 | RKNN_QUERY_PERF_RUN = 4, /* query the time of run, 110 | this query needs to be valid after rknn_outputs_get. */ 111 | RKNN_QUERY_SDK_VERSION = 5, /* query the sdk & driver version */ 112 | 113 | RKNN_QUERY_MEM_SIZE = 6, /* query the weight & internal memory size */ 114 | RKNN_QUERY_CUSTOM_STRING = 7, /* query the custom string */ 115 | 116 | RKNN_QUERY_NATIVE_INPUT_ATTR = 8, /* query the attribute of native input tensor. */ 117 | RKNN_QUERY_NATIVE_OUTPUT_ATTR = 9, /* query the attribute of native output tensor. */ 118 | 119 | RKNN_QUERY_NATIVE_NC1HWC2_INPUT_ATTR = 8, /* query the attribute of native input tensor. */ 120 | RKNN_QUERY_NATIVE_NC1HWC2_OUTPUT_ATTR = 9, /* query the attribute of native output tensor. */ 121 | 122 | RKNN_QUERY_NATIVE_NHWC_INPUT_ATTR = 10, /* query the attribute of native input tensor. */ 123 | RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR = 11, /* query the attribute of native output tensor. 
*/ 124 | 125 | RKNN_QUERY_DEVICE_MEM_INFO = 12, /* query the attribute of rknn memory information. */ 126 | 127 | RKNN_QUERY_INPUT_DYNAMIC_RANGE = 13, /* query the dynamic shape range of rknn input tensor. */ 128 | RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/ 129 | RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/ 130 | 131 | RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/ 132 | RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/ 133 | 134 | 135 | RKNN_QUERY_CMD_MAX 136 | } rknn_query_cmd; 137 | 138 | /* 139 | the tensor data type. 140 | */ 141 | typedef enum _rknn_tensor_type { 142 | RKNN_TENSOR_FLOAT32 = 0, /* data type is float32. */ 143 | RKNN_TENSOR_FLOAT16, /* data type is float16. */ 144 | RKNN_TENSOR_INT8, /* data type is int8. */ 145 | RKNN_TENSOR_UINT8, /* data type is uint8. */ 146 | RKNN_TENSOR_INT16, /* data type is int16. */ 147 | RKNN_TENSOR_UINT16, /* data type is uint16. */ 148 | RKNN_TENSOR_INT32, /* data type is int32. */ 149 | RKNN_TENSOR_UINT32, /* data type is uint32. */ 150 | RKNN_TENSOR_INT64, /* data type is int64. 
*/ 151 | RKNN_TENSOR_BOOL, 152 | 153 | RKNN_TENSOR_TYPE_MAX 154 | } rknn_tensor_type; 155 | 156 | inline static const char* get_type_string(rknn_tensor_type type) 157 | { 158 | switch(type) { 159 | case RKNN_TENSOR_FLOAT32: return "FP32"; 160 | case RKNN_TENSOR_FLOAT16: return "FP16"; 161 | case RKNN_TENSOR_INT8: return "INT8"; 162 | case RKNN_TENSOR_UINT8: return "UINT8"; 163 | case RKNN_TENSOR_INT16: return "INT16"; 164 | case RKNN_TENSOR_UINT16: return "UINT16"; 165 | case RKNN_TENSOR_INT32: return "INT32"; 166 | case RKNN_TENSOR_UINT32: return "UINT32"; 167 | case RKNN_TENSOR_INT64: return "INT64"; 168 | case RKNN_TENSOR_BOOL: return "BOOL"; 169 | default: return "UNKNOW"; 170 | } 171 | } 172 | 173 | /* 174 | the quantitative type. 175 | */ 176 | typedef enum _rknn_tensor_qnt_type { 177 | RKNN_TENSOR_QNT_NONE = 0, /* none. */ 178 | RKNN_TENSOR_QNT_DFP, /* dynamic fixed point. */ 179 | RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC, /* asymmetric affine. */ 180 | 181 | RKNN_TENSOR_QNT_MAX 182 | } rknn_tensor_qnt_type; 183 | 184 | inline static const char* get_qnt_type_string(rknn_tensor_qnt_type type) 185 | { 186 | switch(type) { 187 | case RKNN_TENSOR_QNT_NONE: return "NONE"; 188 | case RKNN_TENSOR_QNT_DFP: return "DFP"; 189 | case RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC: return "AFFINE"; 190 | default: return "UNKNOW"; 191 | } 192 | } 193 | 194 | /* 195 | the tensor data format. 196 | */ 197 | typedef enum _rknn_tensor_format { 198 | RKNN_TENSOR_NCHW = 0, /* data format is NCHW. */ 199 | RKNN_TENSOR_NHWC, /* data format is NHWC. */ 200 | RKNN_TENSOR_NC1HWC2, /* data format is NC1HWC2. */ 201 | RKNN_TENSOR_UNDEFINED, 202 | 203 | RKNN_TENSOR_FORMAT_MAX 204 | } rknn_tensor_format; 205 | 206 | /* 207 | the mode of running on target NPU core. 208 | */ 209 | typedef enum _rknn_core_mask { 210 | RKNN_NPU_CORE_AUTO = 0, /* default, run on NPU core randomly. */ 211 | RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */ 212 | RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. 
*/ 213 | RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */ 214 | RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 1 and core 2. */ 215 | RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 1 and core 2 and core 3. */ 216 | 217 | RKNN_NPU_CORE_UNDEFINED, 218 | } rknn_core_mask; 219 | 220 | inline static const char* get_format_string(rknn_tensor_format fmt) 221 | { 222 | switch(fmt) { 223 | case RKNN_TENSOR_NCHW: return "NCHW"; 224 | case RKNN_TENSOR_NHWC: return "NHWC"; 225 | case RKNN_TENSOR_NC1HWC2: return "NC1HWC2"; 226 | case RKNN_TENSOR_UNDEFINED: return "UNDEFINED"; 227 | default: return "UNKNOW"; 228 | } 229 | } 230 | 231 | /* 232 | the information for RKNN_QUERY_IN_OUT_NUM. 233 | */ 234 | typedef struct _rknn_input_output_num { 235 | uint32_t n_input; /* the number of input. */ 236 | uint32_t n_output; /* the number of output. */ 237 | } rknn_input_output_num; 238 | 239 | /* 240 | the information for RKNN_QUERY_INPUT_ATTR / RKNN_QUERY_OUTPUT_ATTR. 241 | */ 242 | typedef struct _rknn_tensor_attr { 243 | uint32_t index; /* input parameter, the index of input/output tensor, 244 | need set before call rknn_query. */ 245 | 246 | uint32_t n_dims; /* the number of dimensions. */ 247 | uint32_t dims[RKNN_MAX_DIMS]; /* the dimensions array. */ 248 | char name[RKNN_MAX_NAME_LEN]; /* the name of tensor. */ 249 | 250 | uint32_t n_elems; /* the number of elements. */ 251 | uint32_t size; /* the bytes size of tensor. */ 252 | 253 | rknn_tensor_format fmt; /* the data format of tensor. */ 254 | rknn_tensor_type type; /* the data type of tensor. */ 255 | rknn_tensor_qnt_type qnt_type; /* the quantitative type of tensor. */ 256 | int8_t fl; /* fractional length for RKNN_TENSOR_QNT_DFP. */ 257 | int32_t zp; /* zero point for RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC. */ 258 | float scale; /* scale for RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC. 
*/ 259 | 260 | uint32_t w_stride; /* the stride of tensor along the width dimention of input, 261 | Note: it is read-only, 0 means equal to width. */ 262 | uint32_t size_with_stride; /* the bytes size of tensor with stride. */ 263 | 264 | uint8_t pass_through; /* pass through mode, for rknn_set_io_mem interface. 265 | if TRUE, the buf data is passed directly to the input node of the rknn model 266 | without any conversion. the following variables do not need to be set. 267 | if FALSE, the buf data is converted into an input consistent with the model 268 | according to the following type and fmt. so the following variables 269 | need to be set.*/ 270 | uint32_t h_stride; /* the stride along the height dimention of input, 271 | Note: it is write-only, if it was set to 0, h_stride = height. */ 272 | } rknn_tensor_attr; 273 | 274 | typedef struct _rknn_input_range { 275 | uint32_t index; /* input parameter, the index of input/output tensor, 276 | need set before call rknn_query. */ 277 | uint32_t shape_number; /* the number of shape. */ 278 | rknn_tensor_format fmt; /* the data format of tensor. */ 279 | char name[RKNN_MAX_NAME_LEN]; /* the name of tensor. */ 280 | uint32_t dyn_range[RKNN_MAX_DYNAMIC_SHAPE_NUM][RKNN_MAX_DIMS]; /* the dynamic input dimensions range. */ 281 | uint32_t n_dims; /* the number of dimensions. */ 282 | 283 | } rknn_input_range; 284 | 285 | /* 286 | the information for RKNN_QUERY_PERF_DETAIL. 287 | */ 288 | typedef struct _rknn_perf_detail { 289 | char* perf_data; /* the string pointer of perf detail. don't need free it by user. */ 290 | uint64_t data_len; /* the string length. */ 291 | } rknn_perf_detail; 292 | 293 | /* 294 | the information for RKNN_QUERY_PERF_RUN. 295 | */ 296 | typedef struct _rknn_perf_run { 297 | int64_t run_duration; /* real inference time (us) */ 298 | } rknn_perf_run; 299 | 300 | /* 301 | the information for RKNN_QUERY_SDK_VERSION. 
302 | */ 303 | typedef struct _rknn_sdk_version { 304 | char api_version[256]; /* the version of rknn api. */ 305 | char drv_version[256]; /* the version of rknn driver. */ 306 | } rknn_sdk_version; 307 | 308 | /* 309 | the information for RKNN_QUERY_MEM_SIZE. 310 | */ 311 | typedef struct _rknn_mem_size { 312 | uint32_t total_weight_size; /* the weight memory size */ 313 | uint32_t total_internal_size; /* the internal memory size, exclude inputs/outputs */ 314 | uint64_t total_dma_allocated_size; /* total dma memory allocated size */ 315 | uint32_t total_sram_size; /* total system sram size reserved for rknn */ 316 | uint32_t free_sram_size; /* free system sram size reserved for rknn */ 317 | uint32_t reserved[10]; /* reserved */ 318 | } rknn_mem_size; 319 | 320 | /* 321 | the information for RKNN_QUERY_CUSTOM_STRING. 322 | */ 323 | typedef struct _rknn_custom_string { 324 | char string[1024]; /* the string of custom, lengths max to 1024 bytes */ 325 | } rknn_custom_string; 326 | 327 | /* 328 | The flags of rknn_tensor_mem. 329 | */ 330 | typedef enum _rknn_tensor_mem_flags { 331 | RKNN_TENSOR_MEMORY_FLAGS_ALLOC_INSIDE = 1, /*Used to mark in rknn_destroy_mem() whether it is necessary to release the "mem" pointer itself. 332 | If the flag RKNN_TENSOR_MEMORY_FLAGS_ALLOC_INSIDE is set, rknn_destroy_mem() will call free(mem).*/ 333 | RKNN_TENSOR_MEMORY_FLAGS_FROM_FD = 2, /*Used to mark in rknn_create_mem_from_fd() whether it is necessary to release the "mem" pointer itself. 334 | If the flag RKNN_TENSOR_MEMORY_FLAGS_FROM_FD is set, rknn_destroy_mem() will call free(mem).*/ 335 | RKNN_TENSOR_MEMORY_FLAGS_FROM_PHYS = 3, /*Used to mark in rknn_create_mem_from_phys() whether it is necessary to release the "mem" pointer itself. 336 | If the flag RKNN_TENSOR_MEMORY_FLAGS_FROM_PHYS is set, rknn_destroy_mem() will call free(mem).*/ 337 | RKNN_TENSOR_MEMORY_FLAGS_UNKNOWN 338 | } rknn_tensor_mem_flags; 339 | 340 | /* 341 | the memory information of tensor. 
342 | */ 343 | typedef struct _rknn_tensor_memory { 344 | void* virt_addr; /* the virtual address of tensor buffer. */ 345 | uint64_t phys_addr; /* the physical address of tensor buffer. */ 346 | int32_t fd; /* the fd of tensor buffer. */ 347 | int32_t offset; /* indicates the offset of the memory. */ 348 | uint32_t size; /* the size of tensor buffer. */ 349 | uint32_t flags; /* the flags of tensor buffer, reserved */ 350 | void * priv_data; /* the private data of tensor buffer. */ 351 | } rknn_tensor_mem; 352 | 353 | /* 354 | the input information for rknn_input_set. 355 | */ 356 | typedef struct _rknn_input { 357 | uint32_t index; /* the input index. */ 358 | void* buf; /* the input buf for index. */ 359 | uint32_t size; /* the size of input buf. */ 360 | uint8_t pass_through; /* pass through mode. 361 | if TRUE, the buf data is passed directly to the input node of the rknn model 362 | without any conversion. the following variables do not need to be set. 363 | if FALSE, the buf data is converted into an input consistent with the model 364 | according to the following type and fmt. so the following variables 365 | need to be set.*/ 366 | rknn_tensor_type type; /* the data type of input buf. */ 367 | rknn_tensor_format fmt; /* the data format of input buf. 368 | currently the internal input format of NPU is NCHW by default. 369 | so entering NCHW data can avoid the format conversion in the driver. */ 370 | } rknn_input; 371 | 372 | /* 373 | the output information for rknn_outputs_get. 374 | */ 375 | typedef struct _rknn_output { 376 | uint8_t want_float; /* want transfer output data to float */ 377 | uint8_t is_prealloc; /* whether buf is pre-allocated. 378 | if TRUE, the following variables need to be set. 379 | if FALSE, the following variables do not need to be set. */ 380 | uint32_t index; /* the output index. */ 381 | void* buf; /* the output buf for index. 
382 | when is_prealloc = FALSE and rknn_outputs_release called, 383 | this buf pointer will be free and don't use it anymore. */ 384 | uint32_t size; /* the size of output buf. */ 385 | } rknn_output; 386 | 387 | /* 388 | the extend information for rknn_init. 389 | */ 390 | typedef struct _rknn_init_extend { 391 | rknn_context ctx; /* rknn context */ 392 | int32_t real_model_offset; /* real rknn model file offset, only valid when init context with rknn file path */ 393 | uint32_t real_model_size; /* real rknn model file size, only valid when init context with rknn file path */ 394 | uint8_t reserved[120]; /* reserved */ 395 | } rknn_init_extend; 396 | 397 | /* 398 | the extend information for rknn_run. 399 | */ 400 | typedef struct _rknn_run_extend { 401 | uint64_t frame_id; /* output parameter, indicate current frame id of run. */ 402 | int32_t non_block; /* block flag of run, 0 is block else 1 is non block */ 403 | int32_t timeout_ms; /* timeout for block mode, in milliseconds */ 404 | int32_t fence_fd; /* fence fd from other unit */ 405 | } rknn_run_extend; 406 | 407 | /* 408 | the extend information for rknn_outputs_get. 409 | */ 410 | typedef struct _rknn_output_extend { 411 | uint64_t frame_id; /* output parameter, indicate the frame id of outputs, corresponds to 412 | struct rknn_run_extend.frame_id.*/ 413 | } rknn_output_extend; 414 | 415 | 416 | /* rknn_init 417 | 418 | initial the context and load the rknn model. 419 | 420 | input: 421 | rknn_context* context the pointer of context handle. 422 | void* model if size > 0, pointer to the rknn model, if size = 0, filepath to the rknn model. 423 | uint32_t size the size of rknn model. 424 | uint32_t flag extend flag, see the define of RKNN_FLAG_XXX_XXX. 425 | rknn_init_extend* extend the extend information of init. 426 | return: 427 | int error code. 
428 | */ 429 | int rknn_init(rknn_context* context, void* model, uint32_t size, uint32_t flag, rknn_init_extend* extend); 430 | 431 | /* rknn_dup_context 432 | 433 | initial the context and load the rknn model. 434 | 435 | input: 436 | rknn_context* context_in the pointer of context in handle. 437 | rknn_context* context_out the pointer of context out handle. 438 | return: 439 | int error code. 440 | */ 441 | int rknn_dup_context(rknn_context* context_in, rknn_context* context_out); 442 | 443 | /* rknn_destroy 444 | 445 | unload the rknn model and destroy the context. 446 | 447 | input: 448 | rknn_context context the handle of context. 449 | return: 450 | int error code. 451 | */ 452 | int rknn_destroy(rknn_context context); 453 | 454 | 455 | /* rknn_query 456 | 457 | query the information about model or others. see rknn_query_cmd. 458 | 459 | input: 460 | rknn_context context the handle of context. 461 | rknn_query_cmd cmd the command of query. 462 | void* info the buffer point of information. 463 | uint32_t size the size of information. 464 | return: 465 | int error code. 466 | */ 467 | int rknn_query(rknn_context context, rknn_query_cmd cmd, void* info, uint32_t size); 468 | 469 | 470 | /* rknn_inputs_set 471 | 472 | set inputs information by input index of rknn model. 473 | inputs information see rknn_input. 474 | 475 | input: 476 | rknn_context context the handle of context. 477 | uint32_t n_inputs the number of inputs. 478 | rknn_input inputs[] the arrays of inputs information, see rknn_input. 479 | return: 480 | int error code 481 | */ 482 | int rknn_inputs_set(rknn_context context, uint32_t n_inputs, rknn_input inputs[]); 483 | 484 | /* 485 | rknn_set_batch_core_num 486 | 487 | set rknn batch core_num. 488 | 489 | input: 490 | rknn_context context the handle of context. 491 | int core_num the core number. 492 | return: 493 | int error code. 
494 | 495 | */ 496 | int rknn_set_batch_core_num(rknn_context context, int core_num); 497 | 498 | /* rknn_set_core_mask 499 | 500 | set rknn core mask.(only supported on RK3588 now) 501 | 502 | RKNN_NPU_CORE_AUTO: auto mode, default value 503 | RKNN_NPU_CORE_0: core 0 mode 504 | RKNN_NPU_CORE_1: core 1 mode 505 | RKNN_NPU_CORE_2: core 2 mode 506 | RKNN_NPU_CORE_0_1: combine core 0/1 mode 507 | RKNN_NPU_CORE_0_1_2: combine core 0/1/2 mode 508 | 509 | input: 510 | rknn_context context the handle of context. 511 | rknn_core_mask core_mask the core mask. 512 | return: 513 | int error code. 514 | */ 515 | int rknn_set_core_mask(rknn_context context, rknn_core_mask core_mask); 516 | 517 | /* rknn_run 518 | 519 | run the model to execute inference. 520 | 521 | input: 522 | rknn_context context the handle of context. 523 | rknn_run_extend* extend the extend information of run. 524 | return: 525 | int error code. 526 | */ 527 | int rknn_run(rknn_context context, rknn_run_extend* extend); 528 | 529 | 530 | /* rknn_wait 531 | 532 | wait the model after execute inference. 533 | 534 | input: 535 | rknn_context context the handle of context. 536 | rknn_run_extend* extend the extend information of run. 537 | return: 538 | int error code. 539 | */ 540 | int rknn_wait(rknn_context context, rknn_run_extend* extend); 541 | 542 | 543 | /* rknn_outputs_get 544 | 545 | wait the inference to finish and get the outputs. 546 | this function will block until inference finish. 547 | the results will set to outputs[]. 548 | 549 | input: 550 | rknn_context context the handle of context. 551 | uint32_t n_outputs the number of outputs. 552 | rknn_output outputs[] the arrays of output, see rknn_output. 553 | rknn_output_extend* the extend information of output. 554 | return: 555 | int error code. 
556 | */ 557 | int rknn_outputs_get(rknn_context context, uint32_t n_outputs, rknn_output outputs[], rknn_output_extend* extend); 558 | 559 | 560 | /* rknn_outputs_release 561 | 562 | release the outputs that get by rknn_outputs_get. 563 | after called, the rknn_output[x].buf get from rknn_outputs_get will 564 | also be free when rknn_output[x].is_prealloc = FALSE. 565 | 566 | input: 567 | rknn_context context the handle of context. 568 | uint32_t n_ouputs the number of outputs. 569 | rknn_output outputs[] the arrays of output. 570 | return: 571 | int error code 572 | */ 573 | int rknn_outputs_release(rknn_context context, uint32_t n_ouputs, rknn_output outputs[]); 574 | 575 | 576 | /* new api for zero copy */ 577 | 578 | /* rknn_create_mem_from_phys (memory allocated outside) 579 | 580 | initialize tensor memory from physical address. 581 | 582 | input: 583 | rknn_context ctx the handle of context. 584 | uint64_t phys_addr physical address. 585 | void *virt_addr virtual address. 586 | uint32_t size the size of tensor buffer. 587 | return: 588 | rknn_tensor_mem the pointer of tensor memory information. 589 | */ 590 | rknn_tensor_mem* rknn_create_mem_from_phys(rknn_context ctx, uint64_t phys_addr, void *virt_addr, uint32_t size); 591 | 592 | 593 | /* rknn_create_mem_from_fd (memory allocated outside) 594 | 595 | initialize tensor memory from file description. 596 | 597 | input: 598 | rknn_context ctx the handle of context. 599 | int32_t fd file description. 600 | void *virt_addr virtual address. 601 | uint32_t size the size of tensor buffer. 602 | int32_t offset indicates the offset of the memory (virt_addr without offset). 603 | return: 604 | rknn_tensor_mem the pointer of tensor memory information. 605 | */ 606 | rknn_tensor_mem* rknn_create_mem_from_fd(rknn_context ctx, int32_t fd, void *virt_addr, uint32_t size, int32_t offset); 607 | 608 | 609 | /* rknn_create_mem_from_mb_blk (memory allocated outside) 610 | 611 | create tensor memory from mb_blk. 
612 | 613 | input: 614 | rknn_context ctx the handle of context. 615 | void *mb_blk mb_blk allocate from system api. 616 | int32_t offset indicates the offset of the memory. 617 | return: 618 | rknn_tensor_mem the pointer of tensor memory information. 619 | */ 620 | rknn_tensor_mem* rknn_create_mem_from_mb_blk(rknn_context ctx, void *mb_blk, int32_t offset); 621 | 622 | 623 | /* rknn_create_mem (memory allocated inside) 624 | 625 | create tensor memory. 626 | 627 | input: 628 | rknn_context ctx the handle of context. 629 | uint32_t size the size of tensor buffer. 630 | return: 631 | rknn_tensor_mem the pointer of tensor memory information. 632 | */ 633 | rknn_tensor_mem* rknn_create_mem(rknn_context ctx, uint32_t size); 634 | 635 | 636 | /* rknn_destroy_mem (support allocate inside and outside) 637 | 638 | destroy tensor memory. 639 | 640 | input: 641 | rknn_context ctx the handle of context. 642 | rknn_tensor_mem *mem the pointer of tensor memory information. 643 | return: 644 | int error code 645 | */ 646 | int rknn_destroy_mem(rknn_context ctx, rknn_tensor_mem *mem); 647 | 648 | 649 | /* rknn_set_weight_mem 650 | 651 | set the weight memory. 652 | 653 | input: 654 | rknn_context ctx the handle of context. 655 | rknn_tensor_mem *mem the array of tensor memory information 656 | return: 657 | int error code. 658 | */ 659 | int rknn_set_weight_mem(rknn_context ctx, rknn_tensor_mem *mem); 660 | 661 | 662 | /* rknn_set_internal_mem 663 | 664 | set the internal memory. 665 | 666 | input: 667 | rknn_context ctx the handle of context. 668 | rknn_tensor_mem *mem the array of tensor memory information 669 | return: 670 | int error code. 671 | */ 672 | int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem); 673 | 674 | 675 | /* rknn_set_io_mem 676 | 677 | set the input and output tensors buffer. 678 | 679 | input: 680 | rknn_context ctx the handle of context. 681 | rknn_tensor_mem *mem the array of tensor memory information. 
682 | rknn_tensor_attr *attr the attribute of input or output tensor buffer. 683 | return: 684 | int error code. 685 | */ 686 | int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr); 687 | 688 | /* rknn_set_input_shape 689 | 690 | set the input tensor shape (only valid for dynamic shape rknn model). 691 | 692 | input: 693 | rknn_context ctx the handle of context. 694 | rknn_tensor_attr *attr the attribute of input or output tensor buffer. 695 | return: 696 | int error code. 697 | */ 698 | int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr); 699 | 700 | #ifdef __cplusplus 701 | } //extern "C" 702 | #endif 703 | 704 | #endif //_RKNN_API_H 705 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/3rdparty/librknn_api/include/rknn_matmul_api.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * 3 | * Copyright (c) 2017 - 2018 by Rockchip Corp. All rights reserved. 4 | * 5 | * The material in this file is confidential and contains trade secrets 6 | * of Rockchip Corporation. This is proprietary information owned by 7 | * Rockchip Corporation. No part of this work may be disclosed, 8 | * reproduced, copied, transmitted, or used in any way for any purpose, 9 | * without the express written permission of Rockchip Corporation. 
10 | * 11 | *****************************************************************************/ 12 | 13 | #ifndef _RKNN_MATMUL_API_H 14 | #define _RKNN_MATMUL_API_H 15 | 16 | #ifdef __cplusplus 17 | extern "C" { 18 | #endif 19 | 20 | #include "rknn_api.h" 21 | 22 | typedef rknn_context rknn_matmul_ctx; 23 | 24 | typedef struct _rknn_matmul_tensor_attr 25 | { 26 | char name[RKNN_MAX_NAME_LEN]; 27 | 28 | // indicate A(M, K) or B(K, N) or C(M, N) 29 | uint32_t n_dims; 30 | uint32_t dims[RKNN_MAX_DIMS]; 31 | 32 | // matmul tensor size 33 | uint32_t size; 34 | 35 | // matmul tensor data type 36 | // int8 : A, B 37 | // int32: C 38 | rknn_tensor_type type; 39 | } rknn_matmul_tensor_attr; 40 | 41 | typedef struct _rknn_matmul_io_attr 42 | { 43 | // indicate A(M, K) or B(K, N) or C(M, N) 44 | rknn_matmul_tensor_attr A; 45 | rknn_matmul_tensor_attr B; 46 | rknn_matmul_tensor_attr C; 47 | } rknn_matmul_io_attr; 48 | 49 | /* 50 | matmul information struct 51 | */ 52 | typedef struct rknn_matmul_info_t 53 | { 54 | int32_t M; 55 | int32_t K; // limit: rk356x: int8 type must be aligned with 32byte, float16 type must be aligned with 16byte; 56 | // rk3588: int8 type must be aligned with 32byte, float16 type must be aligned with 32byte; 57 | int32_t N; // limit: rk356x: int8 type must be aligned with 16byte, float16 type must be aligned with 8byte; 58 | // rk3588: int8 type must be aligned with 32byte, float16 type must be aligned with 16byte; 59 | 60 | // matmul data type 61 | // int8: int8(A) x int8(B) -> int32(C) 62 | // float16: float16(A) x float16(B) -> float32(C) 63 | rknn_tensor_type type; 64 | 65 | // matmul native layout for B 66 | // 0: normal layout 67 | // 1: native layout 68 | int32_t native_layout; 69 | 70 | // matmul perf layout for A and C 71 | // 0: normal layout 72 | // 1: perf layout 73 | int32_t perf_layout; 74 | } rknn_matmul_info; 75 | 76 | /* rknn_matmul_create 77 | 78 | params: 79 | rknn_matmul_ctx *ctx the handle of context. 
80 | rknn_matmul_info *info the matmal information. 81 | rknn_matmul_io_attr *io_attr inputs/output attribute 82 | return: 83 | int error code 84 | */ 85 | int rknn_matmul_create(rknn_matmul_ctx* ctx, rknn_matmul_info* info, rknn_matmul_io_attr* io_attr); 86 | 87 | /* rknn_matmul_set_io_mem 88 | 89 | params: 90 | rknn_matmul_ctx ctx the handle of context. 91 | rknn_tensor_mem *mem the pointer of tensor memory information. 92 | rknn_matmul_tensor_attr *attr the attribute of input or output tensor buffer. 93 | return: 94 | int error code. 95 | 96 | formula: 97 | C = A * B, 98 | 99 | limit: 100 | K <= 4096 101 | K limit: rk356x: int8 type must be aligned with 32byte, float16 type must be aligned with 16byte; 102 | rk3588: int8 type must be aligned with 32byte, float16 type must be aligned with 32byte; 103 | N limit: rk356x: int8 type must be aligned with 16byte, float16 type must be aligned with 8byte; 104 | rk3588: int8 type must be aligned with 32byte, float16 type must be aligned with 16byte; 105 | 106 | A shape: M x K 107 | normal layout: (M, K) 108 | [M1K1, M1K2, ..., M1Kk, 109 | M2K1, M2K2, ..., M2Kk, 110 | ... 111 | MmK1, MmK2, ..., MmKk] 112 | for rk356x: 113 | int8: 114 | perf layout: (K / 8, M, 8) 115 | [K1M1, K2M1, ..., K8M1, 116 | K9M2, K10M2, ..., K16M2, 117 | ... 118 | K(k-7)Mm, K(k-6)Mm, ..., KkMm] 119 | float16: 120 | perf layout: (K / 4, M, 4) 121 | [K1M1, K2M1, ..., K4M1, 122 | K9M2, K10M2, ..., K8M2, 123 | ... 124 | K(k-3)Mm, K(k-2)Mm, ..., KkMm] 125 | for rk3588: 126 | int8: 127 | perf layout: (K / 16, M, 16) 128 | [K1M1, K2M1, ..., K16M1, 129 | K9M2, K10M2, ..., K32M2, 130 | ... 131 | K(k-15)Mm, K(k-14)Mm, ..., KkMm] 132 | float16: 133 | perf layout: (K / 8, M, 8) 134 | [K1M1, K2M1, ..., K8M1, 135 | K9M2, K10M2, ..., K16M2, 136 | ... 137 | K(k-7)Mm, K(k-6)Mm, ..., KkMm] 138 | B shape: K x N 139 | normal layout: (K, N) 140 | [K1N1, K1N2, ..., K1Nn, 141 | K2N1, K2N2, ..., K2Nn, 142 | ... 
143 | KkN1, KkN2, ..., KkNn] 144 | for rk356x: 145 | int8: 146 | native layout: (N / 16, K / 32, 16, 32) 147 | [K1N1, K2N1, ..., K32N1, 148 | K1N2, K2N2, ..., K32N2, 149 | ... 150 | K1N16, K2N16, ..., K32N16, 151 | K33N1, K34N1, ..., K64N1, 152 | K33N2, K34N2, ..., K64N2, 153 | ... 154 | K(k-31)N16, K(k-30)N16, ..., KkN16, 155 | K1N17, K2N17, ..., K32N17, 156 | K1N18, K2N18, ..., K32N18, 157 | ... 158 | K(k-31)Nn, K(k-30)Nn, ..., KkNn] 159 | float16: 160 | native layout: (N / 8, K / 16, 8, 16) 161 | [K1N1, K2N1, ..., K16N1, 162 | K1N2, K2N2, ..., K16N2, 163 | ... 164 | K1N8, K2N8, ..., K16N8, 165 | K17N1, K18N1, ..., K32N1, 166 | K17N2, K18N2, ..., K32N2, 167 | ... 168 | K(k-15)N8, K(k-30)N8, ..., KkN8, 169 | K1N9, K2N9, ..., K16N9, 170 | K1N10, K2N10, ..., K16N10, 171 | ... 172 | K(k-15)Nn, K(k-14)Nn, ..., KkNn] 173 | for rk3588: 174 | int8: 175 | native layout: (N / 32, K / 32, 32, 32) 176 | [K1N1, K2N1, ..., K32N1, 177 | K1N2, K2N2, ..., K32N2, 178 | ... 179 | K1N32, K2N32, ..., K32N32, 180 | K33N1, K34N1, ..., K64N1, 181 | K33N2, K34N2, ..., K64N2, 182 | ... 183 | K(k-31)N32, K(k-30)N32, ..., KkN32, 184 | K1N33, K2N33, ..., K32N33, 185 | K1N34, K2N34, ..., K32N34, 186 | ... 187 | K(k-31)Nn, K(k-30)Nn, ..., KkNn] 188 | float16: 189 | native layout: (N / 16, K / 32, 16, 32) 190 | [K1N1, K2N1, ..., K32N1, 191 | K1N2, K2N2, ..., K32N2, 192 | ... 193 | K1N16, K2N16, ..., K32N16, 194 | K33N1, K34N1, ..., K64N1, 195 | K33N2, K34N2, ..., K64N2, 196 | ... 197 | K(k-31)N16, K(k-30)N16, ..., KkN16, 198 | K1N17, K2N17, ..., K32N17, 199 | K1N18, K2N18, ..., K32N18, 200 | ... 201 | K(k-31)Nn, K(k-30)Nn, ..., KkNn] 202 | C shape: M x N 203 | normal layout: (M, N) 204 | [M1N1, M1N2, ..., M1Nn, 205 | M2N1, M2N2, ..., M2Nn, 206 | ... 207 | MmN1, MmN2, ..., MmNn] 208 | perf layout: (N / 4, M, 4) 209 | [N1M1, N2M1, ..., N4M1, 210 | N5M2, N6M2, ..., N8M2, 211 | ... 
212 | N(n-3)Mm, N(n-2)Mm, ..., NnMm] 213 | */ 214 | int rknn_matmul_set_io_mem(rknn_matmul_ctx ctx, rknn_tensor_mem* mem, rknn_matmul_tensor_attr* attr); 215 | 216 | /* rknn_matmul_set_core_mask 217 | 218 | set rknn core mask.(only support rk3588 in current) 219 | 220 | RKNN_NPU_CORE_AUTO: auto mode, default value 221 | RKNN_NPU_CORE_0: core 0 mode 222 | RKNN_NPU_CORE_1: core 1 mode 223 | RKNN_NPU_CORE_2: core 2 mode 224 | RKNN_NPU_CORE_0_1: combine core 0/1 mode 225 | RKNN_NPU_CORE_0_1_2: combine core 0/1/2 mode 226 | 227 | input: 228 | rknn_matmul_ctx context the handle of context. 229 | rknn_core_mask core_mask the core mask. 230 | return: 231 | int error code. 232 | */ 233 | int rknn_matmul_set_core_mask(rknn_matmul_ctx context, rknn_core_mask core_mask); 234 | 235 | /* rknn_matmul_run 236 | 237 | run the matmul in blocking mode 238 | 239 | params: 240 | rknn_matmul_ctx ctx the handle of context. 241 | return: 242 | int error code. 243 | */ 244 | int rknn_matmul_run(rknn_matmul_ctx ctx); 245 | 246 | /* rknn_matmul_destroy 247 | 248 | destroy the matmul context 249 | 250 | params: 251 | rknn_matmul_ctx ctx the handle of context. 252 | return: 253 | int error code. 
254 | */ 255 | int rknn_matmul_destroy(rknn_matmul_ctx ctx); 256 | 257 | #ifdef __cplusplus 258 | } // extern "C" 259 | #endif 260 | 261 | #endif // _RKNN_MATMUL_API_H -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/3rdparty/librknn_api/lib/aarch64/librknn_api.so: -------------------------------------------------------------------------------- 1 | librknnrt.so -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/3rdparty/librknn_api/lib/aarch64/librknnrt.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/Rockchip/cpp/yolov5/3rdparty/librknn_api/lib/aarch64/librknnrt.so -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.0) 2 | project(rknn-yolov5) 3 | 4 | set(CMAKE_CXX_STANDARD 14) 5 | set(CMAKE_BUILD_TYPE Release) 6 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") 7 | add_definitions(-w) 8 | 9 | find_package(OpenCV REQUIRED) 10 | 11 | include_directories(${OpenCV_INCLUDE_DIRS} /usr/include/rga 3rdparty/librknn_api/include include) 12 | 13 | add_executable(rknn-yolov5 csrc/single_thread/detect.cpp) 14 | target_link_libraries(rknn-yolov5 PRIVATE dl rt pthread rknnrt rga ${OpenCV_LIBRARIES}) 15 | 16 | add_executable(rknn-yolov5-mt csrc/multi_thread/detect-mt.cpp) 17 | target_link_libraries(rknn-yolov5-mt PRIVATE dl rt pthread rknnrt rga ${OpenCV_LIBRARIES}) -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/README.md: -------------------------------------------------------------------------------- 1 | # YOLOv5 deploy in RK3588 2 | 3 | ## 1. 
Prepare your environment 4 | 5 | ### 1.1 On X86 PC 6 | 7 | Suggest to use anaconda to create a virtual environment. 8 | 9 | ```bash 10 | conda create -n rknn python=3.8 11 | conda activate rknn 12 | ``` 13 | 14 | Install yolov5: 15 | 16 | ```bash 17 | git clone https://github.com/triple-Mu/yolov5.git -b triplemu/model-only 18 | # install yolov5 19 | cd yolov5 20 | pip install -r requirements.txt 21 | ``` 22 | 23 | Convert pt(yolov5s.pt) to onnx(yolov5s.onnx): 24 | 25 | ```bash 26 | python export.py --weights yolov5s.pt --include onnx --simplify 27 | ``` 28 | 29 | Convert onnx to rknn: 30 | 31 | `rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl` is in `packages` 32 | 33 | ```bash 34 | cd AI-on-Board/Rockchip/cpp/yolov5/pysrc 35 | pip install rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl 36 | # modify the onnx2rknn.py: ONNX_MODEL RKNN_MODEL IMG_PATH DATASET IMG_SIZE 37 | # get yolov5s.rknn model 38 | sh run.sh 39 | ``` 40 | 41 | ### 1.2 On ARM RK3588 42 | 43 | Copy this repo to your board and build rknn-yolov5 demo(single thread and multithread). 44 | 45 | ```bash 46 | cd AI-on-Board/Rockchip/cpp/yolov5 47 | mkdir -p build && cd build 48 | cmake .. 49 | make -j$(nproc) 50 | ``` 51 | 52 | ## 2. Run 53 | 54 | ```bash 55 | ./rknn-yolov5 ./pysrc/yolov5s.rknn ./test.mp4 56 | ./rknn-yolov5-mt ./pysrc/yolov5s.rknn ./test.mp4 57 | ``` 58 | 59 | `test.mp4` is your own mp4 path. 
60 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/csrc/multi_thread/detect-mt.cpp: -------------------------------------------------------------------------------- 1 | #include "ThreadPool.h" 2 | #include "rknet_cv.hpp" 3 | 4 | #define NUM_THREAD 8 5 | 6 | int main(int argc, char** argv) 7 | { 8 | std::string model_name{argv[1]}; 9 | std::string video_name{argv[2]}; 10 | std::string window_name = "on detection"; 11 | 12 | size_t counter = 0; 13 | cv::VideoCapture cap(video_name); 14 | cv::namedWindow(window_name, cv::WINDOW_FREERATIO); 15 | cv::resizeWindow(window_name, 1280, 960); 16 | cv::moveWindow(window_name, 300, 300); 17 | 18 | std::vector detectors; 19 | ThreadPool pool(NUM_THREAD); 20 | std::queue> futures; 21 | 22 | // 初始化 23 | for (int i = 0; i < NUM_THREAD; i++) { 24 | RKYOLOv5* ptr = new RKYOLOv5(model_name, i); 25 | detectors.push_back(ptr); 26 | cap >> ptr->mImage; 27 | futures.push( 28 | pool.enqueue(&RKYOLOv5::forward, std::ref(*ptr), std::ref(ptr->mImage), std::ref(ptr->mObjects), true)); 29 | } 30 | 31 | timestamp start = get_timestamp(); 32 | timestamp end; 33 | 34 | while (cap.isOpened()) { 35 | if (futures.front().get() != 0) { 36 | break; 37 | } 38 | futures.pop(); 39 | cv::imshow(window_name, detectors[counter % NUM_THREAD]->mImage); 40 | if (cv::waitKey(1) == 'q') // 延时1毫秒,按q键退出 41 | break; 42 | if (!cap.read(detectors[counter % NUM_THREAD]->mImage)) 43 | break; 44 | auto cnt = counter++ % NUM_THREAD; 45 | futures.push(pool.enqueue(&RKYOLOv5::forward, 46 | std::ref(*detectors[cnt]), 47 | std::ref(detectors[cnt]->mImage), 48 | std::ref(detectors[cnt]->mObjects), 49 | true)); 50 | 51 | if (counter % 60 == 0) { 52 | end = get_timestamp(); 53 | printf("FPS every 60 frames is: %4.2f\n", 6e7f / get_count(start, end)); 54 | start = end; 55 | } 56 | } 57 | 58 | // 释放剩下的资源 59 | while (!futures.empty()) { 60 | if (futures.front().get()) 61 | break; 62 | futures.pop(); 63 | } 64 | for (int i = 
0; i < NUM_THREAD; i++) { 65 | delete detectors[i]; 66 | } 67 | cap.release(); 68 | cv::destroyAllWindows(); 69 | return 0; 70 | } 71 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/csrc/single_thread/detect.cpp: -------------------------------------------------------------------------------- 1 | #include "rk_utils.hpp" 2 | #include "rknet_cv.hpp" 3 | 4 | int main(int argc, char** argv) 5 | { 6 | std::string model_name{argv[1]}; 7 | std::string video_name{argv[2]}; 8 | std::string window_name = "on detection"; 9 | 10 | cv::VideoCapture cap(video_name); 11 | cv::namedWindow(window_name, cv::WINDOW_FREERATIO); 12 | cv::resizeWindow(window_name, 1280, 960); 13 | cv::moveWindow(window_name, 300, 300); 14 | auto detector = new RKYOLOv5(model_name, 0); 15 | auto mfps = MovingAverageFPS(30); 16 | 17 | cv::Mat image; 18 | std::vector objects; 19 | while (cap.isOpened()) { 20 | cap >> image; 21 | timestamp start = get_timestamp(); 22 | detector->forward(image, objects, true); 23 | timestamp end = get_timestamp(); 24 | auto cost = 1e6f / get_count(start, end); 25 | draw_fps(image, mfps.next(cost)); 26 | cv::imshow(window_name, image); 27 | if (cv::waitKey(1) == 'q') { 28 | break; 29 | } 30 | } 31 | 32 | delete detector; 33 | cap.release(); 34 | cv::destroyAllWindows(); 35 | return 0; 36 | } 37 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/ThreadPool.h: -------------------------------------------------------------------------------- 1 | #ifndef THREAD_POOL_H 2 | #define THREAD_POOL_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | class ThreadPool { 15 | public: 16 | ThreadPool(size_t); 17 | template 18 | auto enqueue(F&& f, Args&&... 
args) -> std::future::type>; 19 | ~ThreadPool(); 20 | 21 | private: 22 | // need to keep track of threads so we can join them 23 | std::vector workers; 24 | // the task queue 25 | std::queue> tasks; 26 | 27 | // synchronization 28 | std::mutex queue_mutex; 29 | std::condition_variable condition; 30 | bool stop; 31 | }; 32 | 33 | // the constructor just launches some amount of workers 34 | inline ThreadPool::ThreadPool(size_t threads): stop(false) 35 | { 36 | for (size_t i = 0; i < threads; ++i) 37 | workers.emplace_back([this] { 38 | for (;;) { 39 | std::function task; 40 | 41 | { 42 | std::unique_lock lock(this->queue_mutex); 43 | this->condition.wait(lock, [this] { return this->stop || !this->tasks.empty(); }); 44 | if (this->stop && this->tasks.empty()) 45 | return; 46 | task = std::move(this->tasks.front()); 47 | this->tasks.pop(); 48 | } 49 | 50 | task(); 51 | } 52 | }); 53 | } 54 | 55 | // add new work item to the pool 56 | template 57 | auto ThreadPool::enqueue(F&& f, Args&&... args) -> std::future::type> 58 | { 59 | using return_type = typename std::result_of::type; 60 | 61 | auto task = 62 | std::make_shared>(std::bind(std::forward(f), std::forward(args)...)); 63 | 64 | std::future res = task->get_future(); 65 | { 66 | std::unique_lock lock(queue_mutex); 67 | 68 | // don't allow enqueueing after stopping the pool 69 | if (stop) 70 | throw std::runtime_error("enqueue on stopped ThreadPool"); 71 | 72 | tasks.emplace([task]() { (*task)(); }); 73 | } 74 | condition.notify_one(); 75 | return res; 76 | } 77 | 78 | // the destructor joins all threads 79 | inline ThreadPool::~ThreadPool() 80 | { 81 | { 82 | std::unique_lock lock(queue_mutex); 83 | stop = true; 84 | } 85 | condition.notify_all(); 86 | for (std::thread& worker : workers) 87 | worker.join(); 88 | } 89 | 90 | #endif -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/checker.h: 
-------------------------------------------------------------------------------- 1 | // 2 | // Created by ubuntu on 23-5-31. 3 | // 4 | 5 | #ifndef _CHECKER_H_ 6 | #define _CHECKER_H_ 7 | 8 | #ifndef NDEBUG 9 | #define CHECK(call) \ 10 | do { \ 11 | const auto ret = call; \ 12 | if (!ret) { \ 13 | printf("********** Error occurred ! **********\n"); \ 14 | printf("***** File: %s\n", __FILE__); \ 15 | printf("***** Line: %d\n", __LINE__); \ 16 | printf("***** Error: %s\n", #call); \ 17 | exit(1); \ 18 | } \ 19 | } while (0) 20 | #else 21 | #define CHECK(call) \ 22 | { \ 23 | const auto ret = call; \ 24 | } 25 | #endif 26 | #endif //_CHECKER_H_ 27 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/decoder.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by ubuntu on 23-5-31. 3 | // 4 | 5 | #ifndef __DECODER__ 6 | #define __DECODER__ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "det_data.h" 13 | 14 | template 15 | __inline__ static T clamp(T val, T min, T max) 16 | { 17 | return val > min ? (val < max ? 
val : max) : min; 18 | } 19 | 20 | __inline__ static float fast_exp(float x) 21 | { 22 | union { 23 | uint32_t i; 24 | float f; 25 | } v{}; 26 | v.i = (1 << 23) * (1.4426950409 * x + 126.93490512f); 27 | return v.f; 28 | } 29 | 30 | __inline__ static float un_sigmoid(float y) 31 | { 32 | return -1.f * logf((1.f / y) - 1.f); 33 | } 34 | 35 | __inline__ static float fast_sigmoid(float x) 36 | { 37 | return 1.f / (1.f + fast_exp(-x)); 38 | } 39 | 40 | static void qsort_descent_inplace(std::vector& proposals, int left, int right) 41 | { 42 | int i = left; 43 | int j = right; 44 | float p = proposals[(left + right) / 2].score; 45 | 46 | while (i <= j) { 47 | while (proposals[i].score > p) { 48 | i++; 49 | } 50 | 51 | while (proposals[j].score < p) { 52 | j--; 53 | } 54 | 55 | if (i <= j) { 56 | // swap 57 | std::swap(proposals[i], proposals[j]); 58 | 59 | i++; 60 | j--; 61 | } 62 | } 63 | 64 | #pragma omp parallel sections 65 | { 66 | #pragma omp section 67 | { 68 | if (left < j) { 69 | qsort_descent_inplace(proposals, left, j); 70 | } 71 | } 72 | #pragma omp section 73 | { 74 | if (i < right) { 75 | qsort_descent_inplace(proposals, i, right); 76 | } 77 | } 78 | } 79 | } 80 | 81 | static void qsort_descent_inplace(std::vector& objects) 82 | { 83 | if (objects.empty()) 84 | return; 85 | 86 | qsort_descent_inplace(objects, 0, objects.size() - 1); 87 | } 88 | 89 | static void 90 | batchednms(std::vector& proposals, std::vector& indices, float iou_thres = 0.65f, bool agnostic = false) 91 | { 92 | indices.clear(); 93 | auto n = proposals.size(); 94 | 95 | for (int i = 0; i < n; i++) { 96 | const Object& a = proposals[i]; 97 | 98 | bool keep = true; 99 | 100 | for (auto& idx : indices) { 101 | const Object& b = proposals[idx]; 102 | if (!agnostic && a.label != b.label) { 103 | continue; 104 | } 105 | if ((a.box & b.box) > iou_thres) { 106 | keep = false; 107 | } 108 | } 109 | if (keep) { 110 | indices.push_back(i); 111 | } 112 | } 113 | } 114 | 115 | static void 
generate_proposals_yolov5(const float* feature, 116 | const float* anchors, 117 | const int feat_height, 118 | const int feat_width, 119 | const int stride, 120 | std::vector& proposals, 121 | float conf_thres = 0.25, 122 | int num_classes = 80) 123 | { 124 | const int num_anchors = 3; 125 | const int walk_through = (5 + num_classes) * 3; 126 | float usigmoid_conf_thres = un_sigmoid(conf_thres); 127 | for (int h = 0; h < feat_height; ++h) { 128 | for (int w = 0; w < feat_width; ++w) { 129 | const float* cur_ptr = feature + (h * feat_width + w) * walk_through; 130 | for (int na = 0; na < num_anchors; ++na) { 131 | const float anchor_w = anchors[na * 2]; 132 | const float anchor_h = anchors[na * 2 + 1]; 133 | const int walk_anchor = na * (5 + num_classes); 134 | const float* cur_anchor_ptr = cur_ptr + walk_anchor; 135 | if (cur_anchor_ptr[4] > usigmoid_conf_thres) { 136 | const float* max_score_ptr = std::max_element(cur_anchor_ptr + 5, cur_anchor_ptr + 5 + num_classes); 137 | float score = fast_sigmoid(cur_anchor_ptr[4]) * fast_sigmoid(*max_score_ptr); 138 | if (score > conf_thres) { 139 | float dx = fast_sigmoid(cur_anchor_ptr[0]); 140 | float dy = fast_sigmoid(cur_anchor_ptr[1]); 141 | float dw = fast_sigmoid(cur_anchor_ptr[2]); 142 | float dh = fast_sigmoid(cur_anchor_ptr[3]); 143 | 144 | float pb_cx = (dx * 2.f - 0.5f + (float)w) * (float)stride; 145 | float pb_cy = (dy * 2.f - 0.5f + (float)h) * (float)stride; 146 | 147 | float pb_w = std::pow(dw * 2.f, 2.f) * anchor_w * (float)stride; 148 | float pb_h = std::pow(dh * 2.f, 2.f) * anchor_h * (float)stride; 149 | 150 | float x1 = pb_cx - pb_w * 0.5f; 151 | float y1 = pb_cy - pb_h * 0.5f; 152 | float x2 = pb_cx + pb_w * 0.5f; 153 | float y2 = pb_cy + pb_h * 0.5f; 154 | 155 | int label = (int)std::distance(cur_anchor_ptr + 5, max_score_ptr); 156 | Object obj; 157 | obj.label = label; 158 | obj.score = score; 159 | obj.box = {x1, y1, x2 - x1, y2 - y1}; 160 | proposals.push_back(obj); 161 | } 162 | } 163 | } 164 | } 
165 | } 166 | } 167 | #endif //__DECODER__ 168 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/det_data.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by ubuntu on 23-3-21. 3 | // 4 | 5 | #ifndef __DETDATA_H__ 6 | #define __DETDATA_H__ 7 | 8 | #include 9 | 10 | const int BOX_COLORS[][3] = { 11 | { 0, 114, 189 }, 12 | { 217, 83, 25 }, 13 | { 237, 177, 32 }, 14 | { 126, 47, 142 }, 15 | { 119, 172, 48 }, 16 | { 77, 190, 238 }, 17 | { 162, 20, 47 }, 18 | { 76, 76, 76 }, 19 | { 153, 153, 153 }, 20 | { 255, 0, 0 }, 21 | { 255, 128, 0 }, 22 | { 191, 191, 0 }, 23 | { 0, 255, 0 }, 24 | { 0, 0, 255 }, 25 | { 170, 0, 255 }, 26 | { 85, 85, 0 }, 27 | { 85, 170, 0 }, 28 | { 85, 255, 0 }, 29 | { 170, 85, 0 }, 30 | { 170, 170, 0 }, 31 | { 170, 255, 0 }, 32 | { 255, 85, 0 }, 33 | { 255, 170, 0 }, 34 | { 255, 255, 0 }, 35 | { 0, 85, 128 }, 36 | { 0, 170, 128 }, 37 | { 0, 255, 128 }, 38 | { 85, 0, 128 }, 39 | { 85, 85, 128 }, 40 | { 85, 170, 128 }, 41 | { 85, 255, 128 }, 42 | { 170, 0, 128 }, 43 | { 170, 85, 128 }, 44 | { 170, 170, 128 }, 45 | { 170, 255, 128 }, 46 | { 255, 0, 128 }, 47 | { 255, 85, 128 }, 48 | { 255, 170, 128 }, 49 | { 255, 255, 128 }, 50 | { 0, 85, 255 }, 51 | { 0, 170, 255 }, 52 | { 0, 255, 255 }, 53 | { 85, 0, 255 }, 54 | { 85, 85, 255 }, 55 | { 85, 170, 255 }, 56 | { 85, 255, 255 }, 57 | { 170, 0, 255 }, 58 | { 170, 85, 255 }, 59 | { 170, 170, 255 }, 60 | { 170, 255, 255 }, 61 | { 255, 0, 255 }, 62 | { 255, 85, 255 }, 63 | { 255, 170, 255 }, 64 | { 85, 0, 0 }, 65 | { 128, 0, 0 }, 66 | { 170, 0, 0 }, 67 | { 212, 0, 0 }, 68 | { 255, 0, 0 }, 69 | { 0, 43, 0 }, 70 | { 0, 85, 0 }, 71 | { 0, 128, 0 }, 72 | { 0, 170, 0 }, 73 | { 0, 212, 0 }, 74 | { 0, 255, 0 }, 75 | { 0, 0, 43 }, 76 | { 0, 0, 85 }, 77 | { 0, 0, 128 }, 78 | { 0, 0, 170 }, 79 | { 0, 0, 212 }, 80 | { 0, 0, 255 }, 81 | { 0, 0, 0 }, 82 | { 36, 36, 36 }, 83 | { 73, 73, 73 }, 84 | { 109, 
109, 109 }, 85 | { 146, 146, 146 }, 86 | { 182, 182, 182 }, 87 | { 219, 219, 219 }, 88 | { 0, 114, 189 }, 89 | { 80, 183, 189 }, 90 | { 128, 128, 0 } 91 | }; 92 | 93 | const char* NAMES[] = { 94 | "person", "bicycle", "car", "motorcycle", "airplane", 95 | "bus", "train", "truck", "boat", "traffic light", 96 | "fire hydrant", "stop sign", "parking meter", "bench", "bird", 97 | "cat", "dog", "horse", "sheep", "cow", 98 | "elephant", "bear", "zebra", "giraffe", "backpack", 99 | "umbrella", "handbag", "tie", "suitcase", "frisbee", 100 | "skis", "snowboard", "sports ball", "kite", "baseball bat", 101 | "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", 102 | "wine glass", "cup", "fork", "knife", "spoon", 103 | "bowl", "banana", "apple", "sandwich", "orange", 104 | "broccoli", "carrot", "hot dog", "pizza", "donut", 105 | "cake", "chair", "couch", "potted plant", "bed", 106 | "dining table", "toilet", "tv", "laptop", "mouse", 107 | "remote", "keyboard", "cell phone", "microwave", "oven", 108 | "toaster", "sink", "refrigerator", "book", "clock", 109 | "vase", "scissors", "teddy bear", "hair drier", "toothbrush" 110 | }; 111 | 112 | const int MASK_COLORS[][3] = { 113 | { 255, 56, 56 }, 114 | { 255, 157, 151 }, 115 | { 255, 112, 31 }, 116 | { 255, 178, 29 }, 117 | { 207, 210, 49 }, 118 | { 72, 249, 10 }, 119 | { 146, 204, 23 }, 120 | { 61, 219, 134 }, 121 | { 26, 147, 52 }, 122 | { 0, 212, 187 }, 123 | { 44, 153, 168 }, 124 | { 0, 194, 255 }, 125 | { 52, 69, 147 }, 126 | { 100, 115, 255 }, 127 | { 0, 24, 236 }, 128 | { 132, 56, 255 }, 129 | { 82, 0, 133 }, 130 | { 203, 56, 255 }, 131 | { 255, 149, 200 }, 132 | { 255, 55, 199 } 133 | }; 134 | 135 | const int KPT_COLORS[][3] = { 136 | { 0, 255, 0 }, 137 | { 0, 255, 0 }, 138 | { 0, 255, 0 }, 139 | { 0, 255, 0 }, 140 | { 0, 255, 0 }, 141 | { 255, 128, 0 }, 142 | { 255, 128, 0 }, 143 | { 255, 128, 0 }, 144 | { 255, 128, 0 }, 145 | { 255, 128, 0 }, 146 | { 255, 128, 0 }, 147 | { 51, 153, 255 }, 148 | { 51, 
153, 255 }, 149 | { 51, 153, 255 }, 150 | { 51, 153, 255 }, 151 | { 51, 153, 255 }, 152 | { 51, 153, 255 } 153 | }; 154 | 155 | const int LIBM_COLORS[][3] = { 156 | { 51, 153, 255 }, 157 | { 51, 153, 255 }, 158 | { 51, 153, 255 }, 159 | { 51, 153, 255 }, 160 | { 255, 51, 255 }, 161 | { 255, 51, 255 }, 162 | { 255, 51, 255 }, 163 | { 255, 128, 0 }, 164 | { 255, 128, 0 }, 165 | { 255, 128, 0 }, 166 | { 255, 128, 0 }, 167 | { 255, 128, 0 }, 168 | { 0, 255, 0 }, 169 | { 0, 255, 0 }, 170 | { 0, 255, 0 }, 171 | { 0, 255, 0 }, 172 | { 0, 255, 0 }, 173 | { 0, 255, 0 }, 174 | { 0, 255, 0 } 175 | }; 176 | 177 | const int SKELETON[][2] = { 178 | { 16, 14 }, 179 | { 14, 12 }, 180 | { 17, 15 }, 181 | { 15, 13 }, 182 | { 12, 13 }, 183 | { 6, 12 }, 184 | { 7, 13 }, 185 | { 6, 7 }, 186 | { 6, 8 }, 187 | { 7, 9 }, 188 | { 8, 10 }, 189 | { 9, 11 }, 190 | { 2, 3 }, 191 | { 1, 2 }, 192 | { 1, 3 }, 193 | { 2, 4 }, 194 | { 3, 5 }, 195 | { 4, 6 }, 196 | { 5, 7 } 197 | }; 198 | 199 | struct ResizeInfo 200 | { 201 | float ratio_w; 202 | float ratio_h; 203 | int pad_l; 204 | int pad_t; 205 | int pad_r; 206 | int pad_b; 207 | 208 | ResizeInfo() = default; 209 | 210 | ResizeInfo(float rw, float rh, int pl, int pt, int pr, int pb) : 211 | ratio_w(rw), ratio_h(rh), pad_l(pl), pad_t(pt), pad_r(pr), pad_b(pb) 212 | { 213 | } 214 | 215 | ResizeInfo(float rw, float rh) : 216 | ratio_w(rw), ratio_h(rh), pad_l(0), pad_t(0), pad_r(0), pad_b(0) 217 | { 218 | } 219 | 220 | ~ResizeInfo() = default; 221 | 222 | }; 223 | 224 | struct Bbox 225 | { 226 | float x{ 0.f }; 227 | float y{ 0.f }; 228 | float width{ 0.f }; 229 | float height{ 0.f }; 230 | 231 | Bbox() = default; 232 | 233 | Bbox(float xmin, float ymin, float w, float h) 234 | : x(xmin), y(ymin), width(w), height(h) 235 | { 236 | } 237 | 238 | float operator&(const Bbox& other) const 239 | { 240 | float x1 = std::max(this->x, other.x); 241 | float y1 = std::max(this->y, other.y); 242 | float x2 = std::min(this->x + this->width, other.x + 
other.width); 243 | float y2 = std::min(this->y + this->height, other.y + other.height); 244 | float intersection_area = std::max(0.f, x2 - x1) * std::max(0.f, y2 - y1); 245 | float union_area = this->width * this->height + other.width * other.height - intersection_area; 246 | if (union_area <= 0.f) 247 | { 248 | return 0.f; 249 | } 250 | return intersection_area / (union_area + 1e-12f); 251 | } 252 | 253 | float area() const 254 | { 255 | return this->width * this->height; 256 | } 257 | 258 | ~Bbox() = default; 259 | }; 260 | 261 | struct Object 262 | { 263 | int label; 264 | float score; 265 | Bbox box; 266 | }; 267 | 268 | #endif //__DETDATA_H__ 269 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/rk_utils.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by ubuntu on 23-6-7. 3 | // 4 | 5 | #ifndef __RK_UTILS__ 6 | #define __RK_UTILS__ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "checker.h" 14 | #include "rknn_api.h" 15 | 16 | typedef unsigned char uchar; 17 | using timestamp = std::chrono::time_point; 18 | 19 | static void print_tensor_attr(rknn_tensor_attr* attr) 20 | { 21 | printf(" index=%d, name=%s, n_dims=%d, dims=[%d, %d, %d, %d], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, " 22 | "zp=%d, scale=%f\n", 23 | attr->index, 24 | attr->name, 25 | attr->n_dims, 26 | attr->dims[0], 27 | attr->dims[1], 28 | attr->dims[2], 29 | attr->dims[3], 30 | attr->n_elems, 31 | attr->size, 32 | get_format_string(attr->fmt), 33 | get_type_string(attr->type), 34 | get_qnt_type_string(attr->qnt_type), 35 | attr->zp, 36 | attr->scale); 37 | } 38 | 39 | static uchar* load_rknn_model(const std::string& rknn_path, int* size) 40 | { 41 | std::ifstream file(rknn_path, std::ios::binary); 42 | CHECK(file.good()); 43 | file.seekg(0, std::ios::end); 44 | *size = (int)file.tellg(); 45 | file.seekg(0, std::ios::beg); 46 | uchar* 
model_buffer = new uchar[*size]; 47 | CHECK(model_buffer != nullptr); 48 | file.read(reinterpret_cast(model_buffer), *size); 49 | file.close(); 50 | return model_buffer; 51 | } 52 | 53 | class MovingAverageFPS { 54 | public: 55 | explicit MovingAverageFPS(const int& size = 30) 56 | { 57 | this->size = size; 58 | this->sum = 0.f; 59 | } 60 | 61 | float next(const float& val) 62 | { 63 | if (this->fps_queue.size() == size) { 64 | this->sum -= this->fps_queue.front(); 65 | this->fps_queue.pop(); 66 | } 67 | this->fps_queue.emplace(val); 68 | this->sum += val; 69 | return this->sum / this->fps_queue.size(); 70 | } 71 | 72 | ~MovingAverageFPS() = default; 73 | 74 | private: 75 | int size; 76 | float sum; 77 | std::queue fps_queue; 78 | }; 79 | 80 | static timestamp get_timestamp() 81 | { 82 | return std::chrono::time_point_cast(std::chrono::system_clock::now()); 83 | } 84 | 85 | static long get_count(timestamp start, timestamp end) 86 | { 87 | return std::chrono::duration_cast(end - start).count(); 88 | } 89 | 90 | #endif //__RK_UTILS__ 91 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/include/rknet_cv.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by ubuntu on 23-5-31. 
3 | // 4 | 5 | #ifndef __RKRUNNER__ 6 | #define __RKRUNNER__ 7 | 8 | #include "checker.h" 9 | #include "det_data.h" 10 | #include "im2d.h" 11 | #include "rga.h" 12 | #include "rknn_api.h" 13 | 14 | #include "decoder.hpp" 15 | #include "opencv2/opencv.hpp" 16 | #include "rk_utils.hpp" 17 | 18 | #define NUM_CLASSES 80 19 | #define CONF_THRES 0.25 20 | #define IOU_THRES 0.65 21 | 22 | static void draw_fps(cv::Mat& image, float fps) 23 | { 24 | char text[32]; 25 | sprintf(text, "FPS=%.2f", fps); 26 | 27 | int baseLine = 0; 28 | cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine); 29 | 30 | int y = 0; 31 | int x = image.cols - label_size.width; 32 | 33 | cv::rectangle(image, {x, y, label_size.width, label_size.height + baseLine}, {255, 255, 255}, -1); 34 | 35 | cv::putText(image, text, {x, y + label_size.height}, cv::FONT_HERSHEY_SIMPLEX, 0.5, {0, 0, 0}); 36 | } 37 | 38 | static void draw_on_image(cv::Mat& image, const std::vector& objects) 39 | { 40 | int baseLine = 0; 41 | for (auto& object : objects) { 42 | const int& cate_idx = object.label; 43 | cv::Scalar bbox_color(BOX_COLORS[cate_idx][0], BOX_COLORS[cate_idx][1], BOX_COLORS[cate_idx][2]); 44 | 45 | const char* name = NAMES[cate_idx]; 46 | cv::Rect bbox(object.box.x, object.box.y, object.box.width, object.box.height); 47 | cv::rectangle(image, bbox, bbox_color, 2); 48 | char text[256]; 49 | sprintf(text, "%s %.1f%%", name, object.score * 100); 50 | 51 | cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); 52 | int x = bbox.x; 53 | int y = bbox.y + 1; 54 | 55 | if (y > image.rows) { 56 | y = image.rows; 57 | } 58 | 59 | cv::rectangle(image, {x, y, label_size.width, label_size.height + baseLine}, {0, 0, 255}, -1); 60 | 61 | cv::putText(image, text, {x, y + label_size.height}, cv::FONT_HERSHEY_SIMPLEX, 0.4, {255, 255, 255}, 1); 62 | } 63 | } 64 | 65 | class RKYOLOv5 { 66 | public: 67 | RKYOLOv5(const std::string& rknn_path, int core_id); 68 | 
69 | ~RKYOLOv5(); 70 | 71 | void preprocess(const cv::Mat& image); 72 | 73 | int forward(cv::Mat& image, std::vector& objects, bool draw); 74 | 75 | public: 76 | int mNet_h; 77 | int mNet_w; 78 | int mNumInputs; 79 | int mNumOutputs; 80 | float mAnchors[3][6] = {{1.25, 1.625, 2.0, 3.75, 4.125, 2.875}, 81 | {1.875, 3.8125, 3.875, 2.8125, 3.6875, 7.4375}, 82 | {3.625, 2.8125, 4.875, 6.1875, 11.65625, 10.1875}}; 83 | 84 | // using for multithread 85 | cv::Mat mImage; 86 | std::vector mObjects; 87 | 88 | private: 89 | rknn_tensor_attr mInput_attr; 90 | rknn_tensor_attr mOutput_attrs[3]; 91 | 92 | rknn_input mInput; 93 | rknn_output mOutputs[3]; 94 | 95 | rga_buffer_t src; 96 | rga_buffer_t dst; 97 | 98 | im_rect src_rect; 99 | im_rect dst_rect; 100 | 101 | rknn_context mContext; 102 | void* resize_buf = nullptr; 103 | }; 104 | 105 | RKYOLOv5::RKYOLOv5(const std::string& rknn_path, int core_id) 106 | { 107 | int model_size; 108 | uchar* model_buffer = load_rknn_model(rknn_path, &model_size); 109 | CHECK(rknn_init(&this->mContext, model_buffer, model_size, RKNN_FLAG_COLLECT_PERF_MASK, nullptr) >= 0); 110 | free(model_buffer); 111 | rknn_core_mask core_mask; 112 | if (core_id == -1) { 113 | core_mask = RKNN_NPU_CORE_AUTO; 114 | } 115 | else if (core_id % 3 == 0) { 116 | core_mask = RKNN_NPU_CORE_0; 117 | } 118 | else if (core_id % 3 == 1) { 119 | core_mask = RKNN_NPU_CORE_1; 120 | } 121 | else { 122 | core_mask = RKNN_NPU_CORE_2; 123 | } 124 | CHECK(rknn_set_core_mask(this->mContext, core_mask) >= 0); 125 | 126 | rknn_input_output_num io_num; 127 | CHECK(rknn_query(this->mContext, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num)) >= 0); 128 | this->mNumInputs = io_num.n_input; 129 | this->mNumOutputs = io_num.n_output; 130 | 131 | memset(&this->mInput_attr, 0x00, sizeof(rknn_tensor_attr)); 132 | memset(&this->mInput, 0x00, sizeof(rknn_input)); 133 | 134 | this->mInput_attr.index = 0; 135 | CHECK(rknn_query(this->mContext, RKNN_QUERY_INPUT_ATTR, &this->mInput_attr, 
sizeof(rknn_tensor_attr)) >= 0); 136 | 137 | memset(&this->mOutput_attrs[0], 0x00, sizeof(rknn_tensor_attr)); 138 | memset(&this->mOutput_attrs[1], 0x00, sizeof(rknn_tensor_attr)); 139 | memset(&this->mOutput_attrs[2], 0x00, sizeof(rknn_tensor_attr)); 140 | 141 | memset(&this->mOutputs[0], 0x00, sizeof(rknn_output)); 142 | memset(&this->mOutputs[1], 0x00, sizeof(rknn_output)); 143 | memset(&this->mOutputs[2], 0x00, sizeof(rknn_output)); 144 | 145 | this->mOutput_attrs[0].index = 0; 146 | this->mOutput_attrs[1].index = 1; 147 | this->mOutput_attrs[2].index = 2; 148 | 149 | CHECK(rknn_query(this->mContext, RKNN_QUERY_OUTPUT_ATTR, &this->mOutput_attrs[0], sizeof(rknn_tensor_attr)) >= 0); 150 | CHECK(rknn_query(this->mContext, RKNN_QUERY_OUTPUT_ATTR, &this->mOutput_attrs[1], sizeof(rknn_tensor_attr)) >= 0); 151 | CHECK(rknn_query(this->mContext, RKNN_QUERY_OUTPUT_ATTR, &this->mOutput_attrs[2], sizeof(rknn_tensor_attr)) >= 0); 152 | 153 | if (this->mInput_attr.fmt == RKNN_TENSOR_NCHW) { 154 | this->mNet_h = this->mInput_attr.dims[2]; 155 | this->mNet_w = this->mInput_attr.dims[3]; 156 | } 157 | else { 158 | this->mNet_h = this->mInput_attr.dims[1]; 159 | this->mNet_w = this->mInput_attr.dims[2]; 160 | } 161 | 162 | this->mInput.index = 0; 163 | this->mInput.type = RKNN_TENSOR_UINT8; 164 | this->mInput.size = 1 * 3 * this->mNet_h * this->mNet_w; 165 | this->mInput.fmt = RKNN_TENSOR_NHWC; 166 | this->mInput.pass_through = 0; 167 | 168 | memset(&this->src, 0x00, sizeof(this->src)); 169 | memset(&this->dst, 0x00, sizeof(this->dst)); 170 | memset(&this->src_rect, 0x00, sizeof(this->src_rect)); 171 | memset(&this->dst_rect, 0x00, sizeof(this->dst_rect)); 172 | } 173 | 174 | RKYOLOv5::~RKYOLOv5() 175 | { 176 | CHECK(rknn_destroy(this->mContext) >= 0); 177 | if (this->resize_buf != nullptr) { 178 | free(this->resize_buf); 179 | this->resize_buf = nullptr; 180 | } 181 | } 182 | 183 | void RKYOLOv5::preprocess(const cv::Mat& image) 184 | { 185 | int img_width = image.cols; 186 | 
int img_height = image.rows; 187 | 188 | cv::Mat input_image; 189 | cv::cvtColor(image, input_image, cv::COLOR_BGR2RGB); 190 | 191 | if (img_width != this->mNet_w || img_height != this->mNet_h) { 192 | 193 | if (this->resize_buf == nullptr) { 194 | this->resize_buf = malloc(this->mNet_h * this->mNet_w * 3 * sizeof(uint8_t)); 195 | } 196 | memset(this->resize_buf, 0x00, this->mNet_h * this->mNet_w * 3 * sizeof(uint8_t)); 197 | 198 | this->src = wrapbuffer_virtualaddr(input_image.data, img_width, img_height, RgaSURF_FORMAT::RK_FORMAT_RGB_888); 199 | this->dst = 200 | wrapbuffer_virtualaddr(this->resize_buf, this->mNet_w, this->mNet_h, RgaSURF_FORMAT::RK_FORMAT_RGB_888); 201 | CHECK(imcheck(src, dst, src_rect, dst_rect) == IM_STATUS::IM_STATUS_NOERROR); 202 | CHECK(imresize(src, dst) == IM_STATUS::IM_STATUS_SUCCESS); 203 | this->mInput.buf = this->resize_buf; 204 | } 205 | else { 206 | this->mInput.buf = input_image.data; 207 | } 208 | } 209 | 210 | int RKYOLOv5::forward(cv::Mat& image, std::vector& objects, bool draw) 211 | { 212 | if (image.empty()) { 213 | return 1; 214 | } 215 | int img_width = image.cols; 216 | int img_height = image.rows; 217 | this->preprocess(image); 218 | ResizeInfo resize_info = ResizeInfo(this->mNet_w / (float)img_width, this->mNet_h / (float)img_height); 219 | 220 | 221 | CHECK(rknn_inputs_set(this->mContext, this->mNumInputs, &this->mInput) >= 0); 222 | 223 | 224 | this->mOutputs[0].want_float = 1; 225 | this->mOutputs[1].want_float = 1; 226 | this->mOutputs[2].want_float = 1; 227 | 228 | CHECK(rknn_run(this->mContext, nullptr) >= 0); 229 | CHECK(rknn_outputs_get(this->mContext, this->mNumOutputs, this->mOutputs, nullptr) >= 0); 230 | 231 | objects.clear(); 232 | std::vector proposals; 233 | 234 | int stride = 4; 235 | for (int i = 0; i < this->mNumOutputs; i++) { 236 | float* feature = (float*)this->mOutputs[i].buf; 237 | stride <<= 1; 238 | generate_proposals_yolov5(feature, 239 | this->mAnchors[i], 240 | this->mNet_h / stride, 241 | 
this->mNet_w / stride, 242 | stride, 243 | proposals, 244 | CONF_THRES, 245 | NUM_CLASSES); 246 | } 247 | 248 | std::vector indices; 249 | 250 | batchednms(proposals, indices, IOU_THRES, false); 251 | 252 | for (auto& idx : indices) { 253 | const Object& pro = proposals[idx]; 254 | auto& bbox = pro.box; 255 | float x1 = bbox.x; 256 | float y1 = bbox.y; 257 | float x2 = bbox.x + bbox.width; 258 | float y2 = bbox.y + bbox.height; 259 | float score = pro.score; 260 | int label = pro.label; 261 | 262 | x1 = (x1 - resize_info.pad_l) / resize_info.ratio_w; 263 | y1 = (y1 - resize_info.pad_t) / resize_info.ratio_h; 264 | x2 = (x2 - resize_info.pad_l) / resize_info.ratio_w; 265 | y2 = (y2 - resize_info.pad_t) / resize_info.ratio_h; 266 | 267 | x1 = clamp(x1, 1.f, img_width - 1.f); 268 | y1 = clamp(y1, 1.f, img_height - 1.f); 269 | x2 = clamp(x2, 1.f, img_width - 1.f); 270 | y2 = clamp(y2, 1.f, img_height - 1.f); 271 | 272 | Object object; 273 | object.score = score; 274 | object.label = label; 275 | object.box = {x1, y1, x2 - x1, y2 - y1}; 276 | objects.push_back(object); 277 | } 278 | if (draw) { 279 | draw_on_image(image, objects); 280 | } 281 | return 0; 282 | } 283 | 284 | #endif //__RKRUNNER__ 285 | -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/pysrc/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/Rockchip/cpp/yolov5/pysrc/bus.jpg -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/pysrc/dataset.txt: -------------------------------------------------------------------------------- 1 | zidane.jpg -------------------------------------------------------------------------------- /Rockchip/cpp/yolov5/pysrc/onnx2rknn.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as 
import numpy as np
from numpy import ndarray
from typing import List
from dataclasses import dataclass

ONNX_MODEL = 'yolov5s.onnx'
RKNN_MODEL = 'yolov5s.rknn'
IMG_PATH = 'bus.jpg'
DATASET = './dataset.txt'

QUANTIZE_ON = True

OBJ_THRESH = 0.25
NMS_THRESH = 0.45
IMG_SIZE = 640

CLASSES = ("person", "bicycle", "car", "motorbike ", "aeroplane ", "bus ", "train", "truck ", "boat", "traffic light",
           "fire hydrant", "stop sign ", "parking meter", "bench", "bird", "cat", "dog ", "horse ", "sheep", "cow",
           "elephant",
           "bear", "zebra ", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
           "snowboard", "sports ball", "kite",
           "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
           "fork", "knife ",
           "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza ", "donut",
           "cake", "chair", "sofa",
           "pottedplant", "bed", "diningtable", "toilet ", "tvmonitor", "laptop ", "mouse ", "remote ",
           "keyboard ", "cell phone", "microwave ",
           "oven ", "toaster", "sink", "refrigerator ", "book", "clock", "vase", "scissors ", "teddy bear ",
           "hair drier", "toothbrush ")

# Anchor (w, h) pairs for the stride-8/16/32 heads, already divided by stride.
ANCHORS = [[1.25, 1.625, 2.0, 3.75, 4.125, 2.875],
           [1.875, 3.8125, 3.875, 2.8125, 3.6875, 7.4375],
           [3.625, 2.8125, 4.875, 6.1875, 11.65625, 10.1875]]


@dataclass
class Object:
    # Detected object: class index, confidence, box as [x, y, w, h]
    # (converted to [x1, y1, x2, y2] by nms()).
    label: int
    score: float
    box: ndarray


def sigmoid(x):
    """Element-wise logistic sigmoid."""
    return 1. / (1. + np.exp(-x))


def yolov5_decode(feats: List[ndarray],
                  anchors: List[List[float]],
                  conf_thres: float) -> List[Object]:
    """Decode raw YOLOv5 head outputs into candidate detections.

    feats: one NHWC array per head (strides 8, 16, 32, in that order);
           the channel axis packs 3 anchors x (4 box + 1 obj + num_classes).
    anchors: per-head flat [w0, h0, w1, h1, w2, h2] lists (stride units).
    conf_thres: objectness AND class-score threshold.

    Returns proposals with boxes as [x1, y1, w, h] in network-input pixels.
    """
    proposals: List[Object] = []
    for i, feat in enumerate(feats):
        stride = 8 << i
        feat_h, feat_w = feat.shape[1:-1]
        # (3, 2) view: anchor[a, 0] = width, anchor[a, 1] = height.
        anchor = np.asarray(anchors[i], dtype=np.float32).reshape(3, 2)
        feat = sigmoid(feat)
        feat = feat.reshape((feat_h, feat_w, 3, -1))
        box_feat, conf_feat, score_feat = np.split(feat, [4, 5], -1)

        # Cells whose objectness clears the threshold.
        hIdx, wIdx, aIdx, _ = np.where(conf_feat > conf_thres)
        if not hIdx.size:
            continue

        cls_scores = score_feat[hIdx, wIdx, aIdx] * conf_feat[hIdx, wIdx, aIdx]
        labels = cls_scores.argmax(-1)
        scores = cls_scores.max(-1)

        # Second filter on the combined class*objectness score.
        keep = np.where(scores > conf_thres)[0]
        if keep.size == 0:
            continue

        # Vectorized YOLOv5 box decode (replaces the former per-proposal loop,
        # same values and same ordering).
        boxes = box_feat[hIdx, wIdx, aIdx][keep]
        a_w = anchor[aIdx[keep], 0]
        a_h = anchor[aIdx[keep], 1]
        x = (boxes[:, 0] * 2.0 - 0.5 + wIdx[keep]) * stride
        y = (boxes[:, 1] * 2.0 - 0.5 + hIdx[keep]) * stride
        w = (boxes[:, 2] * 2.0) ** 2 * a_w * stride
        h = (boxes[:, 3] * 2.0) ** 2 * a_h * stride
        x1 = x - w / 2
        y1 = y - h / 2

        for j, idx in enumerate(keep):
            proposals.append(Object(int(labels[idx]), float(scores[idx]),
                                    np.array([x1[j], y1[j], w[j], h[j]], dtype=np.float32)))
    return proposals


def nms(proposals: List[Object], conf_thres: float = 0.25, iou_thres: float = 0.65) -> List[Object]:
    """Class-aware NMS over decoded proposals.

    Mutates kept proposals' boxes from [x1, y1, w, h] to [x1, y1, x2, y2].
    """
    import cv2  # local import: only the NMS step needs OpenCV

    bboxes = []
    scores = []
    class_ids = []
    for proposal in proposals:
        bboxes.append(proposal.box)
        scores.append(proposal.score)
        class_ids.append(proposal.label)
    indices = cv2.dnn.NMSBoxesBatched(bboxes, scores, class_ids, conf_thres, iou_thres)
    results = []
    for idx in indices:
        result = proposals[idx]
        result.box[2:] += result.box[:2]  # wh -> x2y2
        results.append(result)
    return results
def draw(image, results: "List[Object]"):
    """Draw final detections (boxes already in [x1, y1, x2, y2]) on `image` in-place."""
    for result in results:
        box = result.box
        cl = result.label
        score = result.score
        # BUG FIX (naming): these values are x1, y1, x2, y2 — the old names
        # (top, left, right, bottom) bound `top` to the x-coordinate.
        x1, y1, x2, y2 = box
        print(f'class: {CLASSES[cl]}, score: {score}')
        print(f'box coordinate left,top,right,down: [{x1}, {y1}, {x2}, {y2}]')
        x1 = int(x1)
        y1 = int(y1)
        x2 = int(x2)
        y2 = int(y2)

        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(image, f'{CLASSES[cl]} {score:.2f}',
                    (x1, y1 - 6),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)


if __name__ == '__main__':
    # Local imports keep this script section self-contained.
    import cv2
    from rknn.api import RKNN

    # Create RKNN object
    rknn = RKNN(verbose=True)

    # Pre-process config: inputs are raw RGB scaled to [0, 1].
    print('--> Config model')
    rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform='rk3588s')
    print('done')

    # Load ONNX model
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build (and optionally INT8-quantize using DATASET) the model
    print('--> Building model')
    ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export RKNN model
    print('--> Export rknn model')
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')

    # Init runtime environment (simulator on x86 when no target is given)
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime('rk3566')
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Set inputs. NOTE(review): plain resize distorts aspect ratio; the
    # letterbox call was intentionally left disabled by the original author.
    img = cv2.imread(IMG_PATH)
    # img, ratio, (dw, dh) = letterbox(img, new_shape=(IMG_SIZE, IMG_SIZE))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    print('done')

    # Post process: decode heads, then class-aware NMS.
    proposals = yolov5_decode(outputs, ANCHORS, OBJ_THRESH)
    results = nms(proposals, OBJ_THRESH, NMS_THRESH)

    img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    if len(results) > 0:
        draw(img_1, results)
    cv2.imwrite('result.jpg', img_1)

    rknn.release()
24 | ``` 25 | 26 | Convert pt to onnx: 27 | 28 | ```bash 29 | git clone https://github.com/triple-Mu/AI-on-Board.git 30 | cd AI-on-Board/Rockchip/python/yolov8 31 | # modify the export.py: pt_path to your own first 32 | python export.py 33 | ``` 34 | 35 | Convert onnx to rknn: 36 | 37 | `rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl` is in `packages` 38 | 39 | ```bash 40 | pip install rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl 41 | # modify the onnx2rknn.py: ONNX_MODEL RKNN_MODEL IMG_PATH DATASET IMG_SIZE 42 | python onnx2rknn.py 43 | ``` 44 | 45 | ### 1.2 On ARM RK3588 46 | 47 | Copy this repo to your board. 48 | 49 | Install rknn-lite and triplemu tools: 50 | `rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl` and `triplemu-0.0.1-cp38-cp38-linux_aarch64.whl` is in `packages` 51 | 52 | ```bash 53 | cd AI-on-Board/Rockchip/python/yolov8 54 | # install rknn_toolkit_lite and triplemu tools on RK3588 55 | pip install rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl 56 | pip install triplemu-0.0.1-cp38-cp38-linux_aarch64.whl 57 | ``` 58 | 59 | ## 2. Run 60 | 61 | ```bash 62 | python rknn_infer.py --input zidane.jpg --rknn yolov8s.rknn --show 63 | ``` 64 | 65 | ### Description of all arguments 66 | 67 | - `--input` : The image path or images dir or mp4 path. 68 | - `--rknn` : The rknn model path. 69 | - `--show` : Whether to show results. 70 | - `--output` : The output dir path for saving results. 71 | - `--iou-thres` : IoU threshold for NMS algorithm. 72 | - `--conf-thres` : Confidence threshold for NMS algorithm. 
-------------------------------------------------------------------------------- /Rockchip/python/yolov8/export.py: -------------------------------------------------------------------------------- 1 | from ultralytics import YOLO 2 | 3 | pt_path = 'yolov8s.pt' 4 | model = YOLO(pt_path) 5 | model.export(format='onnx', opset=12, simplify=True, imgsz=640) -------------------------------------------------------------------------------- /Rockchip/python/yolov8/imagelist.txt: -------------------------------------------------------------------------------- 1 | zidane.jpg -------------------------------------------------------------------------------- /Rockchip/python/yolov8/onnx2rknn.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from rknn.api import RKNN 3 | 4 | ONNX_MODEL = 'yolov8s.onnx' 5 | RKNN_MODEL = 'yolov8s.rknn' 6 | IMG_PATH = 'zidane.jpg' 7 | DATASET = 'imagelist.txt' 8 | IMG_SIZE = 640 9 | 10 | QUANTIZE_ON = True 11 | 12 | # Create RKNN object 13 | rknn = RKNN(verbose=True) 14 | 15 | # pre-process config 16 | print('--> Config model') 17 | rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], 18 | quantized_algorithm='kl_divergence', 19 | quantized_method='layer', 20 | target_platform='rk3588s', 21 | custom_string='yolov8s') 22 | print('done') 23 | 24 | # Load ONNX model 25 | print('--> Loading model') 26 | ret = rknn.load_onnx(model=ONNX_MODEL) 27 | if ret != 0: 28 | print('Load model failed!') 29 | exit(ret) 30 | print('done') 31 | 32 | # Build model 33 | print('--> Building model') 34 | ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET) 35 | if ret != 0: 36 | print('Build model failed!') 37 | exit(ret) 38 | print('done') 39 | 40 | # Export RKNN model 41 | print('--> Export rknn model') 42 | ret = rknn.export_rknn(RKNN_MODEL) 43 | if ret != 0: 44 | print('Export rknn model failed!') 45 | exit(ret) 46 | print('done') 47 | 48 | # Init runtime environment 49 | print('--> Init 
runtime environment') 50 | ret = rknn.init_runtime() 51 | # ret = rknn.init_runtime('rk3566') 52 | if ret != 0: 53 | print('Init runtime environment failed!') 54 | exit(ret) 55 | print('done') 56 | 57 | # Set inputs 58 | img = cv2.imread(IMG_PATH) 59 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 60 | img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)) 61 | 62 | # Inference 63 | print('--> Running model') 64 | outputs = rknn.inference(inputs=[img]) 65 | print('done') 66 | 67 | rknn.release() 68 | -------------------------------------------------------------------------------- /Rockchip/python/yolov8/rknn_infer.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import cv2 3 | import random 4 | import numpy as np 5 | from pathlib import Path 6 | from rknnlite.api import RKNNLite # noqa 7 | from triplemu import postprocess_yolov8 # noqa 8 | 9 | IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', 10 | '.tiff', '.webp') 11 | 12 | random.seed(0) 13 | 14 | CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 15 | 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 16 | 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 17 | 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 18 | 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 19 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 20 | 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 21 | 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 22 | 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 23 | 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 25 | 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 26 | 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 27 | 'scissors', 'teddy bear', 'hair drier', 'toothbrush') 28 | 29 | COLORS = { 30 | 
def parse_args():
    """Parse CLI arguments.

    Accepts both `--score-thr`/`--conf-thres` and `--iou-thr`/`--iou-thres`
    spellings — the README documents the `-thres` variants, which the old
    parser rejected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, help='Image path, images path or video path.')
    parser.add_argument('--rknn', type=str, help='Rknn path file')
    parser.add_argument('--output', type=str, default='output', help='Output path for saving results.')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    parser.add_argument(
        '--score-thr', '--conf-thres', dest='score_thr',
        type=float, default=0.25, help='Bbox score threshold')
    parser.add_argument(
        '--iou-thr', '--iou-thres', dest='iou_thr',
        type=float, default=0.65, help='Bbox iou threshold')
    args = parser.parse_args()
    return args


def load_rknn(rknn_path: str, core_id: int = 0):
    """Load an .rknn model and bind it to an NPU core.

    core_id 0/1/2 -> that core, -1 -> all three cores, anything else -> default.
    """
    rknn_lite = RKNNLite()
    ret = rknn_lite.load_rknn(rknn_path)
    assert ret == 0

    if core_id == 0:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
    elif core_id == 1:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_1)
    elif core_id == 2:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_2)
    elif core_id == -1:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
    else:
        ret = rknn_lite.init_runtime()
    assert ret == 0
    return rknn_lite


def run(rknn_lite: 'RKNNLite', image_bgr: np.ndarray,
        conf_thres: float, iou_thres: float,
        net_h: int, net_w: int, num_classes: int = 80) -> list:
    """Run one inference: resize, BGR->RGB, NPU inference, yolov8 postprocess.

    Returns the detection list produced by `postprocess_yolov8`
    (objects with x1/y1/x2/y2/label/score in original-image coordinates).
    """
    orin_h, orin_w = image_bgr.shape[:2]
    ratio_h = net_h / orin_h
    ratio_w = net_w / orin_w
    image_bgr = cv2.resize(image_bgr, (0, 0), fx=ratio_w, fy=ratio_h, interpolation=cv2.INTER_LINEAR)
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    outputs = rknn_lite.inference(inputs=[image_rgb])
    outputs = postprocess_yolov8(*outputs,
                                 conf_thres=conf_thres, iou_thres=iou_thres,
                                 net_h=net_h, net_w=net_w,
                                 orin_h=orin_h, orin_w=orin_w,
                                 ratio_h=ratio_h, ratio_w=ratio_w,
                                 num_classes=num_classes)
    return outputs


def draw_on_image(image_bgr: np.ndarray, outputs: list):
    """Draw each detection's box and `label:score` caption on `image_bgr` in-place."""
    for output in outputs:
        x1 = output.x1
        y1 = output.y1
        x2 = output.x2
        y2 = output.y2
        cls = CLASSES[output.label]
        color = COLORS[cls]
        cv2.rectangle(image_bgr, [x1, y1], [x2, y2], color, 2)
        cv2.putText(image_bgr,
                    f'{cls}:{output.score:.3f}', (x1, y1 - 2),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, [0, 0, 225],
                    thickness=2)


def main():
    """Dispatch on --input: single image, directory of images, or .mp4 video."""
    args = parse_args()
    rknn_lite = load_rknn(args.rknn)
    inputs = Path(args.input)
    output_path = Path(args.output)
    if not args.show:
        output_path.mkdir(parents=True, exist_ok=True)

    if inputs.suffix in IMG_EXTENSIONS:
        image = cv2.imread(str(inputs))
        save_path = output_path / inputs.name
        outputs = run(rknn_lite, image, args.score_thr, args.iou_thr, 640, 640, num_classes=len(CLASSES))
        draw_on_image(image, outputs)
        if args.show:
            cv2.imshow('result', image)
            cv2.waitKey(0)
        else:
            cv2.imwrite(str(save_path), image)
    elif inputs.is_dir():
        for image_path in inputs.iterdir():
            if image_path.suffix in IMG_EXTENSIONS:
                image = cv2.imread(str(image_path))
                # BUG FIX: was `inputs.name` (the directory name), which made
                # every image in the directory overwrite the same output file.
                save_path = output_path / image_path.name
                outputs = run(rknn_lite, image, args.score_thr, args.iou_thr, 640, 640, num_classes=len(CLASSES))
                draw_on_image(image, outputs)
                if args.show:
                    cv2.imshow('result', image)
                    cv2.waitKey(0)
                else:
                    cv2.imwrite(str(save_path), image)
    elif inputs.suffix == '.mp4':
        cap = cv2.VideoCapture(str(inputs))
        frame_id = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            save_path = output_path / f'frame_{frame_id:04d}.jpg'
            outputs = run(rknn_lite, frame, args.score_thr, args.iou_thr, 640, 640, num_classes=len(CLASSES))
            draw_on_image(frame, outputs)
            if args.show:
                cv2.imshow('result', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                cv2.imwrite(str(save_path), frame)
            # BUG FIX: frame_id was never incremented, so every frame was
            # written to frame_0000.jpg.
            frame_id += 1
        cap.release()
    cv2.destroyAllWindows()
    rknn_lite.release()


if __name__ == '__main__':
    main()
# Runtime-configurable class count; overwritten by parse_args().
NUM_CLASSES = 80


def parse_args():
    """Parse CLI arguments and publish --num_classes into the module-global
    NUM_CLASSES so worker calls to run() pick it up."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, help='Image path, images path or video path.')
    parser.add_argument('--rknn', type=str, help='Rknn path file')
    parser.add_argument('--output', type=str, default='output', help='Output path for saving results.')
    parser.add_argument('--num_thread', type=int, default=3, help='Num threads')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    parser.add_argument('--num_classes', type=int, default=80, help='Num classes')
    args = parser.parse_args()
    global NUM_CLASSES
    NUM_CLASSES = args.num_classes
    return args


def load_rknn(rknn_path: str, core_id: int = 0):
    """Load an .rknn model and bind it to an NPU core.

    core_id 0/1/2 -> that core, -1 -> all three cores, anything else -> default.
    """
    rknn_lite = RKNNLite()
    ret = rknn_lite.load_rknn(rknn_path)
    assert ret == 0

    if core_id == 0:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
    elif core_id == 1:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_1)
    elif core_id == 2:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_2)
    elif core_id == -1:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
    else:
        ret = rknn_lite.init_runtime()
    assert ret == 0
    return rknn_lite


def draw_on_image(image_bgr: np.ndarray, outputs: list):
    """Draw each detection's box and `label:score` caption on `image_bgr` in-place."""
    for output in outputs:
        x1 = output.x1
        y1 = output.y1
        x2 = output.x2
        y2 = output.y2
        cls = CLASSES[output.label]
        color = COLORS[cls]
        cv2.rectangle(image_bgr, [x1, y1], [x2, y2], color, 2)
        cv2.putText(image_bgr,
                    f'{cls}:{output.score:.3f}', (x1, y1 - 2),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, [0, 0, 225],
                    thickness=2)


def run(rknn_lite: 'RKNNLite', image_bgr: np.ndarray,
        conf_thres: float = 0.25, iou_thres: float = 0.65,
        net_h: int = 640, net_w: int = 640, num_classes=None) -> np.ndarray:
    """Inference worker: resize, BGR->RGB, NPU inference, postprocess, draw.

    Returns `image_bgr` with detections drawn on it.

    BUG FIX: the default used to be `num_classes=NUM_CLASSES`, which bound the
    value 80 at import time — so the `--num_classes` flag (which updates the
    global in parse_args) was silently ignored by the thread pool, which calls
    run() with only two positional arguments. A None sentinel resolved at call
    time picks up the configured value.
    """
    if num_classes is None:
        num_classes = NUM_CLASSES
    orin_h, orin_w = image_bgr.shape[:2]
    ratio_h = net_h / orin_h
    ratio_w = net_w / orin_w
    _image_bgr = cv2.resize(image_bgr, (0, 0), fx=ratio_w, fy=ratio_h, interpolation=cv2.INTER_LINEAR)
    image_rgb = cv2.cvtColor(_image_bgr, cv2.COLOR_BGR2RGB)
    outputs = rknn_lite.inference(inputs=[image_rgb])
    outputs = postprocess_yolov8(*outputs,
                                 conf_thres=conf_thres, iou_thres=iou_thres,
                                 net_h=net_h, net_w=net_w,
                                 orin_h=orin_h, orin_w=orin_w,
                                 ratio_h=ratio_h, ratio_w=ratio_w,
                                 num_classes=num_classes)
    draw_on_image(image_bgr, outputs)
    return image_bgr


class rknnPoolExecutor:
    """Round-robin pool of RKNN runtimes spread over the three NPU cores.

    put() submits a frame to the next runtime; get() returns results in
    submission order (FIFO queue of futures).
    """

    def __init__(self, rknn_path, num_thread, func):
        assert num_thread > 0
        self.num_thread = num_thread
        self.queue = Queue()
        # i % 3 spreads the runtimes across NPU cores 0/1/2.
        self.rknnPool = [load_rknn(rknn_path, i % 3) for i in range(num_thread)]
        self.pool = ThreadPoolExecutor(max_workers=num_thread)
        self.func = func
        self.num = 0  # total frames submitted

    def put(self, frame):
        self.queue.put(
            self.pool.submit(
                self.func, self.rknnPool[self.num % self.num_thread], frame
            ))
        self.num += 1

    def get(self):
        """Return (result, True), or (None, False) when nothing is pending."""
        if self.queue.empty():
            return None, False
        fut = self.queue.get()
        return fut.result(), True

    def release(self):
        self.pool.shutdown()
        for rknn_lite in self.rknnPool:
            rknn_lite.release()


def main():
    """Multi-threaded mp4 inference: keep the pipeline primed so NPU cores stay busy."""
    args = parse_args()
    inputs = Path(args.input)
    output_path = Path(args.output)
    if not args.show:
        output_path.mkdir(parents=True, exist_ok=True)

    assert inputs.suffix == '.mp4'
    pools = rknnPoolExecutor(args.rknn, args.num_thread, run)

    # Prime the pipeline with blank frames so get() always has work queued.
    # NOTE(review): these priming results surface as the first few outputs.
    for i in range(args.num_thread + 1):
        pools.put(np.zeros((640, 640, 3), dtype=np.uint8))

    cap = cv2.VideoCapture(str(inputs))
    frame_id = 0

    loopTime, initTime = time.perf_counter(), time.perf_counter()
    fps = 0
    now = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        save_path = output_path / f'frame_{frame_id:04d}.jpg'
        pools.put(frame)
        image_bgr, flag = pools.get()
        if not flag:
            break
        if args.show:
            cv2.imshow('result', image_bgr)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            cv2.imwrite(str(save_path), image_bgr)
        # BUG FIX: frame_id was never incremented, so every frame was written
        # to frame_0000.jpg.
        frame_id += 1

        if pools.num % 30 == 0:
            now = time.perf_counter()
            fps = 30 / (now - loopTime)
            print(f'30帧平均帧率: {fps:0>6.2f}帧')
            loopTime = now
    now = time.perf_counter()
    fps = pools.num / (now - initTime)
    print(f'总平均帧率: {fps:0>6.2f}帧')
    cap.release()
    pools.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
-------------------------------------------------------------------------------- 1 | ../../../images/zidane.jpg -------------------------------------------------------------------------------- /images/zidane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/images/zidane.jpg -------------------------------------------------------------------------------- /packages/rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/packages/rknn_toolkit2-1.5.0+1fa95b5c-cp38-cp38-linux_x86_64.whl -------------------------------------------------------------------------------- /packages/rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/packages/rknn_toolkit_lite2-1.5.0-cp38-cp38-linux_aarch64.whl -------------------------------------------------------------------------------- /packages/triplemu-0.0.1-cp38-cp38-linux_aarch64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/triple-Mu/AI-on-Board/d528842493431d3b5355b49d17c549def55e6223/packages/triplemu-0.0.1-cp38-cp38-linux_aarch64.whl --------------------------------------------------------------------------------