├── .gitignore ├── LICENSE ├── README.md ├── README_cn.md ├── demos ├── OCR │ └── PaddleOCR │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ ├── paddleocr.png │ │ └── paddleocr_test.jpg │ │ ├── model │ │ └── download.sh │ │ ├── output │ │ └── predict.jpg │ │ ├── paddle_ocr.py │ │ ├── test_paddle_ocr.ipynb │ │ └── yaml │ │ ├── paddleocr_det_config.yaml │ │ └── paddleocr_rec_config.yaml ├── Pose │ └── YOLO11-Pose │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── YOLOv11_Pose.py │ │ ├── cpp │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ └── main.cc │ │ ├── imgs │ │ ├── YOLO11_demo.jpeg │ │ ├── yolo11n_pose_bayese_640x640_nv12.png │ │ └── yolo11n_pose_bayese_640x640_nv12_modified.png │ │ ├── jupyter_BSP_YOLO11_Pose.ipynb │ │ ├── models │ │ └── download.md │ │ └── ptq_yamls │ │ ├── yolo11_pose_bayese_640x640_nchw.yaml │ │ └── yolo11_pose_bayese_640x640_nv12.yaml ├── Seg │ ├── YOLO11-Seg │ │ ├── YOLO11-Seg_NCHWRGB │ │ │ └── yolov8_instance_seg_bayese_640x640_nchw.yaml │ │ └── YOLO11-Seg_YUV420SP │ │ │ ├── README.md │ │ │ ├── README_cn.md │ │ │ ├── YOLO11_Seg_YUV420SP.py │ │ │ ├── config_yolo11_seg_bayese_640x640_nv12.yaml │ │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ │ ├── eval_YOLO11_Seg_generate_labels.py │ │ │ ├── imgs │ │ │ ├── YOLOv8_Instance_Segmentation_Origin.png │ │ │ ├── YOLOv8_Instance_Segmentation_Quantize.png │ │ │ ├── demo_rdkx5_yolov8n_seg.jpg │ │ │ ├── yolo11nseg_detect_bayese_640x640_nv12.png │ │ │ └── yolo11nseg_detect_bayese_640x640_nv12_modified.png │ │ │ ├── jupyter_YOLO11_Seg.ipynb │ │ │ └── ptq_models │ │ │ └── download.md │ ├── YOLOE-11-Seg-Prompt-Free │ │ └── YOLOE-11-Seg-Prompt-Free_YUV420SP │ │ │ ├── README.md │ │ │ ├── README_cn.md │ │ │ ├── YOLOE-11-Seg-PromptFree_YUV420SP.py │ │ │ ├── cauchy_yoloe11segPF_export.py │ │ │ ├── config_yolo11e_seg_pf_bayese_640x640_nv12.yaml │ │ │ ├── imgs │ │ │ ├── demo_rdkx5_indoor.jpg │ │ │ ├── demo_rdkx5_indoor2.jpg │ │ │ ├── demo_rdkx5_outdoor.jpg │ │ │ └── 
visualization.svg │ │ │ └── ptq_models │ │ │ └── download.md │ └── YOLOv8-Seg │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── YOLOv8_Seg.py │ │ ├── cpp │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ └── main.cc │ │ ├── imgs │ │ ├── YOLOv8_Instance_Segmentation_Origin.png │ │ ├── YOLOv8_Instance_Segmentation_Quantize.png │ │ ├── demo_rdkx5_yolov8n_seg.jpg │ │ ├── yolov8n_instance_seg_bayese_640x640_nv12.png │ │ └── yolov8n_instance_seg_bayese_640x640_nv12_modified.png │ │ ├── jupyter_BSP_YOLOv8_Instance_Segmentation.ipynb │ │ ├── models │ │ └── download.md │ │ └── ptq_yamls │ │ ├── yolov8_instance_seg_bayese_640x640_nchw.yaml │ │ └── yolov8_instance_seg_bayese_640x640_nv12.yaml ├── classification │ ├── ConvNeXt │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ConvNeXt_Block.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── cheetah.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_ConvNeXt_atto.ipynb │ │ ├── test_ConvNeXt_femto.ipynb │ │ ├── test_ConvNeXt_nano.ipynb │ │ ├── test_ConvNeXt_pico.ipynb │ │ └── yaml │ │ │ ├── ConvNeXt_atto.yaml │ │ │ ├── ConvNeXt_femto.yaml │ │ │ └── ConvNeXt_nano.yaml │ ├── EdgeNeXt │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── EdgeNeXt_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── Zebra.jpg │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_EdgeNeXt_base.ipynb │ │ ├── test_EdgeNeXt_small.ipynb │ │ ├── test_EdgeNeXt_x_small.ipynb │ │ ├── test_EdgeNeXt_xx_small.ipynb │ │ └── yaml │ │ │ ├── EdgeNeXt_base_config.yaml │ │ │ ├── EdgeNeXt_small_config.yaml │ │ │ ├── EdgeNeXt_x_small_config.yaml │ │ │ └── EdgeNeXt_xx_small_config.yaml │ ├── EfficientFormer │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── EfficientFormer_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── bittern.JPEG │ │ │ ├── inference.png │ │ │ └── latency_profiling.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_EfficientFormer_l1.ipynb │ │ ├── test_EfficientFormer_l3.ipynb │ │ └── 
yaml │ │ │ ├── EfficientFormer_l1_config.yaml │ │ │ └── EfficientFormer_l3_config.yaml │ ├── EfficientFormerV2 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── EfficientFormerV2_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── goldfish.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_EfficientFormerv2_s0.ipynb │ │ ├── test_EfficientFormerv2_s1.ipynb │ │ ├── test_EfficientFormerv2_s2.ipynb │ │ └── yaml │ │ │ ├── EfficientFormerv2_s0_config.yaml │ │ │ ├── EfficientFormerv2_s1_config.yaml │ │ │ └── EfficientFormerv2_s2_config.yaml │ ├── EfficientNet │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── EfficientNet_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── inference.png │ │ │ └── redshank.JPEG │ │ ├── model │ │ │ └── download.sh │ │ ├── test_EfficientNet_B2.ipynb │ │ ├── test_EfficientNet_B3.ipynb │ │ ├── test_EfficientNet_B4.ipynb │ │ └── yaml │ │ │ ├── EfficientNet_B2_config.yaml │ │ │ ├── EfficientNet_B3_config.yaml │ │ │ └── EfficientNet_B4_config.yaml │ ├── EfficientViT_MSRA │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── Comparison between Transformer & CNN.png │ │ │ ├── EfficientViT_msra_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── MHSA computation.jpg │ │ │ ├── hook.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_EfficientViT_MSRA_m5.ipynb │ │ └── yaml │ │ │ └── EfficientViT_MSRA_m5_config.yaml │ ├── FastViT │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── FastViT_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── bucket.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_FastViT_S12.ipynb │ │ ├── test_FastViT_SA12.ipynb │ │ ├── test_FastViT_T12.ipynb │ │ ├── test_FastViT_T8.ipynb │ │ └── yaml │ │ │ ├── FastViT_S12_config.yaml │ │ │ ├── FastViT_SA12_config.yaml │ │ │ ├── FastViT_T12_config.yaml │ │ │ └── FastViT_T8_config.yaml │ ├── FasterNet │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ 
├── FLOPs of Nets.png │ │ │ ├── FasterNet_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── drake.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_FasterNet_S.ipynb │ │ ├── test_FasterNet_T0.ipynb │ │ ├── test_FasterNet_T1.ipynb │ │ ├── test_FasterNet_T2.ipynb │ │ └── yaml │ │ │ ├── FasterNet_S_config.yaml │ │ │ ├── FasterNet_T0_config.yaml │ │ │ ├── FasterNet_T1_config.yaml │ │ │ └── FasterNet_T2_config.yaml │ ├── GoogLeNet │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── GoogLeNet_architecture.png │ │ │ ├── ImageNet_1k.json │ │ │ ├── indigo_bunting.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ └── test_GoogLeNet.ipynb │ ├── MobileNetV1 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── bulbul.JPEG │ │ │ ├── depthwise&pointwise.png │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ └── test_MobileNetV1.ipynb │ ├── MobileNetV2 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── Scottish_deerhound.JPEG │ │ │ ├── inference.png │ │ │ ├── mobilenetv2_architecture.png │ │ │ └── seperated_conv.png │ │ ├── model │ │ │ └── download.sh │ │ └── test_MobileNetV2.ipynb │ ├── MobileNetV3 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── MobileNetV3_architecture.png │ │ │ ├── inference.png │ │ │ └── kit_fox.JPEG │ │ ├── model │ │ │ └── download.sh │ │ ├── test_MobileNetV3.ipynb │ │ └── yaml │ │ │ └── MobileNetV3_config.yaml │ ├── MobileNetV4 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── MobileNetV4_architecture.png │ │ │ ├── great_grey_owl.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_MobileNetV4_conv_medium_224x224_nv12.ipynb │ │ ├── test_MobileNetV4_conv_small_224x224_nv12.ipynb │ │ └── yaml │ │ │ ├── MobileNetV4_medium.yaml │ │ │ └── MobileNetV4_small.yaml │ ├── MobileOne │ │ ├── README.md │ │ ├── README_cn.md 
│ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── MobileOne_architecture.png │ │ │ ├── inference.png │ │ │ └── tiger_beetle.JPEG │ │ ├── model │ │ │ └── download.sh │ │ ├── test_MobileOne_S0.ipynb │ │ ├── test_MobileOne_S1.ipynb │ │ ├── test_MobileOne_S2.ipynb │ │ ├── test_MobileOne_S3.ipynb │ │ ├── test_MobileOne_S4.ipynb │ │ └── yaml │ │ │ ├── MobileOne_S0_config.yaml │ │ │ ├── MobileOne_S1_config.yaml │ │ │ ├── MobileOne_S2_config.yaml │ │ │ ├── MobileOne_S3_config.yaml │ │ │ └── MobileOne_S4_config.yaml │ ├── Model quantization deployment.md │ ├── README.md │ ├── README_cn.md │ ├── RepGhost │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── RepGhost_architecture.png │ │ │ ├── ibex.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_RepGhost_100.ipynb │ │ ├── test_RepGhost_111.ipynb │ │ ├── test_RepGhost_130.ipynb │ │ ├── test_RepGhost_150.ipynb │ │ ├── test_RepGhost_200.ipynb │ │ └── yaml │ │ │ ├── RepGhost_100.yaml │ │ │ ├── RepGhost_111.yaml │ │ │ ├── RepGhost_130.yaml │ │ │ ├── RepGhost_150.yaml │ │ │ └── RepGhost_200.yaml │ ├── RepVGG │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── RepVGG_architecture.png │ │ │ ├── gooze.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_RepVGG_A0.ipynb │ │ ├── test_RepVGG_A1.ipynb │ │ ├── test_RepVGG_A2.ipynb │ │ ├── test_RepVGG_B0.ipynb │ │ ├── test_RepVGG_B1g2.ipynb │ │ ├── test_RepVGG_B1g4.ipynb │ │ └── yaml │ │ │ ├── RepVGG_A0_config.yaml │ │ │ ├── RepVGG_A1_config.yaml │ │ │ ├── RepVGG_A2_config.yaml │ │ │ ├── RepVGG_B0_config.yaml │ │ │ ├── RepVGG_B1g2_config.yaml │ │ │ └── RepVGG_B1g4_config.yaml │ ├── RepViT │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── RepViT_DW.png │ │ │ ├── RepViT_architecture.png │ │ │ ├── inference.png │ │ │ └── yurt.JPEG │ │ ├── model │ │ │ └── download.sh │ │ ├── test_RepViT_m0_9.ipynb │ │ ├── test_RepViT_m1_0.ipynb │ │ 
├── test_RepViT_m1_1.ipynb │ │ └── yaml │ │ │ ├── RepViT_m0_9_config.yaml │ │ │ ├── RepViT_m1_0_config.yaml │ │ │ └── RepViT_m1_1_config.yaml │ ├── ResNeXt │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── ResNet&ResNeXt.png │ │ │ ├── bee_eater.JPEG │ │ │ └── inference.png │ │ ├── model │ │ │ └── download.sh │ │ ├── test_ResNeXt50_32x4d.ipynb │ │ └── yaml │ │ │ └── ResNeXt50_32x4d_config.yaml │ ├── ResNet │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ ├── ResNet_architecture.png │ │ │ ├── ResNet_architecture2.png │ │ │ ├── inference.png │ │ │ └── white_wolf.JPEG │ │ ├── model │ │ │ └── download.sh │ │ └── test_ResNet18.ipynb │ ├── VargConvNet │ │ ├── data │ │ │ ├── ImageNet_1k.json │ │ │ └── box_turtle.JPEG │ │ ├── model │ │ │ └── download.sh │ │ └── test_VargConvNet.ipynb │ └── 模型量化部署.md ├── detect │ ├── FCOS │ │ ├── FCOS.py │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── imgs │ │ │ ├── demo_rdkx5_fcos_detect.jpg │ │ │ ├── fcos_512x512_nv12.png │ │ │ ├── fcos_efficientnetb0_512x512_nv12.png │ │ │ ├── fcos_efficientnetb2_768x768_nv12.png │ │ │ └── fcos_efficientnetb3_896x896_nv12.png │ │ ├── jupyter_BSP_FCOS_Detect.ipynb │ │ ├── jupyter_result.jpg │ │ └── models │ │ │ └── download.md │ ├── YOLO11 │ │ ├── YOLO11-Detect_NCHWRGB │ │ │ ├── config_yolo11_detect_bayese_640x640_nchw.yaml │ │ │ ├── cpp │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.md │ │ │ │ └── main.cc │ │ │ └── ptq_models │ │ │ │ └── download.md │ │ └── YOLO11-Detect_YUV420SP │ │ │ ├── README.md │ │ │ ├── README_cn.md │ │ │ ├── YOLO11_Detect_YUV420SP.py │ │ │ ├── config_yolo11_detect_bayese_640x640_nv12.yaml │ │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ │ ├── eval_YOLO11_Detect_generate_labels.py │ │ │ ├── imgs │ │ │ ├── YOLOv11_Detect_Origin.png │ │ │ ├── YOLOv11_Detect_Quantize.png │ │ │ ├── demo_rdkx5_yolov11n_detect.jpg │ │ │ ├── ltrb2xyxy.jpg │ │ │ ├── yolo11n_detect_bayese_640x640_nv12.png 
│ │ │ └── yolo11n_detect_bayese_640x640_nv12_modified.png │ │ │ └── ptq_models │ │ │ └── download.md │ ├── YOLO12 │ │ ├── YOLO12-Detect_NCHWRGB │ │ │ ├── config_yolov12_detect_nchw.yaml │ │ │ ├── cpp │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.md │ │ │ │ └── main.cc │ │ │ └── ptq_models │ │ │ │ └── downloads.md │ │ └── YOLO12-Detect_YUV420SP │ │ │ ├── README.md │ │ │ ├── README_cn.md │ │ │ ├── YOLO12_Detect_YUV420SP.py │ │ │ ├── config_yolov12_detect_nv12.yaml │ │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ │ ├── eval_YOLO12_Detect_generate_labels.py │ │ │ ├── imgs │ │ │ ├── yolov12n_detect_bayese_640x640_nv12.png │ │ │ └── yolov12n_detect_bayese_640x640_nv12_modified.png │ │ │ ├── jupyter_YOLO12_Detect.ipynb │ │ │ └── ptq_models │ │ │ └── README.md │ ├── YOLOv10 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── YOLOv10_Detect.py │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ ├── imgs │ │ │ ├── YOLOv10_Detect_Origin.png │ │ │ ├── YOLOv10_Detect_Quantize.png │ │ │ ├── YOLOv8_Detect_Quantize.png │ │ │ └── demo_rdkx5_yolov10n_detect.jpg │ │ ├── jupyter_BSP_YOLOv10_Detect.ipynb │ │ ├── jupyter_result.jpg │ │ ├── models │ │ │ └── download.md │ │ └── ptq_yamls │ │ │ ├── yolov10_detect_bayese_640x640_nchw.yaml │ │ │ ├── yolov10_detect_bayese_640x640_nv12.yaml │ │ │ └── yolov10_detect_bernoulli2_640x640_nv12.yaml │ ├── YOLOv5 │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── YOLOv5_Detect.py │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ ├── imgs │ │ │ ├── demo_rdkx5_yolov5s_tag2.0_detect.jpg │ │ │ ├── yolov5n_tag_v7.0_detect_640x640_bayese_nv12.png │ │ │ └── yolov5s_tag_v2.0_detect_640x640_bayese_nv12.png │ │ ├── jupyter_BSP_YOLOv5.ipynb │ │ ├── models │ │ │ └── download.md │ │ └── ptq_yamls │ │ │ ├── yolov5_detect_bayese_640x640_nchw.yaml │ │ │ └── yolov5_detect_bayese_640x640_nv12.yaml │ └── YOLOv8 │ │ ├── YOLOv8-Detect_NCHWRGB │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── 
config_yolov8_detect_nchw.yaml │ │ ├── cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ └── main.cc │ │ ├── eval_cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ ├── eval_convert.py │ │ │ └── main.cc │ │ └── ptq_models │ │ │ └── README.md │ │ └── YOLOv8-Detect_YUV420SP │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── YOLOv8_Detect_YUV420SP.py │ │ ├── config_yolov8_detect_nv12.yaml │ │ ├── cpp │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ └── main.cc │ │ ├── eval │ │ ├── HB_ONNX_YOLOv8_Detect.py │ │ ├── HB_ONNX_eval_generate_labels.py │ │ ├── ONNX_YOLOv8_Detect.py │ │ ├── ONNX_eval_generate_labels.py │ │ ├── ONNX_nv12_eval_generate_labels.py │ │ ├── Pytorch_eval_generate_labels.py │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── eval_cpp │ │ │ ├── CMakeLists.txt │ │ │ ├── README.md │ │ │ ├── eval_convert.py │ │ │ └── main.cc │ │ └── eval_pycocotools.py │ │ ├── eval_YOLOv8_Detect_generate_labels.py │ │ ├── imgs │ │ ├── YOLOv8_Detect_Origin.png │ │ ├── YOLOv8_Detect_Quantize.png │ │ ├── demo_rdkx5_yolov8n_detect.jpg │ │ ├── ltrb2xyxy.jpg │ │ ├── yolov8n_detect_bayese_640x640_nv12.png │ │ └── yolov8n_detect_bayese_640x640_nv12_modified.png │ │ ├── jupyter_YOLOv8_Detect.ipynb │ │ └── ptq_models │ │ └── README.md ├── llm │ ├── clip │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── bpe_simple_vocab_16e6.txt.gz │ │ ├── clip.ipynb │ │ ├── dog.jpg │ │ ├── download.sh │ │ └── simple_tokenizer.py │ └── yoloworld │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── dog.jpeg │ │ ├── download.sh │ │ ├── offline_vocabulary_embeddings.json │ │ └── yoloworld.ipynb ├── solutions │ ├── RDK_LLM_Solutions │ │ ├── README.md │ │ ├── README_cn.md │ │ └── imgs │ │ │ └── RDK_LLM_Solution.jpg │ └── RDK_Video_Solutions │ │ ├── IPC_Camera │ │ ├── README.md │ │ └── README_cn.md │ │ ├── MIPI_Camera │ │ ├── README.md │ │ └── README_cn.md │ │ ├── README.md │ │ ├── README_cn.md │ │ ├── USB_Camera │ │ ├── README.md │ │ └── README_cn.md │ │ └── imgs │ │ ├── Parallel_Programming.png │ │ ├── 
RDK_Video_Solution.jpg │ │ ├── Serial_Programming.png │ │ ├── TROS_IPC_Camera.png │ │ ├── TROS_MIPI_Camera.png │ │ └── TROS_USB_Camera.png └── tools │ ├── batch_eval_pycocotools │ ├── README.md │ ├── batch_scp.sh │ ├── cn_COCO2017val.md │ ├── en_COCO2017val.md │ ├── eval_batch_cpp.py │ ├── eval_batch_python.py │ ├── eval_batch_pytorch_generate_labels.py │ ├── eval_batch_pytorch_generate_labels_seg.py │ ├── eval_pycocotools.py │ ├── eval_pycocotools_seg.py │ ├── eval_pytorch_generate_labels.py │ └── eval_pytorch_generate_labels_seg.py │ ├── batch_mapper │ ├── README.md │ └── batch_mapper.py │ ├── batch_perf │ ├── README_cn.md │ └── batch_perf.py │ └── generate_calibration_data │ ├── README.md │ └── generate_calibration_data.py ├── requirement.txt └── resource ├── assets ├── bus.jpg ├── kite.jpg ├── small_img_1.jpg ├── small_img_2.jpg ├── zebra_cls.jpg └── zidane.jpg ├── datasets ├── COCO2017 │ ├── README.md │ └── assets │ │ ├── bus.jpg │ │ ├── kite.jpg │ │ ├── kite_small.jpg │ │ └── zidane.jpg ├── ImageNet │ ├── README.md │ └── README_cn.md └── PascalVOC │ ├── README.md │ └── README_cn.md └── imgs ├── FAQ.jpg ├── ImageNet.png ├── basic_usage.png ├── basic_usage_res.png ├── demo_rdkx5_yolov10n_detect.jpg ├── into_jupyter.png ├── jupyter_start.png ├── model_zoo_logo.jpg ├── paddleocr.png └── vscode_demo.jpg /.gitignore: -------------------------------------------------------------------------------- 1 | *__pycache__* 2 | *.ipynb_checkpoints* 3 | *.vscode* 4 | *.pt 5 | *pth 6 | *.onnx 7 | *.bin 8 | *.hbm 9 | *wuc_note* 10 | *.vscode* 11 | *build* 12 | *_result.jpg* 13 | *py_result.jpg 14 | *jupyter_result.jpg* 15 | *coco2017_image_result* 16 | *coco2017_val_pridect.json 17 | *coco2017_val_pridect_nv12.json 18 | *cpp_json_result.txt* 19 | 20 | 21 | *zip 22 | resource/datasets/COCO2017/annotations/* 23 | resource/datasets/COCO2017/val2017/* 24 | 25 | demos/detect/YOLO11/YOLO11-Detect_YUV420SP/eval/* 26 | 27 | demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/eval_cpp 28 | 
demos/detect/YOLO12/YOLO12-Detect_YUV420SP/eval_cpp -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | # PaddleOCR 文字识别 4 | 5 | - [PaddleOCR 文字识别](#paddleocr-文字识别) 6 | - [1. PaddleOCR 简介](#1-paddleocr-简介) 7 | - [2. 性能数据](#2-性能数据) 8 | - [3. 模型下载地址](#3-模型下载地址) 9 | - [4. 部署测试](#4-部署测试) 10 | 11 | 12 | ## 1. PaddleOCR 简介 13 | 14 | PaddleOCR 是百度飞桨基于深度学习的光学字符识别(OCR)工具,利用 PaddlePaddle 框架来执行图片中的文字识别任务。该仓库通过图像预处理、文字检测、文字识别等多个阶段,将图像中的文字转换为可编辑的文本。PaddleOCR 支持多语言和多字体的识别,适合各种复杂场景下的文字提取任务。PaddleOCR 还支持自定义训练,用户可以根据特定需求准备训练数据,进一步优化模型表现。 15 | 16 | 在实际应用中,PaddleOCR 的工作流程包括以下几个步骤: 17 | 18 | - **图像预处理**:对输入的图像进行去噪、尺寸调整等处理,使其适合后续的检测和识别。 19 | - **文字检测**:通过深度学习模型检测图像中的文字区域,生成检测框。 20 | - **文字识别**:对检测框内的文字内容进行识别,生成最终的文字结果。 21 | 22 | 本仓库提供的示例根据 PaddleOCR 官方提供的案例,通过模型转换、模型量化和图像后处理后可实际运行可清晰识别字符,可运行 jupyter 脚本文件得到模型推理的结果。 23 | 24 | **github 地址**:https://github.com/PaddlePaddle/PaddleOCR 25 | 26 | ![alt text](../../../resource/imgs/paddleocr.png) 27 | 28 | ## 2. 性能数据 29 | 30 | **RDK X5 & RDK X5 Module** 31 | 32 | 数据集 ICDAR2019-ArT 33 | 34 | | 模型(公版) | 尺寸(像素) | 参数量 | BPU吞吐量 | 35 | | ------------ | ------- | ----- | ---------- | 36 | | PP-OCRv3_det | 640x640 | 3.8 M | 158.12 FPS | 37 | | PP-OCRv3_rec | 48x320 | 9.6 M | 245.68 FPS | 38 | 39 | 40 | **RDK X3 & RDK X3 Module** 41 | 42 | 数据集 ICDAR2019-ArT 43 | 44 | | 模型(公版) | 尺寸(像素) | 参数量 | BPU吞吐量 | 45 | | ------------ | ------- | ----- | ---------- | 46 | | PP-OCRv3_det | 640x640 | 3.8 M | 41.96 FPS | 47 | | PP-OCRv3_rec | 48x320 | 9.6 M | 78.92 FPS | 48 | 49 | 50 | ## 3. 
模型下载地址 51 | 52 | **.bin 文件下载**: 53 | 54 | 可以使用脚本 [download.sh](./model/download.sh) 一键下载所有此模型结构的 .bin 模型文件: 55 | 56 | ```shell 57 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x3/en_PP-OCRv3_det_640x640_nv12.bin 58 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x3/en_PP-OCRv3_rec_48x320_rgb.bin 59 | ``` 60 | 61 | ## 4. 部署测试 62 | 63 | 在下载完毕 .bin 文件后,可以执行 Python/Jupyter 脚本文件,在板端实际运行体验实际测试效果。需要更改测试图片,可额外下载数据集后,放入到data文件夹下并更改 Python/Jupyter 文件中图片的路径 64 | 65 | ![paddleocr](./data/paddleocr.png) 66 | -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/data/paddleocr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/OCR/PaddleOCR/data/paddleocr.png -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/data/paddleocr_test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/OCR/PaddleOCR/data/paddleocr_test.jpg -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/en_PP-OCRv3_det_640x640_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/en_PP-OCRv3_rec_48x320_rgb.bin -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/output/predict.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/OCR/PaddleOCR/output/predict.jpg -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/yaml/paddleocr_det_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './en_PP-OCRv3_det_infer.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'model_output' 21 | output_model_file_prefix: 'en_PP-OCRv3_det_infer-deploy_640x640_nv12' 22 | 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | 35 | calibration_parameters: 36 | cal_data_dir: './calibration_data_rgb_f32' 37 | cal_data_type: 'float32' 38 | calibration_type: 'default' 39 | 40 | 41 | compiler_parameters: 42 | compile_mode: 'latency' 43 | debug: False 44 | optimize_level: 'O3' 45 | 46 | -------------------------------------------------------------------------------- /demos/OCR/PaddleOCR/yaml/paddleocr_rec_config.yaml: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './en_PP-OCRv3_rec_infer.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'model_output' 21 | output_model_file_prefix: 'en_PP-OCRv3_rec_infer-deploy_48x320_rgb' 22 | node_info: { 23 | "p2o.Softmax.0": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "p2o.Softmax.1": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "p2o.Softmax.2": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | } 38 | } 39 | 40 | 41 | input_parameters: 42 | input_name: "" 43 | input_type_rt: 'featuremap' 44 | input_layout_rt: 'NCHW' 45 | input_type_train: 'featuremap' 46 | input_layout_train: 'NCHW' 47 | input_shape: '' 48 | norm_type: 'no_preprocess' 49 | 50 | 51 | calibration_parameters: 52 | cal_data_dir: './calibration_data_rgb_f32' 53 | cal_data_type: 'float32' 54 | calibration_type: 'default' 55 | optimization: "set_all_nodes_int16" 56 | 57 | 58 | compiler_parameters: 59 | compile_mode: 'latency' 60 | debug: False 61 | optimize_level: 'O3' 62 | 63 | -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/README.md: 
-------------------------------------------------------------------------------- 1 | English | [简体中文](./README_cn.md) 2 | 3 | # YOLOv11 Pose 4 | 5 | TODO -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolo11_pose) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | # set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | # set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | # set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | # set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | # if (NOT CMAKE_BUILD_TYPE) 24 | # set(CMAKE_BUILD_TYPE Release) 25 | # endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 
| 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/imgs/YOLO11_demo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Pose/YOLO11-Pose/imgs/YOLO11_demo.jpeg -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/imgs/yolo11n_pose_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Pose/YOLO11-Pose/imgs/yolo11n_pose_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/imgs/yolo11n_pose_bayese_640x640_nv12_modified.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Pose/YOLO11-Pose/imgs/yolo11n_pose_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- 
/demos/Pose/YOLO11-Pose/models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | - [Download weights](#download-weights) 4 | - [Bayes-e (RDK X5 \& RDK X5 Module)](#bayes-e-rdk-x5--rdk-x5-module) 5 | - [bin - nv12](#bin---nv12) 6 | - [Bayes (RDK Ultra \& RDK Ultra Module)](#bayes-rdk-ultra--rdk-ultra-module) 7 | - [Nash-e (RDK S100)](#nash-e-rdk-s100) 8 | - [Nash-m (RDK S100P)](#nash-m-rdk-s100p) 9 | - [Bernoulli2 (RDK X3 \& RDK X3 Module)](#bernoulli2-rdk-x3--rdk-x3-module) 10 | - [bin - nv12](#bin---nv12-1) 11 | 12 | 13 | 14 | ## Bayes-e (RDK X5 & RDK X5 Module) 15 | ### bin - nv12 16 | YOLOv11n - Detect 17 | ```bash 18 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo11n_pose_bayese_640x640_nv12_modified.bin 19 | ``` 20 | YOLOv11s - Detect 21 | ```bash 22 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo11s_pose_bayese_640x640_nv12_modified.bin 23 | ``` 24 | YOLOv11m - Detect 25 | ```bash 26 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo11m_pose_bayese_640x640_nv12_modified.bin 27 | ``` 28 | YOLOv11l - Detect 29 | ```bash 30 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo11l_pose_bayese_640x640_nv12_modified.bin 31 | ``` 32 | YOLOv11x - Detect 33 | ```bash 34 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo11x_pose_bayese_640x640_nv12_modified.bin 35 | ``` 36 | 37 | ## Bayes (RDK Ultra & RDK Ultra Module) 38 | 39 | ## Nash-e (RDK S100) 40 | 41 | 42 | ## Nash-m (RDK S100P) 43 | 44 | 45 | ## Bernoulli2 (RDK X3 & RDK X3 Module) 46 | ### bin - nv12 47 | YOLOv11n - Detect 48 | ```bash 49 | ``` -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/ptq_yamls/yolo11_pose_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: 
'yolo11n-pose.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolo11n_pose_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo11n_pose_bayese_640x640_nv12' 7 | # YOLO11-Pose n, s, m 8 | node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11-Pose l, x 10 | # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'rgb' 16 | input_layout_rt: 'NCHW' 17 | input_type_train: 'rgb' 18 | input_layout_train: 'NCHW' 19 | norm_type: 'data_scale' 20 | scale_value: 0.003921568627451 21 | calibration_parameters: 22 | cal_data_dir: './calibration_data_rgb_f32_640' 23 | cal_data_type: 'float32' 24 | compiler_parameters: 25 | compile_mode: 'latency' 26 | debug: False 27 | optimize_level: 'O3' 28 | -------------------------------------------------------------------------------- /demos/Pose/YOLO11-Pose/ptq_yamls/yolo11_pose_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: 'yolo11n-pose.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolo11n_pose_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo11n_pose_bayese_640x640_nv12' 7 | # YOLO11-Pose n, s, m 8 | node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11-Pose l, x 10 | # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'nv12' 16 | input_type_train: 'rgb' 17 | input_layout_train: 'NCHW' 18 | norm_type: 
'data_scale' 19 | scale_value: 0.003921568627451 20 | calibration_parameters: 21 | cal_data_dir: './calibration_data_rgb_f32_640' 22 | cal_data_type: 'float32' 23 | compiler_parameters: 24 | compile_mode: 'latency' 25 | debug: False 26 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_NCHWRGB/yolov8_instance_seg_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov8n-seg.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolov8n_instance_seg_bayese_640x640_nchw' 6 | output_model_file_prefix: 'yolov8n_instance_seg_bayese_640x640_nchw' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'rgb' 10 | input_layout_rt: 'NCHW' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/README.md: -------------------------------------------------------------------------------- 1 | English | [简体中文](./README_cn.md) 2 | 3 | # YOLO11 Instance Segmentation -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/config_yolo11_seg_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: 'yolo11n-seg.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolo11n_seg_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo11n_seg_bayese_640x640_nv12' 7 | # YOLO11 n, s, m 8 | node_info: 
{"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11 l, x 10 | # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'nv12' 16 | input_type_train: 'rgb' 17 | input_layout_train: 'NCHW' 18 | norm_type: 'data_scale' 19 | scale_value: 0.003921568627451 20 | calibration_parameters: 21 | cal_data_dir: './calibration_data_rgb_f32_640' 22 | cal_data_type: 'float32' 23 | compiler_parameters: 24 | compile_mode: 'latency' 25 | debug: False 26 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolo11_seg) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | # set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | # set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | # set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | # set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | 
include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExplorer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake ..
51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/YOLOv8_Instance_Segmentation_Origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/YOLOv8_Instance_Segmentation_Origin.png -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/YOLOv8_Instance_Segmentation_Quantize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/YOLOv8_Instance_Segmentation_Quantize.png -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/demo_rdkx5_yolov8n_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/demo_rdkx5_yolov8n_seg.jpg -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/yolo11nseg_detect_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/yolo11nseg_detect_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/yolo11nseg_detect_bayese_640x640_nv12_modified.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/imgs/yolo11nseg_detect_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- /demos/Seg/YOLO11-Seg/YOLO11-Seg_YUV420SP/ptq_models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | - [Download weights](#download-weights) 4 | - [Bayes-e (RDK X5 \& RDK X5 Module)](#bayes-e-rdk-x5--rdk-x5-module) 5 | - [bin - nv12](#bin---nv12) 6 | - [Bayes (RDK Ultra \& RDK Ultra Module)](#bayes-rdk-ultra--rdk-ultra-module) 7 | - [Nash-e (RDK S100)](#nash-e-rdk-s100) 8 | - [Nash-m (RDK S100P)](#nash-m-rdk-s100p) 9 | - [Bernoulli2 (RDK X3 \& RDK X3 Module)](#bernoulli2-rdk-x3--rdk-x3-module) 10 | 11 | 12 | 13 | ## Bayes-e (RDK X5 & RDK X5 Module) 14 | ### bin - nv12 15 | YOLOv8n - Instance-Segmentation 16 | ```bash 17 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8n_instance_seg_bayese_640x640_nv12_modified.bin 18 | ``` 19 | YOLOv8s - Instance-Segmentation 20 | ```bash 21 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8s_instance_seg_bayese_640x640_nv12_modified.bin 22 | ``` 23 | YOLOv8m - Instance-Segmentation 24 | ```bash 25 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8m_instance_seg_bayese_640x640_nv12_modified.bin 26 | ``` 27 | YOLOv8l - Instance-Segmentation 28 | ```bash 29 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8l_instance_seg_bayese_640x640_nv12_modified.bin 30 | ``` 31 | YOLOv8x - Instance-Segmentation 32 | ```bash 33 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8x_instance_seg_bayese_640x640_nv12_modified.bin 34 | ``` 35 | 36 | ## Bayes (RDK Ultra & RDK Ultra Module) 37 | 38 | 39 | ## 
Nash-e (RDK S100) 40 | 41 | 42 | ## Nash-m (RDK S100P) 43 | 44 | 45 | ## Bernoulli2 (RDK X3 & RDK X3 Module) -------------------------------------------------------------------------------- /demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/config_yolo11e_seg_pf_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: 'yolo11n-seg.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolo11n_seg_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo11n_seg_bayese_640x640_nv12' 7 | # YOLO11 n, s, m 8 | node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11 l, x 10 | # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'nv12' 16 | input_type_train: 'rgb' 17 | input_layout_train: 'NCHW' 18 | norm_type: 'data_scale' 19 | scale_value: 0.003921568627451 20 | calibration_parameters: 21 | cal_data_dir: './calibration_data_rgb_f32_640' 22 | cal_data_type: 'float32' 23 | compiler_parameters: 24 | compile_mode: 'latency' 25 | debug: False 26 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_indoor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_indoor.jpg -------------------------------------------------------------------------------- 
/demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_indoor2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_indoor2.jpg -------------------------------------------------------------------------------- /demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_outdoor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/imgs/demo_rdkx5_outdoor.jpg -------------------------------------------------------------------------------- /demos/Seg/YOLOE-11-Seg-Prompt-Free/YOLOE-11-Seg-Prompt-Free_YUV420SP/ptq_models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | 4 | ## Bayes-e (RDK X5 & RDK X5 Module) 5 | ### bin - nv12 6 | YOLOE-11 Instance Segmentation Prompt Free 7 | ```bash 8 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yoloe-11s-seg-pf_bayese_640x640_nv12.bin 9 | ``` -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_seg) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 |
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | # set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | # set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | # set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | # set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/Seg/YOLOv8-Seg/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExplorer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake ..
51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/imgs/YOLOv8_Instance_Segmentation_Origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOv8-Seg/imgs/YOLOv8_Instance_Segmentation_Origin.png -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/imgs/YOLOv8_Instance_Segmentation_Quantize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOv8-Seg/imgs/YOLOv8_Instance_Segmentation_Quantize.png -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/imgs/demo_rdkx5_yolov8n_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOv8-Seg/imgs/demo_rdkx5_yolov8n_seg.jpg -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/imgs/yolov8n_instance_seg_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOv8-Seg/imgs/yolov8n_instance_seg_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/imgs/yolov8n_instance_seg_bayese_640x640_nv12_modified.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/Seg/YOLOv8-Seg/imgs/yolov8n_instance_seg_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | - [Download weights](#download-weights) 4 | - [Bayes-e (RDK X5 \& RDK X5 Module)](#bayes-e-rdk-x5--rdk-x5-module) 5 | - [bin - nv12](#bin---nv12) 6 | - [Bayes (RDK Ultra \& RDK Ultra Module)](#bayes-rdk-ultra--rdk-ultra-module) 7 | - [Nash-e (RDK S100)](#nash-e-rdk-s100) 8 | - [Nash-m (RDK S100P)](#nash-m-rdk-s100p) 9 | - [Bernoulli2 (RDK X3 \& RDK X3 Module)](#bernoulli2-rdk-x3--rdk-x3-module) 10 | 11 | 12 | 13 | ## Bayes-e (RDK X5 & RDK X5 Module) 14 | ### bin - nv12 15 | YOLOv8n - Instance-Segmentation 16 | ```bash 17 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8n_instance_seg_bayese_640x640_nv12_modified.bin 18 | ``` 19 | YOLOv8s - Instance-Segmentation 20 | ```bash 21 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8s_instance_seg_bayese_640x640_nv12_modified.bin 22 | ``` 23 | YOLOv8m - Instance-Segmentation 24 | ```bash 25 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8m_instance_seg_bayese_640x640_nv12_modified.bin 26 | ``` 27 | YOLOv8l - Instance-Segmentation 28 | ```bash 29 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8l_instance_seg_bayese_640x640_nv12_modified.bin 30 | ``` 31 | YOLOv8x - Instance-Segmentation 32 | ```bash 33 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov8x_instance_seg_bayese_640x640_nv12_modified.bin 34 | ``` 35 | 36 | ## Bayes (RDK Ultra & RDK Ultra Module) 37 | 38 | 39 | ## Nash-e (RDK S100) 40 | 41 | 42 | ## Nash-m (RDK S100P) 43 | 44 | 45 | ## Bernoulli2 (RDK X3 & RDK X3 Module) 
-------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/ptq_yamls/yolov8_instance_seg_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov8n-seg.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolov8n_instance_seg_bayese_640x640_nchw' 6 | output_model_file_prefix: 'yolov8n_instance_seg_bayese_640x640_nchw' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'rgb' 10 | input_layout_rt: 'NCHW' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/Seg/YOLOv8-Seg/ptq_yamls/yolov8_instance_seg_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov8n-seg.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolov8n_instance_seg_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolov8n_instance_seg_bayese_640x640_nv12' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'nv12' 10 | input_type_train: 'rgb' 11 | input_layout_train: 'NCHW' 12 | norm_type: 'data_scale' 13 | scale_value: 0.003921568627451 14 | calibration_parameters: 15 | cal_data_dir: './calibration_data_rgb_f32_640' 16 | cal_data_type: 'float32' 17 | compiler_parameters: 18 | compile_mode: 'latency' 19 | debug: False 20 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/ConvNeXt/data/ConvNeXt_Block.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ConvNeXt/data/ConvNeXt_Block.png -------------------------------------------------------------------------------- /demos/classification/ConvNeXt/data/cheetah.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ConvNeXt/data/cheetah.JPEG -------------------------------------------------------------------------------- /demos/classification/ConvNeXt/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ConvNeXt/data/inference.png -------------------------------------------------------------------------------- /demos/classification/ConvNeXt/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/ConvNeXt_atto_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/ConvNeXt_femto_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/ConvNeXt_nano_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/ConvNeXt_pico_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/ConvNeXt/yaml/ConvNeXt_femto.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, 
Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: '../../../01_common/model_zoo/mapper/classification/ConvNeXt/convnext_atto.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'ConvNeXt-deploy_224x224_nv12' 21 | output_model_file_prefix: 'ConvNeXt-deploy_224x224_nv12' 22 | 23 | 24 | 25 | node_info: { 26 | "/stages/stages.3/blocks/blocks.0/norm/ReduceMean_1": { 27 | 'ON': 'BPU', 28 | 'InputType': 'int16', 29 | 'OutputType': 'int16' 30 | }, 31 | "/stages/stages.3/downsample/downsample.0/Pow": { 32 | 'ON': 'BPU', 33 | 'InputType': 'int16', 34 | 'OutputType': 'int16' 35 | }, 36 | "/stages/stages.3/downsample/downsample.0/Div_mul": { 37 | 'ON': 'BPU', 38 | 'InputType': 'int16', 39 | 'OutputType': 'int16' 40 | }, 41 | "/stages/stages.3/downsample/downsample.0/Sub": { 42 | 'ON': 'BPU', 43 | 'InputType': 'int16', 44 | 'OutputType': 'int16' 45 | }, 46 | "/stages/stages.3/downsample/downsample.0/ReduceMean": { 47 | 'ON': 'BPU', 48 | 'InputType': 'int16', 49 | 'OutputType': 'int16' 50 | }, 51 | } 52 | 53 | 54 | input_parameters: 55 | input_name: "" 56 | input_type_rt: 'nv12' 57 | input_type_train: 'rgb' 58 | input_layout_train: 'NCHW' 59 | input_shape: '' 60 | norm_type: 'data_mean_and_scale' 61 | mean_value: 123.675 116.28 103.53 62 | scale_value: 0.01712475 0.017507 0.01742919 63 | 64 | 65 | calibration_parameters: 66 | cal_data_dir: './calibration_data_rgb_f32' 67 | cal_data_type: 'float32' 68 | calibration_type: 
'default' 69 | 70 | 71 | compiler_parameters: 72 | compile_mode: 'latency' 73 | debug: False 74 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/data/EdgeNeXt_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EdgeNeXt/data/EdgeNeXt_architecture.png -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/data/Zebra.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EdgeNeXt/data/Zebra.jpg -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EdgeNeXt/data/inference.png -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EdgeNeXt_base_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EdgeNeXt_small_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EdgeNeXt_x_small_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EdgeNeXt_xx_small_224x224_nv12.bin 
-------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/yaml/EdgeNeXt_base_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_parameters: 16 | onnx_model: './edgenext_base.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EdgeNeXt_base_224x224_nv12' 20 | output_model_file_prefix: 'EdgeNeXt_base_224x224_nv12' 21 | node_info: { 22 | "/stages/stages.1/blocks/blocks.2/xca/Softmax": { 23 | 'ON': 'BPU', 24 | 'InputType': 'int16', 25 | 'OutputType': 'int16' 26 | }, 27 | "/stages/stages.2/blocks/blocks.8/xca/Softmax": { 28 | 'ON': 'BPU', 29 | 'InputType': 'int16', 30 | 'OutputType': 'int16' 31 | }, 32 | "/stages/stages.3/blocks/blocks.2/xca/Softmax": { 33 | 'ON': 'BPU', 34 | 'InputType': 'int16', 35 | 'OutputType': 'int16' 36 | } 37 | } 38 | 39 | input_parameters: 40 | input_name: "" 41 | input_type_rt: 'nv12' 42 | input_type_train: 'rgb' 43 | input_layout_train: 'NCHW' 44 | norm_type: 'data_mean_and_scale' 45 | mean_value: 123.675 116.28 103.53 46 | scale_value: 0.01712475 0.017507 0.01742919 47 | 48 | calibration_parameters: 49 | cal_data_dir: './calibration_data_rgb_f32' 50 | cal_data_type: 'float32' 51 | calibration_type: 'max' 52 | max_percentile: 0.999 53 | 54 | compiler_parameters: 55 | 
compile_mode: 'latency' 56 | debug: False 57 | optimize_level: 'O3' 58 | 59 | -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/yaml/EdgeNeXt_small_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_parameters: 16 | onnx_model: './edgenext_small.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EdgeNeXt_small_224x224_nv12' 20 | output_model_file_prefix: 'EdgeNeXt_small_224x224_nv12' 21 | node_info: { 22 | "/stages/stages.1/blocks/blocks.2/xca/Softmax": { 23 | 'ON': 'BPU', 24 | 'InputType': 'int16', 25 | 'OutputType': 'int16' 26 | }, 27 | "/stages/stages.2/blocks/blocks.8/xca/Softmax": { 28 | 'ON': 'BPU', 29 | 'InputType': 'int16', 30 | 'OutputType': 'int16' 31 | }, 32 | "/stages/stages.3/blocks/blocks.2/xca/Softmax": { 33 | 'ON': 'BPU', 34 | 'InputType': 'int16', 35 | 'OutputType': 'int16' 36 | } 37 | } 38 | 39 | input_parameters: 40 | input_name: "" 41 | input_type_rt: 'nv12' 42 | input_type_train: 'rgb' 43 | input_layout_train: 'NCHW' 44 | norm_type: 'data_mean_and_scale' 45 | mean_value: 123.675 116.28 103.53 46 | scale_value: 0.01712475 0.017507 0.01742919 47 | 48 | calibration_parameters: 49 | cal_data_dir: './calibration_data_rgb_f32' 50 | cal_data_type: 'float32' 51 | 
calibration_type: 'max' 52 | max_percentile: 0.999 53 | 54 | compiler_parameters: 55 | compile_mode: 'latency' 56 | debug: False 57 | optimize_level: 'O3' 58 | 59 | -------------------------------------------------------------------------------- /demos/classification/EdgeNeXt/yaml/EdgeNeXt_x_small_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_parameters: 16 | onnx_model: './edgenext_x_small.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EdgeNeXt_x_small_224x224_nv12' 20 | output_model_file_prefix: 'EdgeNeXt_x_small_224x224_nv12' 21 | node_info: { 22 | "/stages/stages.1/blocks/blocks.2/xca/Softmax": { 23 | 'ON': 'BPU', 24 | 'InputType': 'int16', 25 | 'OutputType': 'int16' 26 | }, 27 | "/stages/stages.2/blocks/blocks.8/xca/Softmax": { 28 | 'ON': 'BPU', 29 | 'InputType': 'int16', 30 | 'OutputType': 'int16' 31 | }, 32 | "/stages/stages.3/blocks/blocks.2/xca/Softmax": { 33 | 'ON': 'BPU', 34 | 'InputType': 'int16', 35 | 'OutputType': 'int16' 36 | } 37 | } 38 | 39 | input_parameters: 40 | input_name: "" 41 | input_type_rt: 'nv12' 42 | input_type_train: 'rgb' 43 | input_layout_train: 'NCHW' 44 | norm_type: 'data_mean_and_scale' 45 | mean_value: 123.675 116.28 103.53 46 | scale_value: 0.01712475 0.017507 0.01742919 47 | 48 | calibration_parameters: 49 | cal_data_dir: './calibration_data_rgb_f32' 50 | cal_data_type: 'float32' 51 | calibration_type: 'max' 52 | max_percentile: 0.999 53 | 54 | compiler_parameters: 55 | compile_mode: 'latency' 56 | debug: False 57 | optimize_level: 'O3' 58 | 59 | -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/data/EfficientFormer_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormer/data/EfficientFormer_architecture.png -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/data/bittern.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormer/data/bittern.JPEG 
-------------------------------------------------------------------------------- /demos/classification/EfficientFormer/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormer/data/inference.png -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/data/latency_profiling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormer/data/latency_profiling.png -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientFormer_l1_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientFormer_l3_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/yaml/EfficientFormer_l1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_parameters: 16 | onnx_model: './efficientformer_l1.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EfficientFormer_224x224_nv12_int16' 20 | output_model_file_prefix: 'EfficientFormer_224x224_nv12' 21 | 22 | node_info: { 23 | "/stages/stages.3/blocks/blocks.4/token_mixer/Softmax": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | } 28 | } 29 | 30 | 31 | input_parameters: 32 | input_name: "" 33 | input_type_rt: 'nv12' 34 | input_type_train: 'rgb' 35 | input_layout_train: 'NCHW' 36 | input_shape: '' 37 | norm_type: 'data_mean_and_scale' 38 | mean_value: 123.675 116.28 103.53 39 | scale_value: 0.01712475 0.017507 0.01742919 40 | 41 | 42 | calibration_parameters: 43 | cal_data_dir: './calibration_data_rgb_f32' 44 | cal_data_type: 'float32' 45 | calibration_type: 'default' 46 | optimization: "set_all_nodes_int16" 47 | 48 | compiler_parameters: 49 | compile_mode: 'latency' 50 | debug: False 51 | optimize_level: 'O3' 52 | 53 | -------------------------------------------------------------------------------- /demos/classification/EfficientFormer/yaml/EfficientFormer_l3_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_parameters: 16 | onnx_model: './efficientformer_l3.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EfficientFormer_224x224_nv12_int16' 20 | output_model_file_prefix: 'EfficientFormer_224x224_nv12' 21 | 22 | node_info: { 23 | "/stages/stages.3/blocks/blocks.3/token_mixer/Softmax": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "/stages/stages.3/blocks/blocks.4/token_mixer/Softmax": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "/stages/stages.3/blocks/blocks.5/token_mixer/Softmax": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | }, 38 | "/stages/stages.3/blocks/blocks.6/token_mixer/Softmax": { 39 | 'ON': 'BPU', 40 | 'InputType': 'int16', 41 | 'OutputType': 'int16' 42 | } 43 | } 44 | 45 | 46 | input_parameters: 47 | input_name: "" 48 | input_type_rt: 'nv12' 49 | input_type_train: 'rgb' 50 | input_layout_train: 'NCHW' 51 | input_shape: '' 52 | norm_type: 'data_mean_and_scale' 53 | mean_value: 123.675 116.28 103.53 54 | scale_value: 0.01712475 0.017507 0.01742919 55 | 56 | 57 | calibration_parameters: 58 | cal_data_dir: './calibration_data_rgb_f32' 59 | cal_data_type: 'float32' 60 | calibration_type: 'default' 61 | optimization: "set_all_nodes_int16" 62 | 63 | compiler_parameters: 64 | compile_mode: 'latency' 65 | debug: False 66 | optimize_level: 'O3' 67 | jobs: 64 68 | 69 | -------------------------------------------------------------------------------- 
/demos/classification/EfficientFormerV2/data/EfficientFormerV2_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormerV2/data/EfficientFormerV2_architecture.png -------------------------------------------------------------------------------- /demos/classification/EfficientFormerV2/data/goldfish.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormerV2/data/goldfish.JPEG -------------------------------------------------------------------------------- /demos/classification/EfficientFormerV2/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientFormerV2/data/inference.png -------------------------------------------------------------------------------- /demos/classification/EfficientFormerV2/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientFormerv2_s2_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientFormerv2_s1_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientFormerv2_s0_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/EfficientFormerV2/yaml/EfficientFormerv2_s0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 
2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_parameters: 16 | onnx_model: './efficientformerv2_s0.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EfficientFormerv2_s0_int16_model_output' 20 | output_model_file_prefix: 'EfficientFormerv2_s0_224x224_nv12' 21 | 22 | node_info: { 23 | "/stages/stages.2/blocks/blocks.4/token_mixer/Softmax": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "/stages/stages.2/blocks/blocks.5/token_mixer/Softmax": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "/stages/stages.3/downsample/attn/Softmax": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | }, 38 | "/stages/stages.3/blocks/blocks.2/token_mixer/Softmax": { 39 | 'ON': 'BPU', 40 | 'InputType': 'int16', 41 | 'OutputType': 'int16' 42 | }, 43 | "/stages/stages.3/blocks/blocks.3/token_mixer/Softmax": { 44 | 'ON': 'BPU', 45 | 'InputType': 'int16', 46 | 'OutputType': 'int16' 47 | } 48 | } 49 | 50 | debug_mode: "dump_calibration_data" 51 | 52 | input_parameters: 53 | input_name: "" 54 | input_type_rt: 'nv12' 55 | input_type_train: 'rgb' 56 | input_layout_train: 'NCHW' 57 | input_shape: '' 58 | norm_type: 'data_mean_and_scale' 59 | mean_value: 123.675 116.28 103.53 60 | scale_value: 0.01712475 0.017507 0.01742919 61 | 62 | calibration_parameters: 63 | cal_data_dir: 
'./calibration_data_rgb_f32' 64 | cal_data_type: 'float32' 65 | calibration_type: 'max' 66 | max_percentile: 0.999 67 | optimization: "set_all_nodes_int16" 68 | 69 | compiler_parameters: 70 | compile_mode: 'latency' 71 | debug: False 72 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/EfficientFormerV2/yaml/EfficientFormerv2_s1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_parameters: 16 | onnx_model: './efficientformerv2_s1.onnx' 17 | march: "bayes-e" 18 | layer_out_dump: False 19 | working_dir: 'EfficientFormerv2_s1_224x224_nv12' 20 | output_model_file_prefix: 'EfficientFormerv2_s1_224x224_nv12' 21 | 22 | node_info: { 23 | "/stages/stages.2/blocks/blocks.7/token_mixer/Softmax": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "/stages/stages.2/blocks/blocks.8/token_mixer/Softmax": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "/stages/stages.3/downsample/attn/Softmax": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | }, 38 | "/stages/stages.3/blocks/blocks.4/token_mixer/Softmax": { 39 | 'ON': 'BPU', 40 | 'InputType': 'int16', 41 | 'OutputType': 'int16' 42 | }, 43 | "/stages/stages.3/blocks/blocks.5/token_mixer/Softmax": { 44 | 'ON': 'BPU', 45 | 'InputType': 'int16', 46 | 'OutputType': 'int16' 47 | } 48 | } 49 | 50 | input_parameters: 51 | input_name: "" 52 | input_type_rt: 'nv12' 53 | input_type_train: 'rgb' 54 | input_layout_train: 'NCHW' 55 | input_shape: '' 56 | norm_type: 'data_mean_and_scale' 57 | mean_value: 123.675 116.28 103.53 58 | scale_value: 0.01712475 0.017507 0.01742919 59 | 60 | calibration_parameters: 61 | cal_data_dir: './calibration_data_rgb_f32' 62 | cal_data_type: 'float32' 63 | calibration_type: 'max' 64 | max_percentile: 0.999 65 | 66 | compiler_parameters: 67 | compile_mode: 'latency' 68 | debug: False 69 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/EfficientNet/data/EfficientNet_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientNet/data/EfficientNet_architecture.png 
-------------------------------------------------------------------------------- /demos/classification/EfficientNet/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientNet/data/inference.png -------------------------------------------------------------------------------- /demos/classification/EfficientNet/data/redshank.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientNet/data/redshank.JPEG -------------------------------------------------------------------------------- /demos/classification/EfficientNet/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientNet_B2_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientNet_B3_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientNet_B4_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/EfficientNet/yaml/EfficientNet_B2_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './efficientnet_b2.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'EfficientNet_224x224_nv12' 21 | output_model_file_prefix: 'EfficientNet_224x224_nv12' 22 | node_info: { 23 | "/blocks/blocks.5/blocks.5.0/conv_pwl/Conv": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "/blocks/blocks.0/blocks.0.0/conv_dw/Conv": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "/blocks/blocks.6/blocks.6.0/conv_pwl/Conv": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | }, 38 | "/blocks/blocks.6/blocks.6.0/se/Mul": { 39 | 'ON': 'BPU', 40 | 'InputType': 'int16', 41 | 'OutputType': 'int16' 42 | }, 43 | "/blocks/blocks.6/blocks.6.0/se/ReduceMean": { 44 | 'ON': 'BPU', 45 | 'InputType': 'int16', 46 | 'OutputType': 'int16' 47 | }, 48 | "/blocks/blocks.6/blocks.6.0/conv_dw/Conv": { 49 | 'ON': 'BPU', 50 | 'InputType': 'int16', 51 | 'OutputType': 'int16' 52 | }, 53 | } 54 | debug_mode: 'dump_calibration_data' 55 | 56 | 57 | input_parameters: 58 | input_name: "" 59 | input_type_rt: 'nv12' 60 | input_type_train: 'rgb' 61 | input_layout_train: 'NCHW' 62 | input_shape: '' 63 | norm_type: 'data_mean_and_scale' 64 | mean_value: 123.675 116.28 103.53 65 | scale_value: 0.01712475 0.017507 0.01742919 66 | 67 | 68 | calibration_parameters: 69 | cal_data_dir: './calibration_data_rgb_f32' 70 | cal_data_type: 'float32' 71 | calibration_type: 'default' 72 | 73 | 74 | compiler_parameters: 75 
| compile_mode: 'latency' 76 | debug: False 77 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/EfficientNet/yaml/EfficientNet_B3_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './efficientnet_b3.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'model_output' 21 | output_model_file_prefix: 'EfficientNet_224x224_nv12' 22 | debug_mode: 'dump_calibration_data' 23 | 24 | 25 | input_parameters: 26 | input_name: "" 27 | input_type_rt: 'nv12' 28 | input_type_train: 'rgb' 29 | input_layout_train: 'NCHW' 30 | input_shape: '' 31 | norm_type: 'data_mean_and_scale' 32 | mean_value: 123.675 116.28 103.53 33 | scale_value: 0.01712475 0.017507 0.01742919 34 | 35 | 36 | calibration_parameters: 37 | cal_data_dir: './calibration_data_rgb_f32' 38 | cal_data_type: 'float32' 39 | calibration_type: 'default' 40 | 41 | 42 | compiler_parameters: 43 | compile_mode: 'latency' 44 | debug: False 45 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/data/Comparison between Transformer & CNN.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientViT_MSRA/data/Comparison between Transformer & CNN.png -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/data/EfficientViT_msra_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientViT_MSRA/data/EfficientViT_msra_architecture.png -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/data/MHSA computation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientViT_MSRA/data/MHSA computation.jpg -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/data/hook.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientViT_MSRA/data/hook.JPEG -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/EfficientViT_MSRA/data/inference.png -------------------------------------------------------------------------------- /demos/classification/EfficientViT_MSRA/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models 
weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/EfficientViT_m5_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/FastViT/data/FastViT_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FastViT/data/FastViT_architecture.png -------------------------------------------------------------------------------- /demos/classification/FastViT/data/bucket.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FastViT/data/bucket.JPEG -------------------------------------------------------------------------------- /demos/classification/FastViT/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FastViT/data/inference.png -------------------------------------------------------------------------------- /demos/classification/FastViT/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FastViT_SA12_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FastViT_S12_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FastViT_T12_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FastViT_T8_224x224_nv12.bin 
-------------------------------------------------------------------------------- /demos/classification/FastViT/yaml/FastViT_S12_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: '../../../01_common/model_zoo/mapper/classification/FastViT/fastvit_s12.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'FastViT_224x224_nv12_mix' 16 | output_model_file_prefix: 'FastViT_224x224_nv12' 17 | 18 | 19 | node_info: { 20 | "/patch_embed/patch_embed.0/activation/Mul_1": { 21 | 'ON': 'BPU', 22 | 'InputType': 'int16', 23 | 'OutputType': 'int16' 24 | }, 25 | "/patch_embed/patch_embed.1/activation/Mul_1": { 26 | 'ON': 'BPU', 27 | 'InputType': 'int16', 28 | 'OutputType': 'int16' 29 | }, 30 | "/patch_embed/patch_embed.2/activation/Mul_1": { 31 | 'ON': 'BPU', 32 | 'InputType': 'int16', 33 | 'OutputType': 'int16' 34 | }, 35 | "/network.2/network.2.0/convffn/ff2/Conv": { 36 | 'ON': 'BPU', 37 | 'InputType': 'int16', 38 | 'OutputType': 'int16' 39 | }, 40 | "/gap/GlobalAveragePool": { 41 | 'ON': 'BPU', 42 | 'InputType': 'int16', 43 | 'OutputType': 'int16' 44 | } 45 | } 46 | 47 | 48 | input_parameters: 49 | input_name: "" 50 | input_type_rt: 'nv12' 51 | input_type_train: 'rgb' 52 | input_layout_train: 'NCHW' 53 | input_shape: '' 54 | norm_type: 'data_mean_and_scale' 55 | mean_value: 123.675 116.28 103.53 56 | scale_value: 0.01712475 0.017507 0.01742919 57 | 58 | 59 | calibration_parameters: 60 | cal_data_dir: './calibration_data_rgb_f32' 61 | cal_data_type: 'float32' 62 | 
calibration_type: 'default' 63 | 64 | compiler_parameters: 65 | compile_mode: 'latency' 66 | debug: False 67 | optimize_level: 'O3' 68 | 69 | -------------------------------------------------------------------------------- /demos/classification/FastViT/yaml/FastViT_SA12_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: '../../../01_common/model_zoo/mapper/classification/FastViT/fastvit_sa12.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'FastViT_224x224_nv12_mix' 16 | output_model_file_prefix: 'FastViT_224x224_nv12' 17 | 18 | node_info: { 19 | "/network.7/network.7.0/token_mixer/Softmax": { 20 | 'ON': 'BPU', 21 | 'InputType': 'int16', 22 | 'OutputType': 'int16' 23 | }, 24 | "/network.7/network.7.1/token_mixer/Softmax": { 25 | 'ON': 'BPU', 26 | 'InputType': 'int16', 27 | 'OutputType': 'int16' 28 | }, 29 | "/network.2/network.2.0/token_mixer/reparam_conv/Conv": { 30 | 'ON': 'BPU', 31 | 'InputType': 'int16', 32 | 'OutputType': 'int16' 33 | }, 34 | "/patch_embed/patch_embed.0/activation/Mul_1": { 35 | 'ON': 'BPU', 36 | 'InputType': 'int16', 37 | 'OutputType': 'int16' 38 | }, 39 | "/patch_embed/patch_embed.1/reparam_conv/Conv": { 40 | 'ON': 'BPU', 41 | 'InputType': 'int16', 42 | 'OutputType': 'int16' 43 | }, 44 | "/patch_embed/patch_embed.1/activation/Mul_1": { 45 | 'ON': 'BPU', 46 | 'InputType': 'int16', 47 | 'OutputType': 'int16' 48 | } 49 | } 50 | 51 | 52 | 53 | 54 | input_parameters: 55 | input_name: "" 56 | input_type_rt: 'nv12' 57 | 
input_type_train: 'rgb' 58 | input_layout_train: 'NCHW' 59 | input_shape: '' 60 | norm_type: 'data_mean_and_scale' 61 | mean_value: 123.675 116.28 103.53 62 | scale_value: 0.01712475 0.017507 0.01742919 63 | 64 | 65 | calibration_parameters: 66 | cal_data_dir: './calibration_data_rgb_f32' 67 | cal_data_type: 'float32' 68 | calibration_type: 'default' 69 | 70 | compiler_parameters: 71 | compile_mode: 'latency' 72 | debug: False 73 | optimize_level: 'O3' 74 | 75 | -------------------------------------------------------------------------------- /demos/classification/FastViT/yaml/FastViT_T12_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: '../../../01_common/model_zoo/mapper/classification/FastViT/fastvit_t12.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'FastViT_224x224_nv12_mix' 16 | output_model_file_prefix: 'FastViT_224x224_nv12' 17 | 18 | node_info: { 19 | "/patch_embed/patch_embed.0/reparam_conv/Conv": { 20 | 'ON': 'BPU', 21 | 'InputType': 'int16', 22 | 'OutputType': 'int16' 23 | }, 24 | "/patch_embed/patch_embed.1/activation/Mul_1": { 25 | 'ON': 'BPU', 26 | 'InputType': 'int16', 27 | 'OutputType': 'int16' 28 | }, 29 | "/patch_embed/patch_embed.2/activation/Mul_1": { 30 | 'ON': 'BPU', 31 | 'InputType': 'int16', 32 | 'OutputType': 'int16' 33 | }, 34 | "/network.6/network.6.0/convffn/fc2/Conv": { 35 | 'ON': 'BPU', 36 | 'InputType': 'int16', 37 | 'OutputType': 'int16' 38 | } 39 | } 40 | 41 | 42 | input_parameters: 43 | input_name: "" 44 | input_type_rt: 'nv12' 45 | input_type_train: 'rgb' 46 | input_layout_train: 'NCHW' 47 | input_shape: '' 48 | norm_type: 'data_mean_and_scale' 49 | mean_value: 123.675 116.28 103.53 50 | scale_value: 0.01712475 0.017507 0.01742919 51 | 52 | 53 | calibration_parameters: 54 | cal_data_dir: './calibration_data_rgb_f32' 55 | cal_data_type: 'float32' 56 | calibration_type: 'default' 57 | 58 | compiler_parameters: 59 | compile_mode: 'latency' 60 | debug: False 61 | optimize_level: 'O3' 62 | 63 | -------------------------------------------------------------------------------- /demos/classification/FasterNet/data/FLOPs of Nets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FasterNet/data/FLOPs of Nets.png -------------------------------------------------------------------------------- /demos/classification/FasterNet/data/FasterNet_architecture.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FasterNet/data/FasterNet_architecture.png -------------------------------------------------------------------------------- /demos/classification/FasterNet/data/drake.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FasterNet/data/drake.JPEG -------------------------------------------------------------------------------- /demos/classification/FasterNet/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/FasterNet/data/inference.png -------------------------------------------------------------------------------- /demos/classification/FasterNet/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FasterNet_S_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FasterNet_T0_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FasterNet_T1_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/FasterNet_T2_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/FasterNet/yaml/FasterNet_S_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in 
compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './fasternet_s.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'model_output' 21 | output_model_file_prefix: 'FasterNet_224x224_nv12' 22 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | calibration_parameters: 35 | cal_data_dir: './calibration_data_rgb_f32' 36 | cal_data_type: 'float32' 37 | calibration_type: 'default' 38 | 39 | compiler_parameters: 40 | compile_mode: 'latency' 41 | debug: False 42 | optimize_level: 'O3' 43 | -------------------------------------------------------------------------------- /demos/classification/FasterNet/yaml/FasterNet_T0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './fasternet_t0.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'FasterNet_224x224_nv12_mix' 21 | output_model_file_prefix: 'FasterNet_224x224_nv12' 22 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 23 | node_info: { 24 | "/stages/stages.0/blocks/blocks.0/mlp/mlp.3/Conv": { 25 | 'ON': 'BPU', 26 | 'InputType': 'int16', 27 | 'OutputType': 'int16' 28 | }, 29 | "/stages/stages.0/blocks/blocks.0/mlp/mlp.2/Mul_1": { 30 | 'ON': 'BPU', 31 | 'InputType': 'int16', 32 | 'OutputType': 'int16' 33 | } 34 | } 35 | 36 | input_parameters: 37 | input_name: "" 38 | input_type_rt: 'nv12' 39 | input_type_train: 'rgb' 40 | input_layout_train: 'NCHW' 41 | input_shape: '' 42 | norm_type: 'data_mean_and_scale' 43 | mean_value: 123.675 116.28 103.53 44 | scale_value: 0.01712475 0.017507 0.01742919 45 | 46 | calibration_parameters: 47 | cal_data_dir: './calibration_data_rgb_f32' 48 | cal_data_type: 'float32' 49 | calibration_type: 'default' 50 | 51 | compiler_parameters: 52 | compile_mode: 'latency' 53 | debug: False 54 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/FasterNet/yaml/FasterNet_T1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './fasternet_t1.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'FasterNet_224x224_nv12' 21 | output_model_file_prefix: 'FasterNet_224x224_nv12' 22 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | calibration_parameters: 35 | cal_data_dir: './calibration_data_rgb_f32' 36 | cal_data_type: 'float32' 37 | calibration_type: 'default' 38 | 39 | compiler_parameters: 40 | compile_mode: 'latency' 41 | debug: False 42 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/FasterNet/yaml/FasterNet_T2_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './fasternet_t2.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'FasterNet_224x224_nv12' 21 | output_model_file_prefix: 'FasterNet_224x224_nv12' 22 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | calibration_parameters: 35 | cal_data_dir: './calibration_data_rgb_f32' 36 | cal_data_type: 'float32' 37 | calibration_type: 'default' 38 | 39 | compiler_parameters: 40 | compile_mode: 'latency' 41 | debug: False 42 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/GoogLeNet/data/GoogLeNet_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/GoogLeNet/data/GoogLeNet_architecture.png -------------------------------------------------------------------------------- /demos/classification/GoogLeNet/data/indigo_bunting.JPEG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/GoogLeNet/data/indigo_bunting.JPEG -------------------------------------------------------------------------------- /demos/classification/GoogLeNet/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/GoogLeNet/data/inference.png -------------------------------------------------------------------------------- /demos/classification/GoogLeNet/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/googlenet_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileNetV1/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | # CNN - MobilenetV1 4 | 5 | - [CNN - MobilenetV1](#cnn---mobilenetv1) 6 | - [1. 简介](#1-简介) 7 | - [2. 模型性能数据](#2-模型性能数据) 8 | - [3. 模型下载](#3-模型下载) 9 | - [4. 部署测试](#4-部署测试) 10 | 11 | ## 1. 
简介 12 | 13 | - **论文地址**: [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) 14 | 15 | - **Github 仓库**: [models/research/slim/nets/mobilenet_v1.md at master · tensorflow/models (github.com)](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md) 16 | 17 | Mobilenetv1 提出了一种用于嵌入式设备的轻量级神经网络。利用深度可分离卷积构造轻量级深度神经网络。其核心思想是巧妙地将标准卷积分解为 **深度可分类卷积(depthwise convolution)** 和 **点态卷积(pointwise convolution)** 。通过分离标准卷积,可以减少两步卷积操作的中间输出特征映射的数量,从而有效地减少网络参数。 18 | 19 | ![](./data/depthwise&pointwise.png) 20 | 21 | **MobilenetV1 模型特点**: 22 | 23 | - **深度可分离卷积**:MobileNet 模型是基于深度可分离卷积,这是一种**因式分解卷积**的形式,它将一个标准卷积分解为深度卷积和一种称为点态卷积的 1×1 卷积,最早出现在 InceptionV3 中 24 | - **超参数**。通过宽度因子 $\alpha$ 和分辨率因子 $\rho$ 降低计算量和参数量。 25 | 26 | 27 | ## 2. 模型性能数据 28 | 29 | 以下表格是在 RDK X5 & RDK X5 Module 上实际测试得到的性能数据 30 | 31 | 32 | | 模型 | 尺寸(像素) | 类别数 | 参数量(M) | 浮点Top-1 | 量化Top-1 | 延迟/吞吐量(单线程) | 延迟/吞吐量(多线程) | 帧率 | 33 | | ----------- | ------- | ---- | ------ | ----- | ----- | ----------- | ----------- | ------ | 34 | | MobileNetv1 | 224x224 | 1000 | 1.33 | 71.74 | 65.36 | 1.27 | 2.90 | 1356.25 | 35 | 36 | 37 | 说明: 38 | 1. X5的状态为最佳状态:CPU为8xA55@1.8G, 全核心Performance调度, BPU为1xBayes-e@1G, 共10TOPS等效int8算力。 39 | 2. 单线程延迟为单帧,单线程,单BPU核心的延迟,BPU推理一个任务最理想的情况。 40 | 3. 4线程工程帧率为4个线程同时向双核心BPU塞任务,一般工程中4个线程可以控制单帧延迟较小,同时吃满所有BPU到100%,在吞吐量(FPS)和帧延迟间得到一个较好的平衡。 41 | 4. 8线程极限帧率为8个线程同时向X3的双核心BPU塞任务,目的是为了测试BPU的极限性能,一般来说4核心已经占满,如果8线程比4线程还要好很多,说明模型结构需要提高"计算/访存"比,或者编译时选择优化DDR带宽。 42 | 5. 浮点/定点Top-1:浮点Top-1使用的是模型未量化前onnx的 Top-1 推理精度,量化Top-1则为量化后模型实际推理的精度。 43 | 44 | ## 3. 模型下载 45 | 46 | **.bin 文件下载**: 47 | 48 | 进入model文件夹,使用以下命令行中对 MobileNetV1 模型进行下载: 49 | 50 | ```shell 51 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/mobilenetv1_224x224_nv12.bin 52 | ``` 53 | 54 | 由于此模型是由地平线参考算法进行模型量化后得到的产出物,故该模型不提供 onnx 格式文件。若需要 MobileNetV1 模型量化转换,可以参考本仓库其他模型的转换步骤。 55 | 56 | ## 4. 
部署测试 57 | 58 | 在下载完毕 .bin 文件后,可以执行 test_MobileNetV1.ipynb MobileNetV1 模型 jupyter 脚本文件,在板端实际运行体验实际测试效果。需要更改测试图片,可额外下载数据集后,放入到data文件夹下并更改 jupyter 文件中图片的路径 59 | 60 | ![](./data/inference.png) 61 | 62 | -------------------------------------------------------------------------------- /demos/classification/MobileNetV1/data/bulbul.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV1/data/bulbul.JPEG -------------------------------------------------------------------------------- /demos/classification/MobileNetV1/data/depthwise&pointwise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV1/data/depthwise&pointwise.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV1/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV1/data/inference.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV1/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/mobilenetv1_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileNetV2/data/Scottish_deerhound.JPEG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV2/data/Scottish_deerhound.JPEG -------------------------------------------------------------------------------- /demos/classification/MobileNetV2/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV2/data/inference.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV2/data/mobilenetv2_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV2/data/mobilenetv2_architecture.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV2/data/seperated_conv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV2/data/seperated_conv.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV2/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/mobilenetv2_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileNetV3/data/MobileNetV3_architecture.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV3/data/MobileNetV3_architecture.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV3/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV3/data/inference.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV3/data/kit_fox.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV3/data/kit_fox.JPEG -------------------------------------------------------------------------------- /demos/classification/MobileNetV3/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileNetV3_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileNetV3/yaml/MobileNetV3_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './mobilenetv3_large_100.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'MobileNetV3_224x224_nv12' 21 | output_model_file_prefix: 'MobileNetV3_224x224_nv12' 22 | node_info: { 23 | "/blocks/blocks.3/blocks.3.1/conv_pwl/Conv": { 24 | 'ON': 'BPU', 25 | 'InputType': 'int16', 26 | 'OutputType': 'int16' 27 | }, 28 | "/blocks/blocks.0/blocks.0.0/conv_dw/Conv": { 29 | 'ON': 'BPU', 30 | 'InputType': 'int16', 31 | 'OutputType': 'int16' 32 | }, 33 | "/bn1/act/Mul": { 34 | 'ON': 'BPU', 35 | 'InputType': 'int16', 36 | 'OutputType': 'int16' 37 | }, 38 | "/blocks/blocks.0/blocks.0.0/Add": { 39 | 'ON': 'BPU', 40 | 'InputType': 'int16', 41 | 'OutputType': 'int16' 42 | }, 43 | } 44 | 45 | 46 | 47 | input_parameters: 48 | input_name: "" 49 | input_type_rt: 'nv12' 50 | input_type_train: 'rgb' 51 | input_layout_train: 'NCHW' 52 | input_shape: '' 53 | norm_type: 'data_mean_and_scale' 54 | mean_value: 123.675 116.28 103.53 55 | scale_value: 0.01712475 0.017507 0.01742919 56 | 57 | 58 | calibration_parameters: 59 | cal_data_dir: './calibration_data_rgb_f32' 60 | cal_data_type: 'float32' 61 | calibration_type: 'default' 62 | 63 | 64 | compiler_parameters: 65 | compile_mode: 'latency' 66 | debug: False 67 | optimize_level: 'O3' 68 | 69 | -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/data/MobileNetV4_architecture.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV4/data/MobileNetV4_architecture.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/data/great_grey_owl.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV4/data/great_grey_owl.JPEG -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileNetV4/data/inference.png -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileNetV4_conv_medium_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileNetV4_conv_small_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/yaml/MobileNetV4_medium.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './mobilenetv4_conv_medium_deploy.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'MobileNetv4_medium_224x224_nv12' 21 | output_model_file_prefix: 'MobileNetv4_medium_224x224_nv12' 22 | 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | 35 | calibration_parameters: 36 | cal_data_dir: './calibration_data_rgb_f32' 37 | cal_data_type: 'float32' 38 | calibration_type: 'default' 39 | 40 | 41 | compiler_parameters: 42 | compile_mode: 'latency' 43 | debug: False 44 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/MobileNetV4/yaml/MobileNetV4_small.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021-2024 D-Robotics Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | model_parameters: 17 | onnx_model: './mobilenetv4_conv_small.onnx' 18 | march: "bayes-e" 19 | layer_out_dump: False 20 | working_dir: 'MobileNetv4_small_224x224_nv12' 21 | output_model_file_prefix: 'MobileNetv4_small_224x224_nv12' 22 | 23 | 24 | input_parameters: 25 | input_name: "" 26 | input_type_rt: 'nv12' 27 | input_type_train: 'rgb' 28 | input_layout_train: 'NCHW' 29 | input_shape: '' 30 | norm_type: 'data_mean_and_scale' 31 | mean_value: 123.675 116.28 103.53 32 | scale_value: 0.01712475 0.017507 0.01742919 33 | 34 | 35 | calibration_parameters: 36 | cal_data_dir: './calibration_data_rgb_f32' 37 | cal_data_type: 'float32' 38 | calibration_type: 'default' 39 | 40 | 41 | compiler_parameters: 42 | compile_mode: 'latency' 43 | debug: False 44 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/MobileOne/data/MobileOne_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileOne/data/MobileOne_architecture.png -------------------------------------------------------------------------------- /demos/classification/MobileOne/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileOne/data/inference.png 
-------------------------------------------------------------------------------- /demos/classification/MobileOne/data/tiger_beetle.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/MobileOne/data/tiger_beetle.JPEG -------------------------------------------------------------------------------- /demos/classification/MobileOne/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileOne_S0_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileOne_S1_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileOne_S2_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileOne_S3_224x224_nv12.bin 7 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/MobileOne_S4_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/MobileOne/yaml/MobileOne_S0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './mobileone_s0.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'MobileOne_224x224_nv12_int8' 15 | output_model_file_prefix: 'MobileOne_224x224_nv12' 16 | 17 | 18 | input_parameters: 19 | input_name: "" 20 | input_type_rt: 'nv12' 21 | input_type_train: 'rgb' 22 | input_layout_train: 'NCHW' 23 | input_shape: '' 24 | norm_type: 'data_mean_and_scale' 25 | mean_value: 123.675 116.28 103.53 26 | scale_value: 0.01712475 0.017507 0.01742919 27 | 28 | 29 | calibration_parameters: 30 | cal_data_dir: './calibration_data_rgb_f32' 31 | cal_data_type: 'float32' 32 | calibration_type: 'default' 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' 38 | jobs: 32 39 | 40 | -------------------------------------------------------------------------------- /demos/classification/MobileOne/yaml/MobileOne_S1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './mobileone_s1.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'MobileOne_224x224_nv12_int8' 15 | output_model_file_prefix: 'MobileOne_224x224_nv12' 16 | 17 | 18 | input_parameters: 19 | input_name: "" 20 | input_type_rt: 'nv12' 21 | input_type_train: 'rgb' 22 | input_layout_train: 'NCHW' 23 | input_shape: '' 24 | norm_type: 'data_mean_and_scale' 25 | mean_value: 123.675 116.28 103.53 26 | scale_value: 0.01712475 0.017507 0.01742919 27 | 28 | 29 | calibration_parameters: 30 | cal_data_dir: './calibration_data_rgb_f32' 31 | cal_data_type: 'float32' 32 | calibration_type: 'default' 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' 38 | jobs: 32 39 | 40 | -------------------------------------------------------------------------------- /demos/classification/MobileOne/yaml/MobileOne_S2_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './mobileone_s2.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'MobileOne_224x224_nv12_int8' 15 | output_model_file_prefix: 'MobileOne_224x224_nv12' 16 | 17 | 18 | input_parameters: 19 | input_name: "" 20 | input_type_rt: 'nv12' 21 | input_type_train: 'rgb' 22 | input_layout_train: 'NCHW' 23 | input_shape: '' 24 | norm_type: 'data_mean_and_scale' 25 | mean_value: 123.675 116.28 103.53 26 | scale_value: 0.01712475 0.017507 0.01742919 27 | 28 | 29 | calibration_parameters: 30 | cal_data_dir: './calibration_data_rgb_f32' 31 | cal_data_type: 'float32' 32 | calibration_type: 'default' 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' 38 | jobs: 32 39 | 40 | -------------------------------------------------------------------------------- /demos/classification/MobileOne/yaml/MobileOne_S3_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './mobileone_s3.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'MobileOne_224x224_nv12_int8' 15 | output_model_file_prefix: 'MobileOne_224x224_nv12' 16 | 17 | 18 | input_parameters: 19 | input_name: "" 20 | input_type_rt: 'nv12' 21 | input_type_train: 'rgb' 22 | input_layout_train: 'NCHW' 23 | input_shape: '' 24 | norm_type: 'data_mean_and_scale' 25 | mean_value: 123.675 116.28 103.53 26 | scale_value: 0.01712475 0.017507 0.01742919 27 | 28 | 29 | calibration_parameters: 30 | cal_data_dir: './calibration_data_rgb_f32' 31 | cal_data_type: 'float32' 32 | calibration_type: 'default' 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' 38 | jobs: 32 39 | 40 | -------------------------------------------------------------------------------- /demos/classification/MobileOne/yaml/MobileOne_S4_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './mobileone_s4.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'MobileOne_224x224_nv12_int8' 15 | output_model_file_prefix: 'MobileOne_224x224_nv12' 16 | 17 | 18 | input_parameters: 19 | input_name: "" 20 | input_type_rt: 'nv12' 21 | input_type_train: 'rgb' 22 | input_layout_train: 'NCHW' 23 | input_shape: '' 24 | norm_type: 'data_mean_and_scale' 25 | mean_value: 123.675 116.28 103.53 26 | scale_value: 0.01712475 0.017507 0.01742919 27 | 28 | 29 | calibration_parameters: 30 | cal_data_dir: './calibration_data_rgb_f32' 31 | cal_data_type: 'float32' 32 | calibration_type: 'default' 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' 38 | jobs: 32 39 | 40 | -------------------------------------------------------------------------------- /demos/classification/RepGhost/data/RepGhost_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepGhost/data/RepGhost_architecture.png -------------------------------------------------------------------------------- /demos/classification/RepGhost/data/ibex.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepGhost/data/ibex.JPEG -------------------------------------------------------------------------------- /demos/classification/RepGhost/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepGhost/data/inference.png -------------------------------------------------------------------------------- 
/demos/classification/RepGhost/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepGhost_100_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepGhost_111_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepGhost_130_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepGhost_150_224x224_nv12.bin 7 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepGhost_200_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/RepGhost/yaml/RepGhost_100.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './repghostnet_100.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'RepGhost_224x224_nv12' 15 | output_model_file_prefix: 'RepGhost_224x224_nv12' 16 | debug_mode: 'dump_calibration_data' 17 | 18 | 19 | input_parameters: 20 | input_name: "" 21 | input_type_rt: 'nv12' 22 | input_type_train: 'rgb' 23 | input_layout_train: 'NCHW' 24 | input_shape: '' 25 | norm_type: 'data_mean_and_scale' 26 | mean_value: 123.675 116.28 103.53 27 | scale_value: 0.01712475 0.017507 0.01742919 28 | 29 | 30 | calibration_parameters: 31 | cal_data_dir: './calibration_data_rgb_f32' 32 | cal_data_type: 'float32' 33 | calibration_type: 'default' 34 | 35 | 36 | compiler_parameters: 37 | compile_mode: 'latency' 38 | debug: False 39 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/RepGhost/yaml/RepGhost_111.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './repghostnet_111.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'RepGhost_224x224_nv12' 15 | output_model_file_prefix: 'RepGhost_224x224_nv12' 16 | debug_mode: 'dump_calibration_data' 17 | 18 | 19 | input_parameters: 20 | input_name: "" 21 | input_type_rt: 'nv12' 22 | input_type_train: 'rgb' 23 | input_layout_train: 'NCHW' 24 | input_shape: '' 25 | norm_type: 'data_mean_and_scale' 26 | mean_value: 123.675 116.28 103.53 27 | scale_value: 0.01712475 0.017507 0.01742919 28 | 29 | 30 | calibration_parameters: 31 | cal_data_dir: './calibration_data_rgb_f32' 32 | cal_data_type: 'float32' 33 | calibration_type: 'default' 34 | 35 | 36 | compiler_parameters: 37 | compile_mode: 'latency' 38 | debug: False 39 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/RepGhost/yaml/RepGhost_130.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './repghostnet_130.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'RepGhost_224x224_nv12' 15 | output_model_file_prefix: 'RepGhost_224x224_nv12' 16 | debug_mode: 'dump_calibration_data' 17 | 18 | 19 | input_parameters: 20 | input_name: "" 21 | input_type_rt: 'nv12' 22 | input_type_train: 'rgb' 23 | input_layout_train: 'NCHW' 24 | input_shape: '' 25 | norm_type: 'data_mean_and_scale' 26 | mean_value: 123.675 116.28 103.53 27 | scale_value: 0.01712475 0.017507 0.01742919 28 | 29 | 30 | calibration_parameters: 31 | cal_data_dir: './calibration_data_rgb_f32' 32 | cal_data_type: 'float32' 33 | calibration_type: 'default' 34 | 35 | 36 | compiler_parameters: 37 | compile_mode: 'latency' 38 | debug: False 39 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/RepGhost/yaml/RepGhost_150.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './repghostnet_150.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'RepGhost_224x224_nv12' 15 | output_model_file_prefix: 'RepGhost_224x224_nv12' 16 | debug_mode: 'dump_calibration_data' 17 | 18 | 19 | input_parameters: 20 | input_name: "" 21 | input_type_rt: 'nv12' 22 | input_type_train: 'rgb' 23 | input_layout_train: 'NCHW' 24 | input_shape: '' 25 | norm_type: 'data_mean_and_scale' 26 | mean_value: 123.675 116.28 103.53 27 | scale_value: 0.01712475 0.017507 0.01742919 28 | 29 | 30 | calibration_parameters: 31 | cal_data_dir: './calibration_data_rgb_f32' 32 | cal_data_type: 'float32' 33 | calibration_type: 'default' 34 | 35 | 36 | compiler_parameters: 37 | compile_mode: 'latency' 38 | debug: False 39 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/RepGhost/yaml/RepGhost_200.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | model_parameters: 11 | onnx_model: './repghostnet_200.onnx' 12 | march: "bayes-e" 13 | layer_out_dump: False 14 | working_dir: 'RepGhost_224x224_nv12' 15 | output_model_file_prefix: 'RepGhost_224x224_nv12' 16 | debug_mode: 'dump_calibration_data' 17 | 18 | 19 | input_parameters: 20 | input_name: "" 21 | input_type_rt: 'nv12' 22 | input_type_train: 'rgb' 23 | input_layout_train: 'NCHW' 24 | input_shape: '' 25 | norm_type: 'data_mean_and_scale' 26 | mean_value: 123.675 116.28 103.53 27 | scale_value: 0.01712475 0.017507 0.01742919 28 | 29 | 30 | calibration_parameters: 31 | cal_data_dir: './calibration_data_rgb_f32' 32 | cal_data_type: 'float32' 33 | calibration_type: 'default' 34 | 35 | 36 | compiler_parameters: 37 | compile_mode: 'latency' 38 | debug: False 39 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/RepVGG/data/RepVGG_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepVGG/data/RepVGG_architecture.png -------------------------------------------------------------------------------- /demos/classification/RepVGG/data/gooze.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepVGG/data/gooze.JPEG -------------------------------------------------------------------------------- /demos/classification/RepVGG/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepVGG/data/inference.png -------------------------------------------------------------------------------- 
/demos/classification/RepVGG/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_A0_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_A1_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_A2_224x224_nv12.bin 6 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_B0_224x224_nv12.bin 7 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_B1g2_224x224_nv12.bin 8 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepVGG_B1g4_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_A0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-A0.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-A0_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-A0_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_A1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-A1.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-A1_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-A1_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_A2_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-A2.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-A2_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-A2_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_B0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-B0.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-B0_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-B0_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_B1g2_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-B1g2.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-B1g2_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-B1g2_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepVGG/yaml/RepVGG_B1g4_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './RepVGG-B1g4.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepVGG-B1g4_224x224_nv12' 16 | output_model_file_prefix: 'RepVGG-B1g4_224x224_nv12' 17 | debug_mode: "dump_calibration_data" 18 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 19 | 20 | 21 | input_parameters: 22 | input_name: "" 23 | input_type_rt: 'nv12' 24 | input_type_train: 'rgb' 25 | input_layout_train: 'NCHW' 26 | input_shape: '' 27 | norm_type: 'data_mean_and_scale' 28 | mean_value: 123.675 116.28 103.53 29 | scale_value: 0.01712475 0.017507 0.01742919 30 | 31 | 32 | calibration_parameters: 33 | cal_data_dir: './calibration_data_rgb_f32' 34 | cal_data_type: 'float32' 35 | calibration_type: 'default' 36 | 37 | 38 | compiler_parameters: 39 | compile_mode: 'latency' 40 | debug: False 41 | optimize_level: 'O3' 42 | 43 | -------------------------------------------------------------------------------- /demos/classification/RepViT/data/RepViT_DW.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepViT/data/RepViT_DW.png -------------------------------------------------------------------------------- /demos/classification/RepViT/data/RepViT_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepViT/data/RepViT_architecture.png -------------------------------------------------------------------------------- /demos/classification/RepViT/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepViT/data/inference.png 
-------------------------------------------------------------------------------- /demos/classification/RepViT/data/yurt.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/RepViT/data/yurt.JPEG -------------------------------------------------------------------------------- /demos/classification/RepViT/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepViT_m0_9_224x224_nv12.bin 4 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepViT_m1_0_224x224_nv12.bin 5 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/RepViT_m1_1_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/RepViT/yaml/RepViT_m0_9_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './repvit_m0_9.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepViT_224x224_nv12' 16 | output_model_file_prefix: 'RepViT_224x224_nv12' 17 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 18 | 19 | 20 | input_parameters: 21 | input_name: "" 22 | input_type_rt: 'nv12' 23 | input_type_train: 'rgb' 24 | input_layout_train: 'NCHW' 25 | input_shape: '' 26 | norm_type: 'data_mean_and_scale' 27 | mean_value: 123.675 116.28 103.53 28 | scale_value: 0.01712475 0.017507 0.01742919 29 | 30 | 31 | calibration_parameters: 32 | cal_data_dir: './calibration_data_rgb_f32' 33 | cal_data_type: 'float32' 34 | calibration_type: 'default' 35 | 36 | 37 | compiler_parameters: 38 | compile_mode: 'latency' 39 | debug: False 40 | optimize_level: 'O3' 41 | 42 | -------------------------------------------------------------------------------- /demos/classification/RepViT/yaml/RepViT_m1_0_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './repvit_m1_0.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepViT_224x224_nv12' 16 | output_model_file_prefix: 'RepViT_224x224_nv12' 17 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 18 | 19 | 20 | input_parameters: 21 | input_name: "" 22 | input_type_rt: 'nv12' 23 | input_type_train: 'rgb' 24 | input_layout_train: 'NCHW' 25 | input_shape: '' 26 | norm_type: 'data_mean_and_scale' 27 | mean_value: 123.675 116.28 103.53 28 | scale_value: 0.01712475 0.017507 0.01742919 29 | 30 | 31 | calibration_parameters: 32 | cal_data_dir: './calibration_data_rgb_f32' 33 | cal_data_type: 'float32' 34 | calibration_type: 'default' 35 | 36 | 37 | compiler_parameters: 38 | compile_mode: 'latency' 39 | debug: False 40 | optimize_level: 'O3' 41 | 42 | -------------------------------------------------------------------------------- /demos/classification/RepViT/yaml/RepViT_m1_1_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 
8 | 9 | 10 | 11 | model_parameters: 12 | onnx_model: './repvit_m1_1.onnx' 13 | march: "bayes-e" 14 | layer_out_dump: False 15 | working_dir: 'RepViT_224x224_nv12' 16 | output_model_file_prefix: 'RepViT_224x224_nv12' 17 | remove_node_type: 'Quantize;Dequantize;Transpose;Cast;Reshape' 18 | 19 | 20 | input_parameters: 21 | input_name: "" 22 | input_type_rt: 'nv12' 23 | input_type_train: 'rgb' 24 | input_layout_train: 'NCHW' 25 | input_shape: '' 26 | norm_type: 'data_mean_and_scale' 27 | mean_value: 123.675 116.28 103.53 28 | scale_value: 0.01712475 0.017507 0.01742919 29 | 30 | 31 | calibration_parameters: 32 | cal_data_dir: './calibration_data_rgb_f32' 33 | cal_data_type: 'float32' 34 | calibration_type: 'default' 35 | 36 | 37 | compiler_parameters: 38 | compile_mode: 'latency' 39 | debug: False 40 | optimize_level: 'O3' 41 | 42 | -------------------------------------------------------------------------------- /demos/classification/ResNeXt/data/ResNet&ResNeXt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNeXt/data/ResNet&ResNeXt.png -------------------------------------------------------------------------------- /demos/classification/ResNeXt/data/bee_eater.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNeXt/data/bee_eater.JPEG -------------------------------------------------------------------------------- /demos/classification/ResNeXt/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNeXt/data/inference.png 
-------------------------------------------------------------------------------- /demos/classification/ResNeXt/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/ResNeXt50_32x4d_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/ResNeXt/yaml/ResNeXt50_32x4d_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 D-Robotics.All Rights Reserved. 2 | # 3 | # The material in this file is confidential and contains trade secrets 4 | # of D-Robotics Inc. This is proprietary information owned by 5 | # D-Robotics Inc. No part of this work may be disclosed, 6 | # reproduced, copied, transmitted, or used in any way for any purpose, 7 | # without the express written permission of D-Robotics Inc. 8 | 9 | model_parameters: 10 | onnx_model: './ResNeXt50_32x4d.onnx' 11 | march: "bayes-e" 12 | layer_out_dump: False 13 | working_dir: 'ResNeXt50_32x4d_224x224_nv12' 14 | output_model_file_prefix: 'ResNeXt50_32x4d_224x224_nv12' 15 | 16 | 17 | input_parameters: 18 | input_name: "" 19 | input_type_rt: 'nv12' 20 | input_type_train: 'rgb' 21 | input_layout_train: 'NCHW' 22 | input_shape: '' 23 | norm_type: 'data_mean_and_scale' 24 | mean_value: 123.675 116.28 103.53 25 | scale_value: 0.01712475 0.017507 0.01742919 26 | 27 | 28 | calibration_parameters: 29 | cal_data_dir: './calibration_data_rgb_f32' 30 | cal_data_type: 'float32' 31 | calibration_type: 'default' 32 | 33 | 34 | compiler_parameters: 35 | compile_mode: 'latency' 36 | debug: False 37 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/classification/ResNet/data/ResNet_architecture.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNet/data/ResNet_architecture.png -------------------------------------------------------------------------------- /demos/classification/ResNet/data/ResNet_architecture2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNet/data/ResNet_architecture2.png -------------------------------------------------------------------------------- /demos/classification/ResNet/data/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNet/data/inference.png -------------------------------------------------------------------------------- /demos/classification/ResNet/data/white_wolf.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/ResNet/data/white_wolf.JPEG -------------------------------------------------------------------------------- /demos/classification/ResNet/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/resnet18_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/classification/VargConvNet/data/box_turtle.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/classification/VargConvNet/data/box_turtle.JPEG 
-------------------------------------------------------------------------------- /demos/classification/VargConvNet/model/download.sh: -------------------------------------------------------------------------------- 1 | # Download bin models weights 2 | 3 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/vargconvnet_224x224_nv12.bin -------------------------------------------------------------------------------- /demos/detect/FCOS/imgs/demo_rdkx5_fcos_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/imgs/demo_rdkx5_fcos_detect.jpg -------------------------------------------------------------------------------- /demos/detect/FCOS/imgs/fcos_512x512_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/imgs/fcos_512x512_nv12.png -------------------------------------------------------------------------------- /demos/detect/FCOS/imgs/fcos_efficientnetb0_512x512_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/imgs/fcos_efficientnetb0_512x512_nv12.png -------------------------------------------------------------------------------- /demos/detect/FCOS/imgs/fcos_efficientnetb2_768x768_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/imgs/fcos_efficientnetb2_768x768_nv12.png -------------------------------------------------------------------------------- /demos/detect/FCOS/imgs/fcos_efficientnetb3_896x896_nv12.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/imgs/fcos_efficientnetb3_896x896_nv12.png -------------------------------------------------------------------------------- /demos/detect/FCOS/jupyter_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/FCOS/jupyter_result.jpg -------------------------------------------------------------------------------- /demos/detect/FCOS/models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | 4 | ## Bayes-e (RDK X5 & RDK X5 Module) 5 | ### bin - nv12 6 | fcos_efficientnetb0_512x512 7 | ```bash 8 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/fcos_efficientnetb0_detect_512x512_bayese_nv12.bin 9 | ``` 10 | 11 | fcos_efficientnetb2_768x768 12 | ```bash 13 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/fcos_efficientnetb2_detect_768x768_bayese_nv12.bin 14 | ``` 15 | 16 | fcos_efficientnetb3_896x896 17 | ```bash 18 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/fcos_efficientnetb3_detect_896x896_bayese_nv12.bin 19 | ``` 20 | 21 | ## Bayes (RDK Ultra & RDK Ultra Module) 22 | 23 | 24 | ## Nash-e (RDK S100) 25 | 26 | 27 | ## Nash-m (RDK S100P) 28 | 29 | 30 | ## Bernoulli2 (RDK X3 & RDK X3 Module) 31 | ### bin - nv12 32 | fcos 33 | ```bash 34 | cp /app/model/basic/fcos_512x512_nv12.bin . 
35 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_NCHWRGB/config_yolo11_detect_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolo11n.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'bin_dir/yolo11n_detect_bayese_640x640_nchwrgb' 6 | output_model_file_prefix: 'yolo11n_detect_bayese_640x640_nchwrgb' 7 | # YOLO11 n, s, m 8 | node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11 l, x 10 | # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'rgb' 16 | input_layout_rt: 'NCHW' 17 | input_type_train: 'rgb' 18 | input_layout_train: 'NCHW' 19 | norm_type: 'data_scale' 20 | scale_value: 0.003921568627451 21 | calibration_parameters: 22 | cal_data_dir: './calibration_data_rgb_f32_640' 23 | cal_data_type: 'float32' 24 | compiler_parameters: 25 | compile_mode: 'latency' 26 | debug: False 27 | optimize_level: 'O3' 28 | -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_NCHWRGB/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolo11_detect_nchwrgb) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
-std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_NCHWRGB/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 
51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_NCHWRGB/ptq_models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | ## Bayes-e (RDK X5 & RDK X5 Module) 4 | ### bin - NCHWRGB 5 | YOLO11n - Detect 6 | ```bash 7 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo11_detect_rgb/yolo11n_detect_bayese_640x640_nchwrgb_modified.bin 8 | ``` 9 | YOLO11s - Detect 10 | ```bash 11 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo11_detect_rgb/yolo11s_detect_bayese_640x640_nchwrgb_modified.bin 12 | ``` 13 | YOLO11m - Detect 14 | ```bash 15 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo11_detect_rgb/yolo11m_detect_bayese_640x640_nchwrgb_modified.bin 16 | ``` 17 | YOLO11l - Detect 18 | ```bash 19 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo11_detect_rgb/yolo11l_detect_bayese_640x640_nchwrgb_modified.bin 20 | ``` 21 | YOLO11x - Detect 22 | ```bash 23 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo11_detect_rgb/yolo11x_detect_bayese_640x640_nchwrgb_modified.bin 24 | ``` 25 | -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/config_yolo11_detect_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolo11n.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'bin_dir/yolo11n_detect_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo11n_detect_bayese_640x640_nv12' 7 | # YOLO11 n, s, m 8 | node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 9 | # YOLO11 l, x 10 
| # node_info: {"/model.10/m/m.0/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}, 11 | # "/model.10/m/m.1/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 12 | input_parameters: 13 | # input_batch: 8 14 | input_name: "" 15 | input_type_rt: 'nv12' 16 | input_type_train: 'rgb' 17 | input_layout_train: 'NCHW' 18 | norm_type: 'data_scale' 19 | scale_value: 0.003921568627451 20 | calibration_parameters: 21 | cal_data_dir: './calibration_data_rgb_f32_640' 22 | cal_data_type: 'float32' 23 | compiler_parameters: 24 | compile_mode: 'latency' 25 | debug: False 26 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | 
target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 
51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/YOLOv11_Detect_Origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/YOLOv11_Detect_Origin.png -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/YOLOv11_Detect_Quantize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/YOLOv11_Detect_Quantize.png -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/demo_rdkx5_yolov11n_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/demo_rdkx5_yolov11n_detect.jpg -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/ltrb2xyxy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/ltrb2xyxy.jpg -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/yolo11n_detect_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/yolo11n_detect_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/yolo11n_detect_bayese_640x640_nv12_modified.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/imgs/yolo11n_detect_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/config_yolov12_detect_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolo12n.onnx' 3 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 4 | layer_out_dump: False 5 | working_dir: 'yolo12n_detect_bayese_640x640_rgb' 6 | output_model_file_prefix: 'yolo12n_detect_bayese_640x640_rgb' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'rgb' 10 | input_layout_rt: 'NCHW' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolo12_detect_nchwrgb) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # 
add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 
34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_NCHWRGB/ptq_models/downloads.md: -------------------------------------------------------------------------------- 1 | # YOLO12-Detect_NCHWRGB (Bayes-e) 2 | 3 | ## YOLO12n 4 | ```bash 5 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo12_detect_rgb/yolov12n_detect_bayese_640x640_nchwrgb_modified.bin 6 | ``` 7 | 8 | ## YOLO12s 9 | ```bash 10 | ``` 11 | 12 | ## YOLO12m 13 | ```bash 14 | ``` 15 | 16 | ## YOLO12l 17 | ```bash 18 | ``` 19 | 20 | ## YOLO12x 21 | ```bash 22 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/config_yolov12_detect_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolo12n.onnx' 3 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 4 | layer_out_dump: False 5 | working_dir: 'yolo12n_detect_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolo12n_detect_bayese_640x640_nv12' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'nv12' 10 | input_type_train: 'rgb' 11 | input_layout_train: 'NCHW' 12 | norm_type: 'data_scale' 13 | scale_value: 0.003921568627451 14 | calibration_parameters: 15 | cal_data_dir: './calibration_data_rgb_f32_640' 16 | cal_data_type: 'float32' 17 | compiler_parameters: 18 | compile_mode: 'latency' 19 | debug: False 20 | optimize_level: 'O3' 
-------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov12_detect) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 
18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/imgs/yolov12n_detect_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO12/YOLO12-Detect_YUV420SP/imgs/yolov12n_detect_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/imgs/yolov12n_detect_bayese_640x640_nv12_modified.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLO12/YOLO12-Detect_YUV420SP/imgs/yolov12n_detect_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- /demos/detect/YOLO12/YOLO12-Detect_YUV420SP/ptq_models/README.md: -------------------------------------------------------------------------------- 1 | # YOLOv12-Detect_YUV420SP (Bayes-e) 2 | 3 | ## YOLOv12n 4 | ```bash 5 | wget 
https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolo12_detect_nv12/yolov12n_detect_bayese_640x640_nv12_modified.bin 6 | ``` 7 | 8 | ## YOLOv12s 9 | ```bash 10 | ``` 11 | 12 | ## YOLOv12m 13 | ```bash 14 | ``` 15 | 16 | ## YOLOv12l 17 | ```bash 18 | ``` 19 | 20 | ## YOLOv12x 21 | ```bash 22 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv10/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv10/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 
所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv10/imgs/YOLOv10_Detect_Origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv10/imgs/YOLOv10_Detect_Origin.png -------------------------------------------------------------------------------- /demos/detect/YOLOv10/imgs/YOLOv10_Detect_Quantize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv10/imgs/YOLOv10_Detect_Quantize.png -------------------------------------------------------------------------------- /demos/detect/YOLOv10/imgs/YOLOv8_Detect_Quantize.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv10/imgs/YOLOv8_Detect_Quantize.png -------------------------------------------------------------------------------- /demos/detect/YOLOv10/imgs/demo_rdkx5_yolov10n_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv10/imgs/demo_rdkx5_yolov10n_detect.jpg -------------------------------------------------------------------------------- /demos/detect/YOLOv10/jupyter_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv10/jupyter_result.jpg -------------------------------------------------------------------------------- /demos/detect/YOLOv10/models/download.md: -------------------------------------------------------------------------------- 1 | # Download weights 2 | 3 | - [Download weights](#download-weights) 4 | - [Bayes-e (RDK X5 \& RDK X5 Module)](#bayes-e-rdk-x5--rdk-x5-module) 5 | - [bin - nv12](#bin---nv12) 6 | - [Bayes (RDK Ultra \& RDK Ultra Module)](#bayes-rdk-ultra--rdk-ultra-module) 7 | - [Nash-e (RDK S100)](#nash-e-rdk-s100) 8 | - [Nash-m (RDK S100P)](#nash-m-rdk-s100p) 9 | - [Bernoulli2 (RDK X3 \& RDK X3 Module)](#bernoulli2-rdk-x3--rdk-x3-module) 10 | - [bin - nv12](#bin---nv12-1) 11 | 12 | 13 | 14 | ## Bayes-e (RDK X5 & RDK X5 Module) 15 | ### bin - nv12 16 | YOLOv10n - Detect 17 | ```bash 18 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10n_detect_bayese_640x640_nv12_modified.bin 19 | ``` 20 | YOLOv10s - Detect 21 | ```bash 22 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10s_detect_bayese_640x640_nv12_modified.bin 23 | ``` 24 | YOLOv10m - Detect 25 | ```bash 26 | wget 
https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10m_detect_bayese_640x640_nv12_modified.bin 27 | ``` 28 | YOLOv10b - Detect 29 | ```bash 30 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10b_detect_bayese_640x640_nv12_modified.bin 31 | ``` 32 | YOLOv10l - Detect 33 | ```bash 34 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10l_detect_bayese_640x640_nv12_modified.bin 35 | ``` 36 | YOLOv10x - Detect 37 | ```bash 38 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolov10x_detect_bayese_640x640_nv12_modified.bin 39 | ``` 40 | 41 | ## Bayes (RDK Ultra & RDK Ultra Module) 42 | 43 | 44 | ## Nash-e (RDK S100) 45 | 46 | 47 | ## Nash-m (RDK S100P) 48 | 49 | 50 | ## Bernoulli2 (RDK X3 & RDK X3 Module) 51 | ### bin - nv12 52 | YOLOv10n - Detect 53 | ```bash 54 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x3/yolov10n_detect_bernoulli2_640x640_nv12_modified.bin 55 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv10/ptq_yamls/yolov10_detect_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov10n.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolov10n_detect_bayese_640x640_nchw' 6 | output_model_file_prefix: 'yolov10n_detect_bayese_640x640_nchw' 7 | node_info: {"/model.10/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 8 | input_parameters: 9 | input_name: "" 10 | input_type_rt: 'rgb' 11 | input_layout_rt: 'NCHW' 12 | input_type_train: 'rgb' 13 | input_layout_train: 'NCHW' 14 | norm_type: 'data_scale' 15 | scale_value: 0.003921568627451 16 | calibration_parameters: 17 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 18 | cal_data_type: 'float32' 19 | compiler_parameters: 20 | compile_mode: 'latency' 21 | debug: False 22 | optimize_level: 'O3' 
-------------------------------------------------------------------------------- /demos/detect/YOLOv10/ptq_yamls/yolov10_detect_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov10n.onnx' 3 | march: "bayes-e" 4 | layer_out_dump: False 5 | working_dir: 'yolov10n_detect_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolov10n_detect_bayese_640x640_nv12' 7 | node_info: {"/model.10/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 8 | input_parameters: 9 | input_name: "" 10 | input_type_rt: 'nv12' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLOv10/ptq_yamls/yolov10_detect_bernoulli2_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: 'yolov10n.onnx' 3 | march: "bernoulli2" 4 | layer_out_dump: False 5 | working_dir: 'yolov10n_detect_bernoulli2_640x640_nv12' 6 | output_model_file_prefix: 'yolov10n_detect_bernoulli2_640x640_nv12' 7 | # node_info: {"/model.10/attn/Softmax": {'ON': 'BPU','InputType': 'int16','OutputType': 'int16'}} 8 | input_parameters: 9 | input_name: "" 10 | input_type_rt: 'nv12' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' 
-------------------------------------------------------------------------------- /demos/detect/YOLOv5/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv5/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 
18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv5/imgs/demo_rdkx5_yolov5s_tag2.0_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv5/imgs/demo_rdkx5_yolov5s_tag2.0_detect.jpg -------------------------------------------------------------------------------- /demos/detect/YOLOv5/imgs/yolov5n_tag_v7.0_detect_640x640_bayese_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv5/imgs/yolov5n_tag_v7.0_detect_640x640_bayese_nv12.png -------------------------------------------------------------------------------- /demos/detect/YOLOv5/imgs/yolov5s_tag_v2.0_detect_640x640_bayese_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv5/imgs/yolov5s_tag_v2.0_detect_640x640_bayese_nv12.png -------------------------------------------------------------------------------- 
/demos/detect/YOLOv5/ptq_yamls/yolov5_detect_bayese_640x640_nchw.yaml: -------------------------------------------------------------------------------- 1 | # 所有tag版本的yolov5均可使用此yaml文件 2 | model_parameters: 3 | onnx_model: './onnx/yolov5n_tag_v7.0_detect.onnx' 4 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 5 | layer_out_dump: False 6 | working_dir: 'yolov5n_tag_v7.0_detect_640x640_bayese_nv12' 7 | output_model_file_prefix: 'yolov5n_tag_v7.0_detect_640x640_bayese_nv12' 8 | input_parameters: 9 | input_type_rt: 'rgb' 10 | input_layout_rt: 'NCHW' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 17 | cal_data_type: 'float32' 18 | calibration_type: 'default' 19 | compiler_parameters: 20 | compile_mode: 'latency' 21 | debug: False 22 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLOv5/ptq_yamls/yolov5_detect_bayese_640x640_nv12.yaml: -------------------------------------------------------------------------------- 1 | # 所有tag版本的yolov5均可使用此yaml文件 2 | model_parameters: 3 | onnx_model: './onnx/yolov5n_tag_v7.0_detect.onnx' 4 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 5 | layer_out_dump: False 6 | working_dir: 'yolov5n_tag_v7.0_detect_640x640_bayese_nv12' 7 | output_model_file_prefix: 'yolov5n_tag_v7.0_detect_640x640_bayese_nv12' 8 | input_parameters: 9 | input_type_rt: 'nv12' 10 | input_type_train: 'rgb' 11 | input_layout_train: 'NCHW' 12 | norm_type: 'data_scale' 13 | scale_value: 0.003921568627451 14 | calibration_parameters: 15 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 16 | cal_data_type: 'float32' 17 | calibration_type: 'default' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' 
-------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/README.md: -------------------------------------------------------------------------------- 1 | English | [简体中文](./README_cn.md) 2 | 3 | # YOLOv8-Detect NCHWRGB 4 | 5 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | # YOLOv8-Detect NCHWRGB 4 | 5 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/config_yolov8_detect_nchw.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov8n.onnx' 3 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 4 | layer_out_dump: False 5 | working_dir: 'yolov8n_detect_bayese_640x640_nchw' 6 | output_model_file_prefix: 'yolov8n_detect_bayese_640x640_nchw' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'rgb' 10 | input_layout_rt: 'NCHW' 11 | input_type_train: 'rgb' 12 | input_layout_train: 'NCHW' 13 | norm_type: 'data_scale' 14 | scale_value: 0.003921568627451 15 | calibration_parameters: 16 | cal_data_dir: './calibration_data_rgb_f32_coco_640' 17 | cal_data_type: 'float32' 18 | compiler_parameters: 19 | compile_mode: 'latency' 20 | debug: False 21 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect_nchwrgb) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # 
target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 
34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/eval_cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect_nchwrgb_eval) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 10 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wl,-unresolved-symbols=ignore-in-shared-libs") 11 | 12 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 13 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 14 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 15 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 16 | if (NOT CMAKE_BUILD_TYPE) 17 | set(CMAKE_BUILD_TYPE Release) 18 | endif () 19 | 20 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 21 | # define dnn lib path 22 | set(DNN_PATH "/usr/include/dnn") 23 | 24 | set(DNN_LIB_PATH "/usr/lib/") 25 | 26 | include_directories(${DNN_PATH}) 27 | link_directories(${DNN_LIB_PATH}) 28 | 29 | add_executable(main main.cc) 30 | target_link_libraries(main 31 | ${OpenCV_LIBS} 32 | dnn 33 | pthread 34 | rt 35 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/eval_cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 
| 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` 58 | 59 | 将生成的*.txt结果转化为*.json结果. 60 | ```bash 61 | python3 eval_convert.py 62 | ``` 63 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_NCHWRGB/ptq_models/README.md: -------------------------------------------------------------------------------- 1 | # YOLOv8-Detect_NCHWRGB (Bayes-e) 2 | 3 | ## YOLOv8n 4 | ```bash 5 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_rgb/yolov8n_detect_bayese_640x640_nchwrgb_modified.bin 6 | ``` 7 | 8 | ## YOLOv8s 9 | ```bash 10 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_rgb/yolov8s_detect_bayese_640x640_nchwrgb_modified.bin 11 | ``` 12 | 13 | ## YOLOv8m 14 | ```bash 15 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_rgb/yolov8m_detect_bayese_640x640_nchwrgb_modified.bin 16 | ``` 17 | 18 | ## YOLOv8l 19 | ```bash 20 | wget 
https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_rgb/yolov8l_detect_bayese_640x640_nchwrgb_modified.bin 21 | ``` 22 | 23 | ## YOLOv8x 24 | ```bash 25 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_rgb/yolov8x_detect_bayese_640x640_nchwrgb_modified.bin 26 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/README.md: -------------------------------------------------------------------------------- 1 | English | [简体中文](./README_cn.md) 2 | 3 | # YOLOv8 Detect -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/config_yolov8_detect_nv12.yaml: -------------------------------------------------------------------------------- 1 | model_parameters: 2 | onnx_model: './yolov8n.onnx' 3 | march: "bayes-e" # X3: Bernoulli2, Ultra: Bayes, S100: Nash-e, S100P: Nash-m 4 | layer_out_dump: False 5 | working_dir: 'yolov8n_detect_bayese_640x640_nv12' 6 | output_model_file_prefix: 'yolov8n_detect_bayese_640x640_nv12' 7 | input_parameters: 8 | input_name: "" 9 | input_type_rt: 'nv12' 10 | input_type_train: 'rgb' 11 | input_layout_train: 'NCHW' 12 | norm_type: 'data_scale' 13 | scale_value: 0.003921568627451 14 | calibration_parameters: 15 | cal_data_dir: './calibration_data_rgb_f32_640' 16 | cal_data_type: 'float32' 17 | compiler_parameters: 18 | compile_mode: 'latency' 19 | debug: False 20 | optimize_level: 'O3' -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # # 添加可执行文件 10 | # add_executable(main main.cc) 11 | # # 链接OpenCV库 12 | # 
target_link_libraries(main ${OpenCV_LIBS}) 13 | 14 | 15 | 16 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wl,-unresolved-symbols=ignore-in-shared-libs") 18 | 19 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 20 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 21 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 22 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 23 | if (NOT CMAKE_BUILD_TYPE) 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif () 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | # define dnn lib path 29 | set(DNN_PATH "/usr/include/dnn") 30 | 31 | set(DNN_LIB_PATH "/usr/lib/") 32 | 33 | include_directories(${DNN_PATH}) 34 | link_directories(${DNN_LIB_PATH}) 35 | 36 | add_executable(main main.cc) 37 | target_link_libraries(main 38 | ${OpenCV_LIBS} 39 | dnn 40 | pthread 41 | rt 42 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 
34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/eval/README.md: -------------------------------------------------------------------------------- 1 | English | [简体中文](./README_cn.md) 2 | 3 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/eval/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/eval/eval_cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(rdk_yolov8_detect_nchwrgb_eval) 4 | 5 | 6 | # 设置OpenCV包 7 | find_package(OpenCV REQUIRED) 8 | 9 | # libdnn.so depends on system software dynamic link library, use -Wl,-unresolved-symbols=ignore-in-shared-libs to shield during compilation 10 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wl,-unresolved-symbols=ignore-in-shared-libs") 11 | 12 | set(CMAKE_CXX_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 13 | set(CMAKE_C_FLAGS_DEBUG " -Wall -Werror -g -O0 ") 14 | set(CMAKE_CXX_FLAGS_RELEASE " -Wall -Werror -O3 ") 15 | set(CMAKE_C_FLAGS_RELEASE " -Wall -Werror -O3 ") 16 | if (NOT CMAKE_BUILD_TYPE) 17 | set(CMAKE_BUILD_TYPE Release) 18 | endif () 19 | 20 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 21 | # define dnn lib path 22 | set(DNN_PATH "/usr/include/dnn") 23 | 24 | 
set(DNN_LIB_PATH "/usr/lib/") 25 | 26 | include_directories(${DNN_PATH}) 27 | link_directories(${DNN_LIB_PATH}) 28 | 29 | add_executable(main main.cc) 30 | target_link_libraries(main 31 | ${OpenCV_LIBS} 32 | dnn 33 | pthread 34 | rt 35 | dl) -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/eval/eval_cpp/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 注: 所有的Terminal命令基于以下目录`./rdk_model_zoo/demos/detect/YOLOv8/cpp` 4 | 5 | 安装依赖 6 | ```bash 7 | sudo apt update 8 | # OpenCV 9 | sudo apt install libopencv-dev python3-opencv libopencv-contrib-dev 10 | ``` 11 | 12 | 13 | BPU推理库已经在RDK平台的RDK OS中自带. 14 | - 头文件 15 | ```bash 16 | /usr/include/dnn 17 | . 18 | ├── hb_dnn_ext.h 19 | ├── hb_dnn.h 20 | ├── hb_dnn_status.h 21 | ├── hb_sys.h 22 | └── plugin 23 | ├── hb_dnn_dtype.h 24 | ├── hb_dnn_layer.h 25 | ├── hb_dnn_ndarray.h 26 | ├── hb_dnn_plugin.h 27 | └── hb_dnn_tuple.h 28 | ``` 29 | 30 | - 推理库 31 | ```bash 32 | /usr/lib/ 33 | . 34 | ├── libdnn.so 35 | └── libhbrt_bayes_aarch64.so 36 | ``` 37 | 38 | 39 | 上述头文件和动态库也可以通过OpenExploer发布物获取 40 | OE路径: `package/host/host_package/x5_aarch64/dnn_1.24.5.tar.gz` 41 | 42 | 清空之前的编译产物 (如果有) 43 | ```bash 44 | rm -rf build 45 | ``` 46 | 47 | 编译 48 | ```bash 49 | mkdir -p build && cd build 50 | cmake .. 51 | make 52 | ``` 53 | 54 | 运行 55 | ```bash 56 | ./main 57 | ``` 58 | 59 | 将生成的*.txt结果转化为*.json结果. 60 | ```bash 61 | python3 eval_convert.py 62 | ``` 63 | -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/eval/eval_pycocotools.py: -------------------------------------------------------------------------------- 1 | #!/user/bin/env python 2 | 3 | # Copyright (c) 2024,WuChao D-Robotics. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # 注意: 此程序推荐在BPU工具链Docker运行 18 | # Attention: This program runs on ToolChain Docker recommended. 19 | 20 | 21 | from pycocotools.coco import COCO 22 | from pycocotools.cocoeval import COCOeval 23 | 24 | import logging 25 | # 日志模块配置 26 | # logging configs 27 | logging.basicConfig( 28 | level = logging.DEBUG, 29 | format = '[%(name)s] [%(asctime)s.%(msecs)03d] [%(levelname)s] %(message)s', 30 | datefmt='%H:%M:%S') 31 | logger = logging.getLogger("RDK_YOLO") 32 | def main(): 33 | coco_true = COCO(annotation_file='instances_val2017.json') # 标准数据集(真值) 34 | coco_pre = coco_true.loadRes('predict_results.json') # 预测数据集(预测值) 35 | 36 | coco_evaluator = COCOeval(cocoGt=coco_true, cocoDt=coco_pre, iouType="bbox") #计算bbox值 37 | coco_evaluator.evaluate() 38 | coco_evaluator.accumulate() 39 | coco_evaluator.summarize() 40 | 41 | 42 | if __name__ == "__main__": 43 | main() -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/YOLOv8_Detect_Origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/YOLOv8_Detect_Origin.png -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/YOLOv8_Detect_Quantize.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/YOLOv8_Detect_Quantize.png -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/demo_rdkx5_yolov8n_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/demo_rdkx5_yolov8n_detect.jpg -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/ltrb2xyxy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/ltrb2xyxy.jpg -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/yolov8n_detect_bayese_640x640_nv12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/yolov8n_detect_bayese_640x640_nv12.png -------------------------------------------------------------------------------- /demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/yolov8n_detect_bayese_640x640_nv12_modified.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/imgs/yolov8n_detect_bayese_640x640_nv12_modified.png -------------------------------------------------------------------------------- 
/demos/detect/YOLOv8/YOLOv8-Detect_YUV420SP/ptq_models/README.md: -------------------------------------------------------------------------------- 1 | # YOLOv8-Detect_YUV420SP (Bayes-e) 2 | 3 | ## YOLOv8n 4 | ```bash 5 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_nv12/yolov8n_detect_bayese_640x640_nv12_modified.bin 6 | ``` 7 | 8 | ## YOLOv8s 9 | ```bash 10 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_nv12/yolov8s_detect_bayese_640x640_nv12_modified.bin 11 | ``` 12 | 13 | ## YOLOv8m 14 | ```bash 15 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_nv12/yolov8m_detect_bayese_640x640_nv12_modified.bin 16 | ``` 17 | 18 | ## YOLOv8l 19 | ```bash 20 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_nv12/yolov8l_detect_bayese_640x640_nv12_modified.bin 21 | ``` 22 | 23 | ## YOLOv8x 24 | ```bash 25 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/AAA_RDK_YOLO/yolov8_detect_nv12/yolov8x_detect_bayese_640x640_nv12_modified.bin 26 | ``` -------------------------------------------------------------------------------- /demos/llm/clip/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | Clip 4 | ======= 5 | 6 | # 1. 
模型介绍 7 | 8 | CLIP(Contrastive Language-Image Pre-training)是由OpenAI开发的一种多模态(文本和图像)预训练模型。CLIP模型通过学习如何对文本和图像进行对比,从而实现跨模态的理解。这种对比学习的方法使得CLIP能够在没有任何监督标签的情况下学习到文本和图像之间的语义关系。 9 | 10 | CLIP模型的核心思想是将文本和图像嵌入到一个共同的语义空间中,使得相关的文本描述和图像内容在这个空间中的表示彼此靠近,而不相关的则远离。这种设计使得CLIP模型能够在各种任务上表现出色,如图像分类、图像检索、文本分类等。 11 | 12 | CLIP模型的特点: 13 | 14 | 1.**多模态嵌入**:CLIP模型首先将文本和图像分别嵌入到一个共享的多维空间中。这个空间被设计成能够捕捉文本描述和图像内容之间的语义关系。 15 | 16 | 2.**对比学习**:CLIP使用对比学习的方法来训练模型。在对比学习中,模型被要求将相关的文本描述和图像内容映射到空间中的相邻位置,而不相关的则映射到远离的位置。这样,模型学习到了如何区分相关和不相关的文本-图像对。 17 | 18 | 3.**训练数据**:CLIP使用大规模的文本和图像数据集进行预训练,其中文本描述和图像内容是从互联网上收集而来的。这些数据集包含了各种不同的文本描述和图像内容,帮助模型学习到更广泛的语义关系。 19 | 20 | 4.**自监督学习**:CLIP模型采用了自监督学习的方法,即模型在训练过程中不需要人工标注的标签。相反,模型利用数据集中的文本描述和图像内容之间的自然关联来学习。 21 | 22 | 5.**跨任务应用**:由于CLIP学习到了文本和图像之间的通用语义关系,因此可以在各种任务上进行微调,如图像分类、图像检索、文本分类等。这种通用性使得CLIP在不同领域和任务上都能取得很好的表现。 23 | 24 | # 2. 模型下载地址 25 | 26 | 地瓜异构.bin模型文件已经上传至云服务器中,可通过 wget 命令在服务器网站中下载: 27 | 28 | ```shell 29 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/text_encoder.onnx 30 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/img_encoder.bin 31 | ``` 32 | 33 | 将img_encoder.bin和text_encoder.onnx放入与当前README.md的同级目录即可。 34 | 35 | # 3. 
输入输出数据 36 | 37 | ## 2.1 Image Encoder 38 | 39 | - 输入数据 40 | 41 | | 输入数据 | 数据类型 | 大小 | 数据排布格式 | 42 | | -------- | -------- | ------------------------------- | ------------ | 43 | | image | FLOAT32 | 1 x 3 x 224 x 224 | NCHW | 44 | 45 | - 输出数据 46 | 47 | | 输出数据 | 数据类型 | 大小 | 数据排布格式 | 48 | | -------- | -------- | ------------------------------- | ------------ | 49 | | image_feature | FLOAT32 | 1 x 512 | NCHW | 50 | 51 | ## 2.2 Text Encoder 52 | 53 | - 输入数据 54 | 55 | | 输入数据 | 数据类型 | 大小 | 数据排布格式 | 56 | | -------- | -------- | ------------------------------- | ------------ | 57 | | texts | INT32 | num_text x 77 | NCHW | 58 | 59 | - 输出数据 60 | 61 | | 输出数据 | 数据类型 | 大小 | 数据排布格式 | 62 | | -------- | -------- | ------------------------------- | ------------ | 63 | | text_features | FLOAT32 | feature_dim x 512 | NCHW | -------------------------------------------------------------------------------- /demos/llm/clip/bpe_simple_vocab_16e6.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/llm/clip/bpe_simple_vocab_16e6.txt.gz -------------------------------------------------------------------------------- /demos/llm/clip/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/llm/clip/dog.jpg -------------------------------------------------------------------------------- /demos/llm/clip/download.sh: -------------------------------------------------------------------------------- 1 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/text_encoder.onnx 2 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/img_encoder.bin -------------------------------------------------------------------------------- /demos/llm/yoloworld/README.md: 
-------------------------------------------------------------------------------- 1 | English| [简体中文](./README_cn.md) 2 | 3 | Yolo World 4 | ======= 5 | 6 | # 1. Model introduction 7 | 8 | YOLO (You Only Look Once) is a real-time object detection system whose core concept is to convert the object detection task into a single regression problem. YOLO World is the latest improved version of this model, offering higher accuracy and speed. It introduces an innovative approach to enhance YOLO's open vocabulary detection capabilities through visual language modeling and pre-training on large-scale datasets. Specifically, this method involves a new re-parameterizable Visual-Language Path Aggregation Network (RepVL-PAN) and region-text contrastive loss to facilitate interaction between visual and linguistic information. This approach efficiently detects a wide range of objects in zero-shot scenarios. On the challenging LVIS dataset, YOLO-World achieves an AP of 35.4 at 52.0 FPS on a V100, surpassing many state-of-the-art methods in both accuracy and speed. Additionally, the fine-tuned YOLO-World performs exceptionally well on various downstream tasks, including object detection and open-vocabulary instance segmentation. 9 | 10 | # 2. Model download link 11 | 12 | D-Robotics .bin model file has been uploaded to the cloud server, you can use `wget` to download from the server: 13 | 14 | ```shell 15 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo_world.bin 16 | ``` 17 | 18 | yolo_world.bin should be in the same directory as the current README.md. 19 | 20 | # 3. 
Input and Output Data 21 | 22 | ## 3.1 Image Encoder 23 | 24 | - Input Data 25 | 26 | | Input Data | Data Type | Shape | Layout | 27 | | -------- | -------- | ------------------------------- | ------------ | 28 | | image | FLOAT32 | 1 x 3 x 640 x 640 | NCHW | 29 | | text | FLOAT32 | 1 x 3 x 512 | NCHW | 30 | 31 | - Output Data 32 | 33 | | Input Data | Data Type | Shape | Layout | 34 | | -------- | -------- | ------------------------------- | ------------ | 35 | | classes_score | FLOAT32 | 1 x 8400 x 32 | NCHW | 36 | | bboxes | FLOAT32 | 1 x 8400 x 4 | NCHW | -------------------------------------------------------------------------------- /demos/llm/yoloworld/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | Yolo World 4 | ======= 5 | 6 | # 1. 模型介绍 7 | 8 | YOLO (You Only Look Once) 是一种实时目标检测系统,其核心理念是将目标检测任务转换为单次回归问题。YOLO World 是该模型的最新改进版本,具有更高的准确性和速度。这是一种通过视觉语言建模和大规模数据集的预训练来增强 YOLO 开放词汇检测能力的创新方法。具体来说,这是一种新的可重新参数化的视觉语言路径聚合网络 (RepVL-PAN) 和区域文本对比损失,以促进视觉信息和语言信息之间的交互。该方法在零样本情况下高效地检测到广泛的对象。在具有挑战性的 LVIS 数据集上,YOLO-World 在 V100 上以 52.0 FPS 实现了 35.4 的 AP,超越了许多最新的先进方法,无论在准确性还是速度方面。此外,微调后的 YOLO-World 在多个下游任务中表现出色,包括目标检测和开放词汇实例分割。 9 | 10 | # 2. 模型下载地址 11 | 12 | 地瓜异构.bin模型文件已经上传至云服务器中,可通过 wget 命令在服务器网站中下载: 13 | 14 | ```shell 15 | wget https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo_world.bin 16 | ``` 17 | 18 | 将yolo_world.bin放入与当前README.md的同级目录即可。 19 | 20 | # 3. 
输入输出数据 21 | 22 | 23 | - 输入数据 24 | 25 | | 输入数据 | 数据类型 | 大小 | 数据排布格式 | 26 | | -------- | -------- | ------------------------------- | ------------ | 27 | | image | FLOAT32 | 1 x 3 x 640 x 640 | NCHW | 28 | | text | FLOAT32 | 1 x 3 x 512 | NCHW | 29 | 30 | 31 | - 输出数据 32 | 33 | | 输出数据 | 数据类型 | 大小 | 数据排布格式 | 34 | | -------- | -------- | ------------------------------- | ------------ | 35 | | classes_score | FLOAT32 | 1 x 8400 x 32 | NCHW | 36 | | bboxes | FLOAT32 | 1 x 8400 x 4 | NCHW | 37 | -------------------------------------------------------------------------------- /demos/llm/yoloworld/dog.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/llm/yoloworld/dog.jpeg -------------------------------------------------------------------------------- /demos/llm/yoloworld/download.sh: -------------------------------------------------------------------------------- 1 | wget -P $(dirname $0) https://archive.d-robotics.cc/downloads/rdk_model_zoo/rdk_x5/yolo_world.bin -------------------------------------------------------------------------------- /demos/solutions/RDK_LLM_Solutions/README.md: -------------------------------------------------------------------------------- 1 | 2 | ![](imgs/RDK_LLM_Solution.jpg) 3 | 4 | English | [简体中文](./README_cn.md) 5 | 6 | # RDK LLM Solution 7 | 8 | -------------------------------------------------------------------------------- /demos/solutions/RDK_LLM_Solutions/README_cn.md: -------------------------------------------------------------------------------- 1 | ![](imgs/RDK_LLM_Solution.jpg) 2 | 3 | [English](./README.md) | 简体中文 4 | 5 | # RDK LLM Solution 6 | 7 | ## 摘要 8 | 9 | 目前调研来看,带NPU的SBC运行YOLO这种视觉模型,NPU的速度是CPU速度的几十到几百倍,但是运行LLM这种语言模型,NPU的速度最多也只有CPU的1.2~1.6倍,NPU方案会限制上下文长度到256等非常短的长度,同时让NPU运行LLM会花费更多的人力和物力成本,所以现阶段使用CPU运行语言任务,NPU专注于视觉任务,也不失为一种较为合理的搭配方式。 10 | 11 | 本文参考社区大佬@潜沉10的文章,在RDK 
X5上使用llama.cpp框架运行语言大模型。分别测试了thread_num=4和8的情况,测试共8家的语言大模型,一共63个,涵盖0.5B到14B等不同参数量,其中能跑到10token/s以上的9个,5token/s以上的14个,1 token/s以上的52个。使用的GGUF模型文件也保存在了百度网盘,欢迎大家在RDK X5上来尝试,希望大家玩的开心。 12 | 13 | ## 测试结果 14 | 飞书文档: [https://horizonrobotics.feishu.cn/docx/LQU9dYyjcoXJ9hxJdUYc2l4InEf](https://horizonrobotics.feishu.cn/docx/LQU9dYyjcoXJ9hxJdUYc2l4InEf) 15 | 16 | ## 使用方式 17 | 18 | ### 参考: 19 | 20 | RDK使用llama.cpp运行语言大模型: [https://developer.d-robotics.cc/forumDetail/256524800871478519](https://developer.d-robotics.cc/forumDetail/256524800871478519) 21 | 22 | llama.cpp: [https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) 23 | 24 | 25 | GLM的部分模型: [https://huggingface.co/THUDM/glm-edge-1.5b-chat-gguf/blob/main/README_zh.md](https://huggingface.co/THUDM/glm-edge-1.5b-chat-gguf/blob/main/README_zh.md) 26 | 27 | ### 下载编译llama.cpp 28 | 29 | ```bash 30 | git clone https://github.com/ggerganov/llama.cpp 31 | cd llama.cpp 32 | cmake -B build 33 | cmake --build build --config Release 34 | ``` 35 | 36 | ### 添加环境变量 37 | 38 | ```bash 39 | PATH=/media/rootfs/99_projects/test_llama.cpp/llama.cpp/build/bin:$PATH 40 | ``` 41 | 42 | ### 使用以下命令运行 43 | 44 | 使用固定提问,使得生成128个token,然后Ctrl+C查看性能结果。 45 | 46 | ```bash 47 | llama-cli \ 48 | -m \ 49 | -n 512 -c 2048 \ 50 | -p "You are a helpful assistant" -co -cnv \ 51 | --threads 8 52 | 53 | # 如果是RWKV这种不带KV Cache的LLM,需要添加以下参数 54 | llama-cli \ 55 | -m rwkv-6-finch-3b-Q8_0.gguf \ 56 | -n 512 -c 2048 \ 57 | -p "You are a helpful assistant" -co -cnv \ 58 | --threads 8 --no-context-shift 59 | ``` -------------------------------------------------------------------------------- /demos/solutions/RDK_LLM_Solutions/imgs/RDK_LLM_Solution.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_LLM_Solutions/imgs/RDK_LLM_Solution.jpg 
-------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/IPC_Camera/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/IPC_Camera/README.md -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/IPC_Camera/README_cn.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/IPC_Camera/README_cn.md -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/MIPI_Camera/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/MIPI_Camera/README.md -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/MIPI_Camera/README_cn.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/MIPI_Camera/README_cn.md -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/README.md: -------------------------------------------------------------------------------- 1 | ![](imgs/RDK_Video_Solution.jpg) 2 | 3 | English | [简体中文](./README_cn.md) 4 | 5 | 6 | # RDK Express 7 | 8 | IPC Camera 9 | 10 | 11 | USB Camera 12 | 13 | 14 | MIPI Camera -------------------------------------------------------------------------------- 
/demos/solutions/RDK_Video_Solutions/README_cn.md: -------------------------------------------------------------------------------- 1 | ![](imgs/RDK_Video_Solution.jpg) 2 | 3 | [English](./README.md) | 简体中文 4 | 5 | # RDK Video Solution 6 | 7 | 在实时视频流推理中,RDK Video Solution主要优化了以下几方面: 8 | 9 | - 程序初始化和加载模型时间:在实时视频流推理的任务中,这些时间只需要消耗一次,不需要重复加载程序和模型。 10 | 11 | - 前处理时间:演示程序为了快速得到推理效果和尽可能大的兼容性,使用OpenCV准备nv12的输入数据,涉及到了比较冗余的色彩空间转换,导致了耗时长。实际量产中会使用更加高效的视频通路,使用JPU,VPU,Codec等硬件来准备输入数据,VPS/VSE等硬件来前处理,BPU来完成归一化等计算,这些都会比OpenCV快,且不消耗CPU。 12 | 13 | - 推理时间:实际量产中都会设计多线程和多进程等异步处理逻辑,而不是一个简单的while循环,只有多线程异步处理才会让CPU,BPU,JPU,VPU,Codec,VSE等硬件异步计算,同时工作。可以参考Model Zoo最新README将编译好的bin模型给TROS使用,只要摄像头帧率够,都可以跑满BPU的吞吐量。 14 | 15 | - 关于渲染,量产中,渲染是一个较低的需求,这部分一般不会在板卡运行。如果运行也会使用硬件OSD叠加,不会使用OpenCV。 16 | 17 | ## 串行程序设计和并行程序设计 18 | ### 串行程序设计 19 | ![](imgs/Serial_Programming.png) 20 | 21 | ### 并行程序设计 22 | ![](imgs/Parallel_Programming.png) 23 | 24 | ## DataFlow参考 25 | 26 | ### IPC Camera (TROS DataFlow) 27 | ![](imgs/TROS_IPC_Camera.png) 28 | 29 | ### USB Camera (TROS DataFlow) 30 | ![](imgs/TROS_USB_Camera.png) 31 | 32 | ### MIPI Camera (TROS DataFlow) 33 | ![](imgs/TROS_MIPI_Camera.png) -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/USB_Camera/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/USB_Camera/README.md -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/USB_Camera/README_cn.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/USB_Camera/README_cn.md 
-------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/Parallel_Programming.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/Parallel_Programming.png -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/RDK_Video_Solution.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/RDK_Video_Solution.jpg -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/Serial_Programming.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/Serial_Programming.png -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/TROS_IPC_Camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/TROS_IPC_Camera.png -------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/TROS_MIPI_Camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/TROS_MIPI_Camera.png 
-------------------------------------------------------------------------------- /demos/solutions/RDK_Video_Solutions/imgs/TROS_USB_Camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/solutions/RDK_Video_Solutions/imgs/TROS_USB_Camera.png -------------------------------------------------------------------------------- /demos/tools/batch_eval_pycocotools/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/tools/batch_eval_pycocotools/README.md -------------------------------------------------------------------------------- /demos/tools/batch_eval_pycocotools/batch_scp.sh: -------------------------------------------------------------------------------- 1 | scp root@10.64.60.202:/media/rootfs/rdk_model_zoo_x3/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/eval/eval_cpp/*json . 2 | 3 | scp root@10.64.60.202:/media/rootfs/rdk_model_zoo_x3/demos/detect/YOLO11/YOLO11-Detect_NCHWRGB/eval_cpp/*json . 4 | 5 | scp root@10.64.60.202:/media/rootfs/rdk_model_zoo_x3/demos/detect/YOLO11/YOLO11-Detect_YUV420SP/*json . 
-------------------------------------------------------------------------------- /demos/tools/batch_eval_pycocotools/en_COCO2017val.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/tools/batch_eval_pycocotools/en_COCO2017val.md -------------------------------------------------------------------------------- /demos/tools/batch_eval_pycocotools/eval_batch_pytorch_generate_labels.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/tools/batch_eval_pycocotools/eval_batch_pytorch_generate_labels.py -------------------------------------------------------------------------------- /demos/tools/batch_eval_pycocotools/eval_pytorch_generate_labels.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/demos/tools/batch_eval_pycocotools/eval_pytorch_generate_labels.py -------------------------------------------------------------------------------- /demos/tools/batch_mapper/README.md: -------------------------------------------------------------------------------- 1 | - [BATCH MAPPER CN](#batch-mapper-cn) 2 | - [BATCH MAPPER EN](#batch-mapper-en) 3 | 4 | 5 | # BATCH MAPPER CN 6 | 7 | Batch Mapper用于将某一个目录下的onnx模型批量的按照某一个yaml配置进行编译,batch会帮你完成以下步骤: 8 | - 遍历目录下的所有onnx模型文件 9 | - 生成对应的yaml文件在当前目录 10 | - 开始编译,编译的产物会统一在`ws_path`文件夹下 11 | - 编译结束后,将编译日志cp到编译工作目录 12 | - 按照要求移除反量化节点 13 | - 移除反量化节点后,将移除日志cp到编译工作目录 14 | - 将编译产物拷贝到发布目录 15 | 16 | 如何在后台挂起编译? 
17 | ```bash 18 | # 安装tmux 19 | sudo apt update 20 | sudo apt install tmux 21 | # 使用tmux 22 | tmux new -s batch_mapper 23 | # 运行docker和命令, 例如 24 | sudo docker run --gpus all -it -v /ws:/open_explorer hub.hobot.cc/aitools/ai_toolchain_ubuntu_20_x5_gpu:v1.2.8 25 | python3 batch_mapper.py 26 | python3 batch_mapper.py 2>&1 | tee batch_mapper.txt # 运行并保存日志 27 | # 断开tmux 28 | 按下Ctrl + B, 然后按下D 29 | # 断开terminal 30 | exit 31 | # 查看tmux的会话 32 | tmux ls 33 | # 重新连接到tmux 34 | tmux attach -t batch_mapper 35 | # 关闭tmux会话 36 | tmux kill-session -t batch_mapper 37 | ``` 38 | 39 | 40 | 41 | # BATCH MAPPER EN 42 | 43 | Batch Mapper is used to batch compile ONNX models in a specific directory according to a certain YAML configuration. Batch will help you complete the following steps: 44 | - Traverse all ONNX model files in the directory 45 | - Generate corresponding YAML files in the current directory 46 | - Start compiling, with compilation results unified under the `ws_path` folder 47 | - After compilation ends, copy the compilation logs to the compilation work directory 48 | - Remove dequantization nodes as required 49 | - After removing dequantization nodes, copy the removal logs to the compilation work directory 50 | - Copy the compiled results to the release directory 51 | 52 | How to run the compilation in the background using tmux? 
53 | ```bash 54 | # Install tmux 55 | sudo apt update 56 | sudo apt install tmux 57 | # Use tmux 58 | tmux new -s batch_mapper 59 | # Run Docker and command, for example 60 | sudo docker run --gpus all -it -v /ws:/open_explorer hub.hobot.cc/aitools/ai_toolchain_ubuntu_20_x5_gpu:v1.2.8 python3 batch_mapper.py 61 | # Detach from tmux 62 | Press Ctrl+B, then press D 63 | # Exit terminal 64 | exit 65 | # List tmux sessions 66 | tmux ls 67 | # Reattach to tmux 68 | tmux attach -t batch_mapper 69 | # Kill tmux session 70 | tmux kill-session -t batch_mapper 71 | ``` -------------------------------------------------------------------------------- /demos/tools/batch_perf/README_cn.md: -------------------------------------------------------------------------------- 1 | # BATCH PERF 2 | 3 | 使用Python脚本, 对某一个特定文件夹内的所有*bin结尾的模型文件进行perf. 4 | 5 | 顺序: perf的顺序按照模型文件的大小,从小到大. 6 | 7 | 线程数量: 从1到MAX_NUM, 其中MAX_NUM一般设置到2. 8 | 9 | 映射 10 | ```bash 11 | alias perf='python3 /demos/basic/batch_perf/batch_perf.py' 12 | ``` 13 | -------------------------------------------------------------------------------- /demos/tools/generate_calibration_data/README.md: -------------------------------------------------------------------------------- 1 | # 如何生成校准数据集 2 | 3 | 4 | ## 方法一 5 | 使用OpenExplore包每个转换示例的02_preprocess.sh脚本, 生成校准数据集。 6 | OpenExplore包的获取方式参考本仓库总README的**RDK算法工具链资源**小节。 7 | 8 | 如果遇到类似`Can't reshape 1354752 in (1,3,640,640)`的错误,请修改同级目录下preprocess.py文件中的分辨率,修改为准备转化的onnx一样大小的分辨率,并删除所有的校准数据集,再重新运行02脚本,生成校准数据集。 9 | 目前这个示例的校准数据集来自../../../01_common/calibration data/coco目录,生成在./calibration_data_rgb_f32目录。 10 | 11 | ## 方法二 12 | 使用OpenCV和numpy等库准备校准数据,校准数据除了yaml中配置的减通道均值和归一化的操作,剩下的全部和训练对齐。 -------------------------------------------------------------------------------- /requirement.txt: -------------------------------------------------------------------------------- 1 | ftfy 2 | regex 3 | scipy 4 | # pycocotools 5 | onnx 6 | onnxruntime 7 | notebook 8 | 9 | 10 | --index-url 
http://archive.d-robotics.cc/simple/ 11 | --trusted-host archive.d-robotics.cc 12 | 13 | bpu_infer_lib_x5 14 | -------------------------------------------------------------------------------- /resource/assets/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/bus.jpg -------------------------------------------------------------------------------- /resource/assets/kite.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/kite.jpg -------------------------------------------------------------------------------- /resource/assets/small_img_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/small_img_1.jpg -------------------------------------------------------------------------------- /resource/assets/small_img_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/small_img_2.jpg -------------------------------------------------------------------------------- /resource/assets/zebra_cls.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/zebra_cls.jpg -------------------------------------------------------------------------------- /resource/assets/zidane.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/assets/zidane.jpg -------------------------------------------------------------------------------- /resource/datasets/COCO2017/README.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | 4 | ## COCO2017数据集简单介绍 5 | MS COCO的全称是Microsoft Common Objects in Context,起源于微软于2014年出资标注的Microsoft COCO数据集,与ImageNet竞赛一样,被视为是计算机视觉领域最受关注和最权威的比赛之一。 6 | 7 | ## Introduction to COCO2017 8 | Microsoft Common Objects in Context, originating from the Microsoft COCO dataset annotated by Microsoft in 2014. Like the ImageNet competition, it is considered one of the most watched and authoritative competitions in the field of computer vision. 9 | 10 | 11 | 12 | ## COCO2017数据集下载 (Download COCO2017) 13 | 14 | ### Images 15 | 16 | 2017 Train images [118K/18GB]: http://images.cocodataset.org/zips/train2017.zip 17 | 18 | 2017 Val images [5K/1GB]: http://images.cocodataset.org/zips/val2017.zip 19 | 20 | 2017 Test images [41K/6GB]: http://images.cocodataset.org/zips/test2017.zip 21 | 22 | ### Annotations 23 | 2017 Train/Val annotations [241MB]: http://images.cocodataset.org/annotations/annotations_trainval2017.zip 24 | 25 | 2017 Stuff Train/Val annotations [1.1GB]: http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip 26 | 27 | 2017 Panoptic Train/Val annotations [821MB]: http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip 28 | 29 | 30 | ## 参考 (Reference) 31 | [https://cocodataset.org/](https://cocodataset.org/) 32 | 33 | [https://docs.ultralytics.com/zh/datasets/detect/coco/](https://docs.ultralytics.com/zh/datasets/detect/coco/) 34 | 35 | [https://blog.csdn.net/weixin_50727642/article/details/122892088](https://blog.csdn.net/weixin_50727642/article/details/122892088) 36 | 37 | -------------------------------------------------------------------------------- 
/resource/datasets/COCO2017/assets/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/datasets/COCO2017/assets/bus.jpg -------------------------------------------------------------------------------- /resource/datasets/COCO2017/assets/kite.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/datasets/COCO2017/assets/kite.jpg -------------------------------------------------------------------------------- /resource/datasets/COCO2017/assets/kite_small.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/datasets/COCO2017/assets/kite_small.jpg -------------------------------------------------------------------------------- /resource/datasets/COCO2017/assets/zidane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/datasets/COCO2017/assets/zidane.jpg -------------------------------------------------------------------------------- /resource/datasets/ImageNet/README.md: -------------------------------------------------------------------------------- 1 | English| [简体中文](./README_cn.md) 2 | 3 | 4 | -------------------------------------------------------------------------------- /resource/datasets/ImageNet/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | ## 参考 4 | 5 | ImageNet: [https://image-net.org/](https://image-net.org/) -------------------------------------------------------------------------------- 
/resource/datasets/PascalVOC/README.md: -------------------------------------------------------------------------------- 1 | English| [简体中文](./README_cn.md) 2 | 3 | 4 | -------------------------------------------------------------------------------- /resource/datasets/PascalVOC/README_cn.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) | 简体中文 2 | 3 | ## 参考 4 | 5 | PASCAL VOC: [http://host.robots.ox.ac.uk/pascal/VOC/](http://host.robots.ox.ac.uk/pascal/VOC/) 6 | 7 | 目标检测数据集PASCAL VOC简介: [https://blog.csdn.net/generalsong/article/details/108471378](https://blog.csdn.net/generalsong/article/details/108471378) -------------------------------------------------------------------------------- /resource/imgs/FAQ.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/FAQ.jpg -------------------------------------------------------------------------------- /resource/imgs/ImageNet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/ImageNet.png -------------------------------------------------------------------------------- /resource/imgs/basic_usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/basic_usage.png -------------------------------------------------------------------------------- /resource/imgs/basic_usage_res.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/basic_usage_res.png 
-------------------------------------------------------------------------------- /resource/imgs/demo_rdkx5_yolov10n_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/demo_rdkx5_yolov10n_detect.jpg -------------------------------------------------------------------------------- /resource/imgs/into_jupyter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/into_jupyter.png -------------------------------------------------------------------------------- /resource/imgs/jupyter_start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/jupyter_start.png -------------------------------------------------------------------------------- /resource/imgs/model_zoo_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/model_zoo_logo.jpg -------------------------------------------------------------------------------- /resource/imgs/paddleocr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/paddleocr.png -------------------------------------------------------------------------------- /resource/imgs/vscode_demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/D-Robotics/rdk_model_zoo/6da0511b4a5f4ac49eadd936d8e0228412d8dd32/resource/imgs/vscode_demo.jpg 
--------------------------------------------------------------------------------