├── .gitignore
├── .vscode
├── configurationCache.log
├── settings.json
└── targets.log
├── README.md
├── demo
├── HRNet
│ ├── .idea
│ │ ├── .gitignore
│ │ ├── HRNet.iml
│ │ ├── inspectionProfiles
│ │ │ ├── Project_Default.xml
│ │ │ └── profiles_settings.xml
│ │ ├── misc.xml
│ │ └── modules.xml
│ ├── HRNet.png
│ ├── README.md
│ ├── __pycache__
│ │ ├── draw_utils.cpython-39.pyc
│ │ └── transforms.cpython-39.pyc
│ ├── draw_utils.py
│ ├── infer-onnxruntime.py
│ ├── model
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-39.pyc
│ │ │ └── hrnet.cpython-39.pyc
│ │ └── hrnet.py
│ ├── my_dataset_coco.py
│ ├── person.png
│ ├── person1.jpeg
│ ├── person2.jpeg
│ ├── person_keypoints.json
│ ├── plot_curve.py
│ ├── predict.py
│ ├── requirements.txt
│ ├── test.py
│ ├── test_result.jpg
│ ├── train.py
│ ├── train_multi_GPU.py
│ ├── train_utils
│ │ ├── __init__.py
│ │ ├── coco_eval.py
│ │ ├── coco_utils.py
│ │ ├── distributed_utils.py
│ │ ├── group_by_aspect_ratio.py
│ │ ├── loss.py
│ │ └── train_eval_utils.py
│ ├── transforms.py
│ └── validation.py
├── centernet
│ ├── .gitignore
│ ├── .idea
│ │ ├── .gitignore
│ │ ├── centernet-pytorch-main.iml
│ │ ├── inspectionProfiles
│ │ │ ├── Project_Default.xml
│ │ │ └── profiles_settings.xml
│ │ ├── misc.xml
│ │ └── modules.xml
│ ├── LICENSE
│ ├── README.md
│ ├── centernet.py
│ ├── export_onnx.py
│ ├── get_map.py
│ ├── img
│ │ └── street.jpg
│ ├── infer-onnxruntime.py
│ ├── info.md
│ ├── mypredict.py
│ ├── nets
│ │ ├── centernet.py
│ │ ├── centernet_training.py
│ │ ├── hourglass.py
│ │ └── resnet50.py
│ ├── onnx_simplify.py
│ ├── predict.py
│ ├── requirements.txt
│ ├── summary.py
│ ├── train.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── callbacks.py
│ │ ├── dataloader.py
│ │ ├── utils.py
│ │ ├── utils_bbox.py
│ │ ├── utils_fit.py
│ │ └── utils_map.py
│ ├── vision_for_centernet.py
│ ├── voc_annotation.py
│ └── 常见问题汇总.md
├── detr-main
│ ├── .circleci
│ │ └── config.yml
│ ├── .github
│ │ ├── CODE_OF_CONDUCT.md
│ │ ├── CONTRIBUTING.md
│ │ ├── DETR.png
│ │ └── ISSUE_TEMPLATE
│ │ │ ├── bugs.md
│ │ │ ├── questions-help-support.md
│ │ │ └── unexpected-problems-bugs.md
│ ├── .gitignore
│ ├── Dockerfile
│ ├── LICENSE
│ ├── README.md
│ ├── d2
│ │ ├── README.md
│ │ ├── configs
│ │ │ ├── detr_256_6_6_torchvision.yaml
│ │ │ └── detr_segm_256_6_6_torchvision.yaml
│ │ ├── converter.py
│ │ ├── detr
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── dataset_mapper.py
│ │ │ └── detr.py
│ │ └── train_net.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── coco.py
│ │ ├── coco_eval.py
│ │ ├── coco_panoptic.py
│ │ ├── panoptic_eval.py
│ │ └── transforms.py
│ ├── demo.jpg
│ ├── engine.py
│ ├── hubconf.py
│ ├── infer-onnxruntime.py
│ ├── load_from_cpp.py
│ ├── main.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbone.py
│ │ ├── detr.py
│ │ ├── detr_bak.py
│ │ ├── matcher.py
│ │ ├── position_encoding.py
│ │ ├── segmentation.py
│ │ └── transformer.py
│ ├── mypredict.py
│ ├── requirements.txt
│ ├── run_with_submitit.py
│ ├── street.jpg
│ ├── test_all.py
│ ├── tools.py
│ ├── tox.ini
│ └── util
│ │ ├── __init__.py
│ │ ├── box_ops.py
│ │ ├── misc.py
│ │ └── plot_utils.py
├── detr_demo
│ ├── demo.jpg
│ ├── export_onnx.py
│ ├── infer-onnxruntime.py
│ ├── model.py
│ ├── onnx_simplify.py
│ ├── predict.py
│ └── tools.py
├── swin
│ ├── __pycache__
│ │ ├── model.cpython-39.pyc
│ │ ├── my_dataset.cpython-39.pyc
│ │ └── utils.cpython-39.pyc
│ ├── class_indices.json
│ ├── create_confusion_matrix.py
│ ├── export_onnx.py
│ ├── infer-onnxruntime.py
│ ├── model.py
│ ├── my_dataset.py
│ ├── onnx_simplify.py
│ ├── predict.py
│ ├── predict_bak.py
│ ├── read_onnx.py
│ ├── select_incorrect_samples.py
│ ├── train.py
│ └── utils.py
├── unet
│ ├── 常见问题汇总.md
│ ├── .gitignore
│ ├── LICENSE
│ ├── Medical_Datasets
│ │ ├── ImageSets
│ │ │ └── Segmentation
│ │ │ │ ├── README.md
│ │ │ │ ├── train.txt
│ │ │ │ └── trainval.txt
│ │ ├── Images
│ │ │ ├── 0.png
│ │ │ ├── 1.png
│ │ │ ├── 10.png
│ │ │ ├── 11.png
│ │ │ ├── 12.png
│ │ │ ├── 13.png
│ │ │ ├── 14.png
│ │ │ ├── 15.png
│ │ │ ├── 16.png
│ │ │ ├── 17.png
│ │ │ ├── 18.png
│ │ │ ├── 19.png
│ │ │ ├── 2.png
│ │ │ ├── 20.png
│ │ │ ├── 21.png
│ │ │ ├── 22.png
│ │ │ ├── 23.png
│ │ │ ├── 24.png
│ │ │ ├── 25.png
│ │ │ ├── 26.png
│ │ │ ├── 27.png
│ │ │ ├── 28.png
│ │ │ ├── 29.png
│ │ │ ├── 3.png
│ │ │ ├── 4.png
│ │ │ ├── 5.png
│ │ │ ├── 6.png
│ │ │ ├── 7.png
│ │ │ ├── 8.png
│ │ │ └── 9.png
│ │ └── Labels
│ │ │ ├── 0.png
│ │ │ ├── 1.png
│ │ │ ├── 10.png
│ │ │ ├── 11.png
│ │ │ ├── 12.png
│ │ │ ├── 13.png
│ │ │ ├── 14.png
│ │ │ ├── 15.png
│ │ │ ├── 16.png
│ │ │ ├── 17.png
│ │ │ ├── 18.png
│ │ │ ├── 19.png
│ │ │ ├── 2.png
│ │ │ ├── 20.png
│ │ │ ├── 21.png
│ │ │ ├── 22.png
│ │ │ ├── 23.png
│ │ │ ├── 24.png
│ │ │ ├── 25.png
│ │ │ ├── 26.png
│ │ │ ├── 27.png
│ │ │ ├── 28.png
│ │ │ ├── 29.png
│ │ │ ├── 3.png
│ │ │ ├── 4.png
│ │ │ ├── 5.png
│ │ │ ├── 6.png
│ │ │ ├── 7.png
│ │ │ ├── 8.png
│ │ │ └── 9.png
│ ├── README.md
│ ├── VOCdevkit
│ │ └── VOC2007
│ │ │ ├── ImageSets
│ │ │ └── Segmentation
│ │ │ │ └── README.md
│ │ │ ├── JPEGImages
│ │ │ └── README.md
│ │ │ └── SegmentationClass
│ │ │ └── README.md
│ ├── datasets
│ │ ├── JPEGImages
│ │ │ └── 1.jpg
│ │ ├── SegmentationClass
│ │ │ └── 1.png
│ │ └── before
│ │ │ ├── 1.jpg
│ │ │ └── 1.json
│ ├── export.py
│ ├── get_miou.py
│ ├── img
│ │ ├── cell.png
│ │ └── street.jpg
│ ├── json_to_dataset.py
│ ├── logs
│ │ └── README.MD
│ ├── model_data
│ │ └── README.md
│ ├── nets
│ │ ├── __init__.py
│ │ ├── unet.py
│ │ ├── unet_training.py
│ │ └── vgg.py
│ ├── predict.py
│ ├── requirements.txt
│ ├── summary.py
│ ├── train.py
│ ├── train_medical.py
│ ├── unet.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── callbacks.py
│ │ ├── dataloader.py
│ │ ├── dataloader_medical.py
│ │ ├── utils.py
│ │ ├── utils_fit.py
│ │ └── utils_metrics.py
│ ├── voc_annotation.py
│ └── voc_annotation_medical.py
├── vit
│ ├── class_indices.json
│ ├── demo.jpg
│ ├── export_onnx.py
│ ├── flops.py
│ ├── infer-onnxruntime.py
│ ├── my_dataset.py
│ ├── onnx_simplify.py
│ ├── predict.py
│ ├── train.py
│ ├── utils.py
│ └── vit_model.py
├── yolov5
│ ├── detect-for-yolov5-6.0.sh
│ ├── export-yolov5-6.0.sh
│ ├── yolov5-6.0
│ │ ├── .dockerignore
│ │ ├── .gitattributes
│ │ ├── .github
│ │ │ ├── FUNDING.yml
│ │ │ ├── ISSUE_TEMPLATE
│ │ │ │ ├── bug-report.md
│ │ │ │ ├── feature-request.md
│ │ │ │ └── question.md
│ │ │ ├── dependabot.yml
│ │ │ └── workflows
│ │ │ │ ├── ci-testing.yml
│ │ │ │ ├── codeql-analysis.yml
│ │ │ │ ├── greetings.yml
│ │ │ │ ├── rebase.yml
│ │ │ │ └── stale.yml
│ │ ├── .gitignore
│ │ ├── Arial.ttf
│ │ ├── CONTRIBUTING.md
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── detect.py
│ │ ├── export.py
│ │ ├── hubconf.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── common.py
│ │ │ ├── experimental.py
│ │ │ ├── hub
│ │ │ │ ├── anchors.yaml
│ │ │ │ ├── yolov3-spp.yaml
│ │ │ │ ├── yolov3-tiny.yaml
│ │ │ │ ├── yolov3.yaml
│ │ │ │ ├── yolov5-bifpn.yaml
│ │ │ │ ├── yolov5-fpn.yaml
│ │ │ │ ├── yolov5-p2.yaml
│ │ │ │ ├── yolov5-p6.yaml
│ │ │ │ ├── yolov5-p7.yaml
│ │ │ │ ├── yolov5-panet.yaml
│ │ │ │ ├── yolov5l6.yaml
│ │ │ │ ├── yolov5m6.yaml
│ │ │ │ ├── yolov5n6.yaml
│ │ │ │ ├── yolov5s-ghost.yaml
│ │ │ │ ├── yolov5s-transformer.yaml
│ │ │ │ ├── yolov5s6.yaml
│ │ │ │ └── yolov5x6.yaml
│ │ │ ├── tf.py
│ │ │ ├── yolo.py
│ │ │ ├── yolov5l.yaml
│ │ │ ├── yolov5m.yaml
│ │ │ ├── yolov5n.yaml
│ │ │ ├── yolov5s.yaml
│ │ │ └── yolov5x.yaml
│ │ ├── requirements.txt
│ │ ├── train.py
│ │ ├── tutorial.ipynb
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── activations.py
│ │ │ ├── augmentations.py
│ │ │ ├── autoanchor.py
│ │ │ ├── aws
│ │ │ │ ├── __init__.py
│ │ │ │ ├── mime.sh
│ │ │ │ ├── resume.py
│ │ │ │ └── userdata.sh
│ │ │ ├── callbacks.py
│ │ │ ├── datasets.py
│ │ │ ├── downloads.py
│ │ │ ├── flask_rest_api
│ │ │ │ ├── README.md
│ │ │ │ ├── example_request.py
│ │ │ │ └── restapi.py
│ │ │ ├── general.py
│ │ │ ├── google_app_engine
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── additional_requirements.txt
│ │ │ │ └── app.yaml
│ │ │ ├── loggers
│ │ │ │ ├── __init__.py
│ │ │ │ └── wandb
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── log_dataset.py
│ │ │ │ │ ├── sweep.py
│ │ │ │ │ ├── sweep.yaml
│ │ │ │ │ └── wandb_utils.py
│ │ │ ├── loss.py
│ │ │ ├── metrics.py
│ │ │ ├── plots.py
│ │ │ └── torch_utils.py
│ │ └── val.py
│ ├── yolov5.cpp
│ └── yolov5.hpp
└── yolov7
│ ├── README.md
│ ├── cfg
│ ├── baseline
│ │ ├── r50-csp.yaml
│ │ ├── x50-csp.yaml
│ │ ├── yolor-csp-x.yaml
│ │ ├── yolor-csp.yaml
│ │ ├── yolor-d6.yaml
│ │ ├── yolor-e6.yaml
│ │ ├── yolor-p6.yaml
│ │ ├── yolor-w6.yaml
│ │ ├── yolov3-spp.yaml
│ │ ├── yolov3.yaml
│ │ └── yolov4-csp.yaml
│ └── deploy
│ │ ├── yolov7-d6.yaml
│ │ ├── yolov7-e6.yaml
│ │ ├── yolov7-e6e.yaml
│ │ ├── yolov7-tiny-silu.yaml
│ │ ├── yolov7-w6.yaml
│ │ ├── yolov7.yaml
│ │ └── yolov7x.yaml
│ ├── data
│ ├── coco.yaml
│ ├── hyp.scratch.p5.yaml
│ ├── hyp.scratch.p6.yaml
│ └── hyp.scratch.tiny.yaml
│ ├── detect.py
│ ├── export_onnx.py
│ ├── figure
│ └── performance.png
│ ├── hubconf.py
│ ├── infer-onnxruntime.py
│ ├── models
│ ├── __init__.py
│ ├── common.py
│ ├── experimental.py
│ ├── export.py
│ └── yolo.py
│ ├── onnx_simplify.py
│ ├── scripts
│ └── get_coco.sh
│ ├── test.py
│ ├── train.py
│ ├── transforms.py
│ └── utils
│ ├── __init__.py
│ ├── activations.py
│ ├── autoanchor.py
│ ├── aws
│ ├── __init__.py
│ ├── mime.sh
│ ├── resume.py
│ └── userdata.sh
│ ├── datasets.py
│ ├── general.py
│ ├── google_app_engine
│ ├── Dockerfile
│ ├── additional_requirements.txt
│ └── app.yaml
│ ├── google_utils.py
│ ├── loss.py
│ ├── metrics.py
│ ├── plots.py
│ ├── torch_utils.py
│ └── wandb_logging
│ ├── __init__.py
│ ├── log_dataset.py
│ └── wandb_utils.py
├── sideline_learn
├── PixelShuffle
│ └── PixelShuffle.py
├── cpp-move
│ ├── forward.cpp
│ ├── move.cpp
│ └── test.cpp
├── multi-thread
│ ├── 0624.cpp
│ ├── 1.cpp
│ ├── Makefile
│ ├── async.cpp
│ ├── fix_tow_con.cpp
│ ├── no_used_future.cpp
│ ├── one_con.cpp
│ ├── readme.md
│ ├── src
│ │ └── package_task.cpp
│ ├── tow_con.cpp
│ ├── used_future.cpp
│ ├── used_future_set.cpp
│ ├── used_share_future_set.cpp
│ └── workspace
│ │ └── pro
├── ncnn_multi_thread
│ ├── CMakeLists.txt
│ ├── fastdet_test.cpp
│ ├── include
│ │ ├── fastdet.h
│ │ └── infer.hpp
│ ├── multi_thead_infer.cpp
│ └── src
│ │ ├── fastdet.cpp
│ │ └── infer.cpp
├── paddle_code
│ ├── swim-t
│ │ ├── Swim-t.py
│ │ ├── Swimblock.py
│ │ ├── mask.py
│ │ └── swim_t.md
│ └── vi-t
│ │ ├── attention.py
│ │ ├── vit.py
│ │ └── vit_model.md
├── tools
│ ├── nms.py
│ └── pic2video2.py
├── warpaffine-cuda
│ ├── CMakeLists.txt
│ ├── build
│ │ ├── CMakeCache.txt
│ │ ├── CMakeFiles
│ │ │ ├── 3.16.3
│ │ │ │ ├── CMakeDetermineCompilerABI_C.bin
│ │ │ │ ├── CMakeDetermineCompilerABI_CXX.bin
│ │ │ │ ├── CompilerIdC
│ │ │ │ │ ├── CMakeCCompilerId.c
│ │ │ │ │ └── a.out
│ │ │ │ └── CompilerIdCXX
│ │ │ │ │ ├── CMakeCXXCompilerId.cpp
│ │ │ │ │ └── a.out
│ │ │ ├── CMakeRuleHashes.txt
│ │ │ ├── Makefile2
│ │ │ ├── TargetDirectories.txt
│ │ │ ├── cmake.check_cache
│ │ │ ├── progress.marks
│ │ │ ├── test.dir
│ │ │ │ ├── CXX.includecache
│ │ │ │ ├── build.make
│ │ │ │ ├── depend.internal
│ │ │ │ ├── depend.make
│ │ │ │ ├── flags.make
│ │ │ │ ├── link.txt
│ │ │ │ └── progress.make
│ │ │ └── warpaffine.dir
│ │ │ │ ├── build.make
│ │ │ │ ├── depend.internal
│ │ │ │ ├── depend.make
│ │ │ │ ├── flags.make
│ │ │ │ ├── link.txt
│ │ │ │ ├── progress.make
│ │ │ │ └── src
│ │ │ │ ├── warpaffine_generated_warpaffine.cu.o.cmake.pre-gen
│ │ │ │ └── warpaffine_generated_warpaffine.cu.o.depend
│ │ ├── Makefile
│ │ ├── libwarpaffine.so
│ │ └── test
│ ├── include
│ │ └── warpaffine.h
│ ├── scripts
│ │ └── warpaffine.py
│ └── src
│ │ ├── test.cpp
│ │ └── warpaffine.cu
└── yolo5-6.0-ros
│ ├── .catkin_workspace
│ ├── README.md
│ ├── Screenshot from 2022-07-30 09-39-36.png
│ └── src
│ ├── CMakeLists.txt
│ ├── base
│ ├── CMakeLists.txt
│ ├── code_info.txt
│ ├── include
│ │ └── base
│ │ │ └── base.h
│ ├── nodelet_plugins.xml
│ ├── package.xml
│ ├── src
│ │ └── base.cpp
│ └── srv
│ │ └── RosImage.srv
│ ├── client
│ ├── CMakeLists.txt
│ ├── include
│ │ └── client_nodelet
│ │ │ └── client_nodelet.h
│ ├── launch
│ │ └── client_nodelet.launch
│ ├── nodelet_plugins.xml
│ ├── package.xml
│ └── src
│ │ └── client_nodelet.cpp
│ ├── yolov5-6.0
│ ├── CMakeLists.txt
│ ├── include
│ │ ├── basic_transform.h
│ │ ├── calibrator.h
│ │ ├── common.hpp
│ │ ├── cuda_utils.h
│ │ ├── logging.h
│ │ ├── macros.h
│ │ ├── preprocess.h
│ │ ├── utils.h
│ │ ├── yololayer.h
│ │ └── yolov5-detect.h
│ ├── src
│ │ ├── basic_transform.cu
│ │ ├── build_engine.cpp
│ │ ├── calibrator.cpp
│ │ ├── detect.cpp
│ │ ├── preprocess.cu
│ │ ├── yololayer.cu
│ │ └── yolov5.cpp
│ └── tools
│ │ ├── gen_wts.py
│ │ └── yolov5_trt.py
│ ├── yolov5-infer
│ ├── CMakeLists.txt
│ ├── include
│ │ └── yolov5_infer_nodelet
│ │ │ └── yolov5_infer_nodelet.h
│ ├── launch
│ │ └── yolov5_infer_nodelet.launch
│ ├── nodelet_plugins.xml
│ ├── package.xml
│ └── src
│ │ └── yolov5_infer_nodelet.cpp
│ └── yolov5-server
│ ├── CMakeLists.txt
│ ├── include
│ └── yolov5_server_nodelet
│ │ └── yolov5_server_nodelet.h
│ ├── launch
│ └── yolov5_server_nodelet.launch
│ ├── nodelet_plugins.xml
│ ├── package.xml
│ └── src
│ └── yolov5_server_nodelet.cpp
├── trt_cpp
├── CMakeLists.txt
├── Makefile
├── README.md
├── src
│ ├── main.cpp
│ └── trt
│ │ ├── common
│ │ ├── basic_tools.cu
│ │ ├── basic_tools.hpp
│ │ ├── cuda-tools.cpp
│ │ ├── cuda-tools.hpp
│ │ ├── matrix.cpp
│ │ ├── matrix.hpp
│ │ ├── mix-memory.cpp
│ │ ├── mix-memory.hpp
│ │ ├── simple-logger.cpp
│ │ ├── simple-logger.hpp
│ │ ├── trt-tensor.cpp
│ │ └── trt-tensor.hpp
│ │ ├── demo-infer
│ │ ├── centernet
│ │ │ ├── centernet.cu
│ │ │ └── centernet.h
│ │ ├── demo-infer.cu
│ │ ├── demo-infer.hpp
│ │ ├── detr
│ │ │ ├── detr.cu
│ │ │ └── detr.h
│ │ ├── hrnet
│ │ │ ├── hrnet.cu
│ │ │ └── hrnet.h
│ │ ├── unet
│ │ │ ├── unet.cu
│ │ │ └── unet.h
│ │ ├── vit
│ │ │ ├── vit.cu
│ │ │ └── vit.h
│ │ ├── yolov5
│ │ │ ├── yolov5.cpp
│ │ │ └── yolov5.hpp
│ │ ├── yolov5seg
│ │ │ ├── README.md
│ │ │ ├── yolact.png
│ │ │ ├── yolov5seg.cu
│ │ │ ├── yolov5seg.h
│ │ │ └── yolov5seg.png
│ │ └── yolov7
│ │ │ ├── yolov7.cu
│ │ │ └── yolov7.h
│ │ └── infer
│ │ ├── trt-infer.cpp
│ │ └── trt-infer.hpp
├── workspace
│ ├── cat.jpg
│ ├── centernet-gpu—pred.jpg
│ ├── centernet-pred-street.jpg
│ ├── centernet-pred.jpg
│ ├── demo-infer.txt
│ ├── detr-pred.jpg
│ ├── flower.jpg
│ ├── hrnet-cuda-pred.jpg
│ ├── hrnet-pred.jpg
│ ├── person.png
│ ├── person1.jpeg
│ ├── person2.jpeg
│ ├── street.jpg
│ └── warp-affine.jpg
└── 模型权重.md
└── trt_py
├── README.md
├── basic_infer
├── __pycache__
│ └── infer.cpython-39.pyc
├── _init__.py
├── infer.py
├── infer_bak.py
├── transforms.py
└── warpaffine.py
├── build_engine
├── __pycache__
│ └── common.cpython-39.pyc
├── batch_1
│ ├── build_engine_b1.py
│ └── common.py
└── batch_dynamic
│ └── build_engine_batch_image.py
├── centernet
├── CV学习笔记tensorrt之centernet加速.md
├── centernet_infer.py
├── centernet_infer_dynamci_batch.py
└── images
│ ├── cat.jpg
│ └── street.jpg
├── detr
├── detr_infer.py
├── detr_infer_dynamic_batch.py
└── images
│ ├── cat.jpg
│ └── street.jpg
├── hrnet
├── hrnet-pred.jpg
├── hrnet_inferece.py
└── images
│ ├── person.png
│ ├── person1.png
│ └── person2.png
├── util
└── util.py
├── yolov5
├── images
│ ├── cat.jpg
│ └── street.jpg
└── yolov5_infer.py
└── yolov7
├── images
└── street.jpg
├── infer-onnxruntime.py
├── transforms.py
└── yolov7_infer.py
/.gitignore:
--------------------------------------------------------------------------------
1 | trt_cpp/objs/
2 | *.o
3 | *.mk
4 | trt_cpp/build/
5 | trt_cpp/workspace/pro
6 | trt_cpp/workspace/demo_infer
7 | *.trtmodel
8 | *.pth
9 | *.onnx
10 | *.tensor
11 | *tensor
12 | *.idea
13 | *.__pycache__
14 | *.pyengine
15 | *.vscode
16 | *.log
17 | *.cmake
18 | *.pyc
19 | *.vscode
20 | *.idea
21 | *.pt
22 | *.jpg
23 | sideline_learn/yolo5-6.0-ros/build_isolated
24 | sideline_learn/yolo5-6.0-ros/devel_isolated
25 |
26 | sideline_learn/ncnn_multi_thread/build
27 | sideline_learn/ncnn_multi_thread/data
28 | sideline_learn/ncnn_multi_thread/ncnn
--------------------------------------------------------------------------------
/.vscode/configurationCache.log:
--------------------------------------------------------------------------------
1 | {"buildTargets":[],"launchTargets":[],"customConfigurationProvider":{"workspaceBrowse":{"browsePath":[],"compilerArgs":[]},"fileIndex":[]}}
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "makefile.extensionOutputFolder": "./.vscode",
3 | "files.associations": {
4 | "memory": "cpp",
5 | "iostream": "cpp",
6 | "array": "cpp",
7 | "atomic": "cpp",
8 | "bit": "cpp",
9 | "*.tcc": "cpp",
10 | "cctype": "cpp",
11 | "chrono": "cpp",
12 | "clocale": "cpp",
13 | "cmath": "cpp",
14 | "condition_variable": "cpp",
15 | "cstdarg": "cpp",
16 | "cstdint": "cpp",
17 | "cstdio": "cpp",
18 | "cstdlib": "cpp",
19 | "ctime": "cpp",
20 | "cwchar": "cpp",
21 | "cwctype": "cpp",
22 | "map": "cpp",
23 | "unordered_map": "cpp",
24 | "vector": "cpp",
25 | "exception": "cpp",
26 | "fstream": "cpp",
27 | "functional": "cpp",
28 | "future": "cpp",
29 | "initializer_list": "cpp",
30 | "iosfwd": "cpp",
31 | "istream": "cpp",
32 | "limits": "cpp",
33 | "mutex": "cpp",
34 | "new": "cpp",
35 | "optional": "cpp",
36 | "ostream": "cpp",
37 | "ratio": "cpp",
38 | "sstream": "cpp",
39 | "stdexcept": "cpp",
40 | "streambuf": "cpp",
41 | "string": "cpp",
42 | "string_view": "cpp",
43 | "system_error": "cpp",
44 | "thread": "cpp",
45 | "type_traits": "cpp",
46 | "tuple": "cpp",
47 | "typeinfo": "cpp",
48 | "utility": "cpp",
49 | "deque": "cpp",
50 | "iomanip": "cpp"
51 | }
52 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### CV Study Notes
2 |
3 | #### 1. Introduction
4 |
5 | This repository records my learning experience, mainly in the CV field, so that it is easy to review later. I also hope it can help anyone who needs it, and I welcome discussion whenever problems come up. For questions or suggestions, add WeChat: Rex1586662742 (the trailing digits are my QQ number).
6 |
7 | TensorRT code: https://github.com/shouxieai/learning-cuda-trt
8 |
9 | Project code:
10 |
11 | https://github.com/WZMIAOMIAO/deep-learning-for-image-processing
12 |
13 | https://www.bilibili.com/video/BV1yA411M7T4?spm_id_from=333.999.0.0
14 |
15 | My learning repository: https://github.com/Rex-LK/tensorrt_learning
16 |
17 | Some model weights: https://pan.baidu.com/s/18yIsypWMg0sT_uAR_MhDSA  extraction code: sh7c
18 |
19 | #### 2. Contents
20 |
21 | 2.1. demo: upstream open-source projects
22 |
23 | 2.2. sideline_learn: everyday study topics such as ROS, multithreading, etc.
24 |
25 | 2.3. trt_cpp: TensorRT with C++
26 |
27 | 2.4. trt_py: TensorRT with Python
28 |
29 | #### 3. Changelog
30 | 2024-10-25: removed the ONNX-to-engine conversion code to make reproduction easier; models can now be converted as follows:
31 | trtexec --onnx=yolov5s_mobv3s.onnx --saveEngine=test.engine
32 |
33 | 2022-11-27: updated the yolov5-7.0 instance-segmentation code
34 |
35 | 2022-07-30: added a demo of yolov5 & TensorRT inside ROS, with two launch modes, a single node and a client/server pair; see sideline_learn/yolov5-6.0-ros/README.md for details
36 |
37 | 2022-07-09: added the yolov7 algorithm, supporting both cpp-tensorrt and python-tensorrt acceleration
38 |
39 |
40 | 2022-06-04: added HRNet human keypoint detection with both cupy and torch post-processing, plus GPU decoding on the C++ side.
41 |
42 | 2022-06-01: reorganized the code folders to make later backtracking and classification easier
43 |
44 | 2022-05-30: added the cuda-python-tensorrt inference path
45 |
46 | onnx2trt: /build_engine/batch_1/build_engine_single_image.py
47 |
48 | Inference demos currently cover only centernet and detr; support for other models will follow, for example:
49 |
50 | /centernet/centernet_infer.py.
51 |
52 | The original projects can all be found under the demo folder in the repository root and traced back to their upstream repositories.
53 |
54 | 2022-05-20: added the hrnet-tensorrt acceleration path under the onnx project; its predictions match the author's results in demo/HRNet
55 |
56 | ...centernet, vit, unet, etc. are all implemented
57 |
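For reference, the trtexec one-liner above can also be reproduced with the TensorRT Python API. A minimal sketch, assuming TensorRT 8.x and an ONNX model with static shapes (the file names are placeholders):

```python
import tensorrt as trt

ONNX_PATH = "yolov5s_mobv3s.onnx"   # placeholder: any exported ONNX model
ENGINE_PATH = "test.engine"

logger = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)

# parse the ONNX graph into a TensorRT network definition
with open(ONNX_PATH, "rb") as f:
    if not parser.parse(f.read()):
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        raise RuntimeError("failed to parse the ONNX model")

config = builder.create_builder_config()
config.max_workspace_size = 1 << 30  # 1 GB scratch space (API differs on newer TensorRT versions)

# serialize the engine and write it to disk, equivalent to --saveEngine
engine_bytes = builder.build_serialized_network(network, config)
with open(ENGINE_PATH, "wb") as f:
    f.write(engine_bytes)
```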
--------------------------------------------------------------------------------
/demo/HRNet/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/demo/HRNet/.idea/HRNet.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/demo/HRNet/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/demo/HRNet/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/demo/HRNet/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/demo/HRNet/HRNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/HRNet.png
--------------------------------------------------------------------------------
/demo/HRNet/__pycache__/draw_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/__pycache__/draw_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/HRNet/__pycache__/transforms.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/__pycache__/transforms.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/HRNet/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 | import transforms
2 | import onnxruntime
3 | import torch
4 | import cv2
5 | import numpy as np
6 | if __name__ == "__main__":
7 |
8 | resize_hw = (256, 192)
9 | img_path = "person.png"
10 | data_transform = transforms.Compose([
11 | transforms.AffineTransform(scale=(1.25, 1.25), fixed_size=resize_hw),
12 | transforms.ToTensor(),
13 | transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
14 | ])
15 |
16 | # read single-person image
17 | img = cv2.imread(img_path)
18 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
19 | img_tensor, target = data_transform(img, {"box": [0, 0, img.shape[1] - 1, img.shape[0] - 1]})
20 | img_tensor = torch.unsqueeze(img_tensor, dim=0)
21 | session = onnxruntime.InferenceSession("/home/rex/Desktop/cv_demo/cv_model/hrnet/hrnet.onnx", providers=["CPUExecutionProvider"])
22 | pred = session.run(["predict"], {"image": img_tensor.numpy()})[0]
23 | print(pred)
24 |
25 |
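The script above only prints the raw network output, which for HRNet is a stack of per-keypoint heatmaps at 1/4 of the (256, 192) input resolution. A minimal decoding sketch, written as an assumption about the export rather than the repo's own post-processing (that lives in transforms.py / draw_utils.py): take the argmax of each heatmap and scale it back by the stride.

```python
import numpy as np

def decode_heatmaps(heatmaps, stride=4):
    """heatmaps: [num_kps, H, W] scores from the network.
    Returns keypoint (x, y) in network-input coordinates plus a confidence per keypoint."""
    num_kps, h, w = heatmaps.shape
    flat = heatmaps.reshape(num_kps, -1)
    idx = flat.argmax(axis=1)
    scores = flat.max(axis=1)
    ys, xs = np.unravel_index(idx, (h, w))
    kps = np.stack([xs, ys], axis=1).astype(np.float32) * stride
    return kps, scores

# pred has shape [1, num_kps, H/4, W/4]; mapping back to the original image would
# additionally require reversing the AffineTransform applied by data_transform.
kps, scores = decode_heatmaps(pred[0])
```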
--------------------------------------------------------------------------------
/demo/HRNet/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .hrnet import HighResolutionNet
2 |
--------------------------------------------------------------------------------
/demo/HRNet/model/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/model/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/HRNet/model/__pycache__/hrnet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/model/__pycache__/hrnet.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/HRNet/person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/person.png
--------------------------------------------------------------------------------
/demo/HRNet/person1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/person1.jpeg
--------------------------------------------------------------------------------
/demo/HRNet/person2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/person2.jpeg
--------------------------------------------------------------------------------
/demo/HRNet/person_keypoints.json:
--------------------------------------------------------------------------------
1 | {
2 | "keypoints": ["nose","left_eye","right_eye","left_ear","right_ear","left_shoulder","right_shoulder","left_elbow","right_elbow","left_wrist","right_wrist","left_hip","right_hip","left_knee","right_knee","left_ankle","right_ankle"],
3 | "skeleton": [[16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13],[6,7],[6,8],[7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]],
4 | "flip_pairs": [[1,2], [3,4], [5,6], [7,8], [9,10], [11,12], [13,14], [15,16]],
5 | "kps_weights": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.5, 1.5, 1.0, 1.0, 1.2, 1.2, 1.5, 1.5],
6 | "upper_body_ids": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
7 | "lower_body_ids": [11, 12, 13, 14, 15, 16]
8 | }
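The skeleton pairs above follow the COCO convention and index into the 17-entry keypoints list starting from 1. A small drawing sketch (a hypothetical helper, not the repo's draw_utils.py), assuming keypoints as an array of (x, y) pixel coordinates with per-keypoint scores:

```python
import json
import cv2

with open("person_keypoints.json") as f:
    meta = json.load(f)

def draw_skeleton(img, keypoints, scores, thresh=0.3):
    """keypoints: (17, 2) array of (x, y); scores: (17,) confidences."""
    for a, b in meta["skeleton"]:
        i, j = a - 1, b - 1                      # skeleton entries are 1-based
        if scores[i] > thresh and scores[j] > thresh:
            cv2.line(img,
                     tuple(map(int, keypoints[i])),
                     tuple(map(int, keypoints[j])),
                     (0, 255, 0), 2)
    return img
```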
--------------------------------------------------------------------------------
/demo/HRNet/plot_curve.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import matplotlib.pyplot as plt
3 |
4 |
5 | def plot_loss_and_lr(train_loss, learning_rate):
6 | try:
7 | x = list(range(len(train_loss)))
8 | fig, ax1 = plt.subplots(1, 1)
9 | ax1.plot(x, train_loss, 'r', label='loss')
10 | ax1.set_xlabel("step")
11 | ax1.set_ylabel("loss")
12 | ax1.set_title("Train Loss and lr")
13 | plt.legend(loc='best')
14 |
15 | ax2 = ax1.twinx()
16 | ax2.plot(x, learning_rate, label='lr')
17 | ax2.set_ylabel("learning rate")
18 | ax2.set_xlim(0, len(train_loss))  # use integer spacing on the x axis
19 | plt.legend(loc='best')
20 |
21 | handles1, labels1 = ax1.get_legend_handles_labels()
22 | handles2, labels2 = ax2.get_legend_handles_labels()
23 | plt.legend(handles1 + handles2, labels1 + labels2, loc='upper right')
24 |
25 | fig.subplots_adjust(right=0.8)  # prevent the saved figure from being cut off
26 | fig.savefig('./loss_and_lr{}.png'.format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))
27 | plt.close()
28 | print("successful save loss curve! ")
29 | except Exception as e:
30 | print(e)
31 |
32 |
33 | def plot_map(mAP):
34 | try:
35 | x = list(range(len(mAP)))
36 | plt.plot(x, mAP, label='mAp')
37 | plt.xlabel('epoch')
38 | plt.ylabel('mAP')
39 | plt.title('Eval mAP')
40 | plt.xlim(0, len(mAP))
41 | plt.legend(loc='best')
42 | plt.savefig('./mAP.png')
43 | plt.close()
44 | print("successful save mAP curve!")
45 | except Exception as e:
46 | print(e)
47 |
--------------------------------------------------------------------------------
/demo/HRNet/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | opencv_python==4.5.4.60
3 | lxml
4 | torch==1.10.1
5 | torchvision==0.11.1
6 | pycocotools
7 | matplotlib
8 | tqdm
--------------------------------------------------------------------------------
/demo/HRNet/test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import torch
5 | import numpy as np
6 | import cv2
7 | import matplotlib.pyplot as plt
8 |
9 | from model import HighResolutionNet
10 | from draw_utils import draw_keypoints
11 | import transforms
12 |
13 |
14 |
15 | def predict_single_person():
16 | resize_hw = (256, 192)
17 | img_path = "person.png"
18 | data_transform = transforms.Compose([
19 | transforms.AffineTransform(scale=(1, 1), fixed_size=resize_hw),
20 | ])
21 |
22 | # read single-person image
23 | img = cv2.imread(img_path)
24 | img_tensor, target = data_transform(img, {"box": [0, 0, img.shape[1] - 1, img.shape[0] - 1]})
25 | cv2.imshow("1",img_tensor)
26 | cv2.waitKey(0)
27 |
28 |
29 | predict_single_person()
--------------------------------------------------------------------------------
/demo/HRNet/test_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/HRNet/test_result.jpg
--------------------------------------------------------------------------------
/demo/HRNet/train_utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
2 | from .distributed_utils import init_distributed_mode, save_on_master, mkdir
3 | from .coco_eval import EvalCOCOMetric
4 | from .coco_utils import coco_remove_images_without_annotations, convert_coco_poly_mask, convert_to_coco_api
5 |
--------------------------------------------------------------------------------
/demo/HRNet/train_utils/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class KpLoss(object):
5 | def __init__(self):
6 | self.criterion = torch.nn.MSELoss(reduction='none')
7 |
8 | def __call__(self, logits, targets):
9 | assert len(logits.shape) == 4, 'logits should be 4-ndim'
10 | device = logits.device
11 | bs = logits.shape[0]
12 | # [num_kps, H, W] -> [B, num_kps, H, W]
13 | heatmaps = torch.stack([t["heatmap"].to(device) for t in targets])
14 | # [num_kps] -> [B, num_kps]
15 | kps_weights = torch.stack([t["kps_weights"].to(device) for t in targets])
16 |
17 | # [B, num_kps, H, W] -> [B, num_kps]
18 | loss = self.criterion(logits, heatmaps).mean(dim=[2, 3])
19 | loss = torch.sum(loss * kps_weights) / bs
20 | return loss
21 |
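A minimal usage sketch with dummy tensors, just to make the expected shapes explicit; the batch size of 2, 17 keypoints, and 64x48 heatmaps are assumptions matching the HRNet defaults:

```python
import torch

criterion = KpLoss()
logits = torch.randn(2, 17, 64, 48)            # model output: [B, num_kps, H, W]
targets = [
    {"heatmap": torch.rand(17, 64, 48),        # per-image ground-truth heatmaps
     "kps_weights": torch.ones(17)}            # per-keypoint loss weights
    for _ in range(2)
]
loss = criterion(logits, targets)              # scalar, averaged over the batch
print(loss.item())
```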
--------------------------------------------------------------------------------
/demo/centernet/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/demo/centernet/.idea/centernet-pytorch-main.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/demo/centernet/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/demo/centernet/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/demo/centernet/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/demo/centernet/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Bubbliiiing
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/demo/centernet/img/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/centernet/img/street.jpg
--------------------------------------------------------------------------------
/demo/centernet/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 |
2 | import onnxruntime
3 | import cv2
4 | import numpy as np
5 | from PIL import Image
6 | from utils.utils import (cvtColor, get_classes, preprocess_input, resize_image,
7 | show_config)
8 | import torch
9 | image = 'img/street.jpg'
10 | image = Image.open(image)
11 | image_shape = np.array(np.shape(image)[0:2])
12 | image = cvtColor(image)
13 | image_data = resize_image(image, (512,512), None)
14 | image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
15 | images = np.asarray(image_data).astype(np.float32)
16 | session = onnxruntime.InferenceSession("centernet.onnx", providers=["CPUExecutionProvider"])
17 | pred = session.run(["predict"], {"image": images})[0]
18 | pred = torch.from_numpy(pred)
19 | pred_hms = pred[0,:,0:20]
20 | pred_wh = pred[0,:,20:22]
21 | pred_xy = pred[0,:,22:]
22 | keep = pred_hms.max(-1).values > 0.3
23 | pred_hms = pred_hms[keep]
24 | pred_wh = pred_wh[keep]
25 | pred_xy = pred_xy[keep]
26 | print(pred_wh.shape)
--------------------------------------------------------------------------------
/demo/centernet/info.md:
--------------------------------------------------------------------------------
1 | 1. Preprocessing uses RGB input
2 | 2. Either a warpaffine or a plain resize can be used
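A sketch of the second option (plain resize), mirroring what infer-onnxruntime.py in this folder does; the 512x512 input size is taken from that script, and the repo's preprocess_input additionally applies a dataset-specific mean/std normalization that is omitted here:

```python
import cv2
import numpy as np

def preprocess(path, size=(512, 512)):
    """Read an image and lay it out as a 1x3xHxW float32 tensor in [0, 1]."""
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # point 1: the network expects RGB
    img = cv2.resize(img, size)                   # point 2: plain resize (warpaffine also works)
    img = img.astype(np.float32) / 255.0
    return np.transpose(img, (2, 0, 1))[None]     # HWC -> 1xCxHxW
```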
--------------------------------------------------------------------------------
/demo/centernet/onnx_simplify.py:
--------------------------------------------------------------------------------
1 | from onnxsim import simplify
2 | import onnx
3 | input_path="centernet.onnx"
4 | output_path="centernet_simp.onnx"
5 | onnx_model = onnx.load(input_path)
6 | model_simp, check = simplify(onnx_model,input_shapes={'image': [1, 3, 512, 512]})
7 | assert check, "Simplified ONNX model could not be validated"
8 | onnx.save(model_simp, output_path)
9 | print('finished exporting onnx')
--------------------------------------------------------------------------------
/demo/centernet/requirements.txt:
--------------------------------------------------------------------------------
1 | scipy==1.2.1
2 | numpy==1.17.0
3 | matplotlib==3.1.2
4 | opencv_python==4.1.2.30
5 | torch==1.2.0
6 | torchvision==0.4.0
7 | tqdm==4.60.0
8 | Pillow==8.2.0
9 | h5py==2.10.0
10 |
--------------------------------------------------------------------------------
/demo/centernet/summary.py:
--------------------------------------------------------------------------------
1 | #--------------------------------------------#
2 | # This script is used to inspect the network parameters
3 | #--------------------------------------------#
4 | import torch
5 | from thop import clever_format, profile
6 | from torchsummary import summary
7 |
8 | from nets.centernet import CenterNet_HourglassNet, CenterNet_Resnet50
9 |
10 | if __name__ == "__main__":
11 | input_shape = [512, 512]
12 | num_classes = 20
13 |
14 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15 | model = CenterNet_Resnet50().to(device)
16 | summary(model, (3, input_shape[0], input_shape[1]))
17 |
18 | dummy_input = torch.randn(1, 3, input_shape[0], input_shape[1]).to(device)
19 | flops, params = profile(model.to(device), (dummy_input, ), verbose=False)
20 | #--------------------------------------------------------#
21 | # flops * 2 because profile does not count a convolution as two operations
22 | # Some papers count a convolution as both a multiply and an add; in that case multiply by 2
23 | # Some papers count only multiplications and ignore additions; in that case do not multiply by 2
24 | # This code multiplies by 2, following YOLOX.
25 | #--------------------------------------------------------#
26 | flops = flops * 2
27 | flops, params = clever_format([flops, params], "%.3f")
28 | print('Total GFLOPS: %s' % (flops))
29 | print('Total params: %s' % (params))
30 |
--------------------------------------------------------------------------------
/demo/centernet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #
--------------------------------------------------------------------------------
/demo/detr-main/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 |
3 | jobs:
4 | python_lint:
5 | docker:
6 | - image: circleci/python:3.7
7 | steps:
8 | - checkout
9 | - run:
10 | command: |
11 | pip install --user --progress-bar off flake8 typing
12 | flake8 .
13 |
14 | test:
15 | docker:
16 | - image: circleci/python:3.7
17 | steps:
18 | - checkout
19 | - run:
20 | command: |
21 | pip install --user --progress-bar off scipy pytest
22 | pip install --user --progress-bar off --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
23 | pip install --user --progress-bar off onnx onnxruntime
24 | pytest .
25 |
26 | workflows:
27 | build:
28 | jobs:
29 | - python_lint
30 | - test
31 |
--------------------------------------------------------------------------------
/demo/detr-main/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
4 | Please read the [full text](https://code.fb.com/codeofconduct/)
5 | so that you can understand what actions will and will not be tolerated.
6 |
--------------------------------------------------------------------------------
/demo/detr-main/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DETR
2 | We want to make contributing to this project as easy and transparent as
3 | possible.
4 |
5 | ## Our Development Process
6 | Minor changes and improvements will be released on an ongoing basis. Larger changes (e.g., changesets implementing a new paper) will be released on a more periodic basis.
7 |
8 | ## Pull Requests
9 | We actively welcome your pull requests.
10 |
11 | 1. Fork the repo and create your branch from `master`.
12 | 2. If you've added code that should be tested, add tests.
13 | 3. If you've changed APIs, update the documentation.
14 | 4. Ensure the test suite passes.
15 | 5. Make sure your code lints.
16 | 6. If you haven't already, complete the Contributor License Agreement ("CLA").
17 |
18 | ## Contributor License Agreement ("CLA")
19 | In order to accept your pull request, we need you to submit a CLA. You only need
20 | to do this once to work on any of Facebook's open source projects.
21 |
22 | Complete your CLA here:
23 |
24 | ## Issues
25 | We use GitHub issues to track public bugs. Please ensure your description is
26 | clear and has sufficient instructions to be able to reproduce the issue.
27 |
28 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
29 | disclosure of security bugs. In those cases, please go through the process
30 | outlined on that page and do not file a public issue.
31 |
32 | ## Coding Style
33 | * 4 spaces for indentation rather than tabs
34 | * 80 character line length
35 | * PEP8 formatting following [Black](https://black.readthedocs.io/en/stable/)
36 |
37 | ## License
38 | By contributing to DETR, you agree that your contributions will be licensed
39 | under the LICENSE file in the root directory of this source tree.
40 |
--------------------------------------------------------------------------------
/demo/detr-main/.github/DETR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/detr-main/.github/DETR.png
--------------------------------------------------------------------------------
/demo/detr-main/.github/ISSUE_TEMPLATE/bugs.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bugs"
3 | about: Report bugs in DETR
4 | title: Please read & provide the following
5 |
6 | ---
7 |
8 | ## Instructions To Reproduce the 🐛 Bug:
9 |
10 | 1. what changes you made (`git diff`) or what code you wrote
11 | ```
12 |
13 | ```
14 | 2. what exact command you run:
15 | 3. what you observed (including __full logs__):
16 | ```
17 |
18 | ```
19 | 4. please simplify the steps as much as possible so they do not require additional resources to
20 | run, such as a private dataset.
21 |
22 | ## Expected behavior:
23 |
24 | If there are no obvious error in "what you observed" provided above,
25 | please tell us the expected behavior.
26 |
27 | ## Environment:
28 |
29 | Provide your environment information using the following command:
30 | ```
31 | python -m torch.utils.collect_env
32 | ```
33 |
--------------------------------------------------------------------------------
/demo/detr-main/.github/ISSUE_TEMPLATE/questions-help-support.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "How to do something❓"
3 | about: How to do something using DETR?
4 |
5 | ---
6 |
7 | ## ❓ How to do something using DETR
8 |
9 | Describe what you want to do, including:
10 | 1. what inputs you will provide, if any:
11 | 2. what outputs you are expecting:
12 |
13 |
14 | NOTE:
15 |
16 | 1. Only general answers are provided.
17 | If you want to ask about "why X did not work", please use the
18 | [Unexpected behaviors](https://github.com/facebookresearch/detr/issues/new/choose) issue template.
19 |
20 | 2. About how to implement new models / new dataloader / new training logic, etc., check documentation first.
21 |
22 | 3. We do not answer general machine learning / computer vision questions that are not specific to DETR, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be used to achieve X.
23 |
--------------------------------------------------------------------------------
/demo/detr-main/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "Unexpected behaviors"
3 | about: Run into unexpected behaviors when using DETR
4 | title: Please read & provide the following
5 |
6 | ---
7 |
8 | If you do not know the root cause of the problem, and wish someone to help you, please
9 | post according to this template:
10 |
11 | ## Instructions To Reproduce the Issue:
12 |
13 | 1. what changes you made (`git diff`) or what code you wrote
14 | ```
15 |
16 | ```
17 | 2. what exact command you run:
18 | 3. what you observed (including __full logs__):
19 | ```
20 |
21 | ```
22 | 4. please simplify the steps as much as possible so they do not require additional resources to
23 | run, such as a private dataset.
24 |
25 | ## Expected behavior:
26 |
27 | If there are no obvious error in "what you observed" provided above,
28 | please tell us the expected behavior.
29 |
30 | If you expect the model to converge / work better, note that we do not give suggestions
31 | on how to train a new model.
32 | Only in one of the two conditions we will help with it:
33 | (1) You're unable to reproduce the results in DETR model zoo.
34 | (2) It indicates a DETR bug.
35 |
36 | ## Environment:
37 |
38 | Provide your environment information using the following command:
39 | ```
40 | python -m torch.utils.collect_env
41 | ```
42 |
--------------------------------------------------------------------------------
/demo/detr-main/.gitignore:
--------------------------------------------------------------------------------
1 | .nfs*
2 | *.ipynb
3 | *.pyc
4 | .dumbo.json
5 | .DS_Store
6 | .*.swp
7 | *.pth
8 | **/__pycache__/**
9 | .ipynb_checkpoints/
10 | datasets/data/
11 | experiment-*
12 | *.tmp
13 | *.pkl
14 | **/.mypy_cache/*
15 | .mypy_cache/*
16 | not_tracked_dir/
17 | .vscode
18 |
--------------------------------------------------------------------------------
/demo/detr-main/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 |
5 | RUN apt-get update -qq && \
6 | apt-get install -y git vim libgtk2.0-dev && \
7 | rm -rf /var/cache/apk/*
8 |
9 | RUN pip --no-cache-dir install Cython
10 |
11 | COPY requirements.txt /workspace
12 |
13 | RUN pip --no-cache-dir install -r /workspace/requirements.txt
14 |
--------------------------------------------------------------------------------
/demo/detr-main/d2/configs/detr_256_6_6_torchvision.yaml:
--------------------------------------------------------------------------------
1 | MODEL:
2 | META_ARCHITECTURE: "Detr"
3 | WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
4 | PIXEL_MEAN: [123.675, 116.280, 103.530]
5 | PIXEL_STD: [58.395, 57.120, 57.375]
6 | MASK_ON: False
7 | RESNETS:
8 | DEPTH: 50
9 | STRIDE_IN_1X1: False
10 | OUT_FEATURES: ["res2", "res3", "res4", "res5"]
11 | DETR:
12 | GIOU_WEIGHT: 2.0
13 | L1_WEIGHT: 5.0
14 | NUM_OBJECT_QUERIES: 100
15 | DATASETS:
16 | TRAIN: ("coco_2017_train",)
17 | TEST: ("coco_2017_val",)
18 | SOLVER:
19 | IMS_PER_BATCH: 64
20 | BASE_LR: 0.0001
21 | STEPS: (369600,)
22 | MAX_ITER: 554400
23 | WARMUP_FACTOR: 1.0
24 | WARMUP_ITERS: 10
25 | WEIGHT_DECAY: 0.0001
26 | OPTIMIZER: "ADAMW"
27 | BACKBONE_MULTIPLIER: 0.1
28 | CLIP_GRADIENTS:
29 | ENABLED: True
30 | CLIP_TYPE: "full_model"
31 | CLIP_VALUE: 0.01
32 | NORM_TYPE: 2.0
33 | INPUT:
34 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
35 | CROP:
36 | ENABLED: True
37 | TYPE: "absolute_range"
38 | SIZE: (384, 600)
39 | FORMAT: "RGB"
40 | TEST:
41 | EVAL_PERIOD: 4000
42 | DATALOADER:
43 | FILTER_EMPTY_ANNOTATIONS: False
44 | NUM_WORKERS: 4
45 | VERSION: 2
46 |
--------------------------------------------------------------------------------
/demo/detr-main/d2/configs/detr_segm_256_6_6_torchvision.yaml:
--------------------------------------------------------------------------------
1 | MODEL:
2 | META_ARCHITECTURE: "Detr"
3 | # WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
4 | PIXEL_MEAN: [123.675, 116.280, 103.530]
5 | PIXEL_STD: [58.395, 57.120, 57.375]
6 | MASK_ON: True
7 | RESNETS:
8 | DEPTH: 50
9 | STRIDE_IN_1X1: False
10 | OUT_FEATURES: ["res2", "res3", "res4", "res5"]
11 | DETR:
12 | GIOU_WEIGHT: 2.0
13 | L1_WEIGHT: 5.0
14 | NUM_OBJECT_QUERIES: 100
15 | FROZEN_WEIGHTS: ''
16 | DATASETS:
17 | TRAIN: ("coco_2017_train",)
18 | TEST: ("coco_2017_val",)
19 | SOLVER:
20 | IMS_PER_BATCH: 64
21 | BASE_LR: 0.0001
22 | STEPS: (55440,)
23 | MAX_ITER: 92400
24 | WARMUP_FACTOR: 1.0
25 | WARMUP_ITERS: 10
26 | WEIGHT_DECAY: 0.0001
27 | OPTIMIZER: "ADAMW"
28 | BACKBONE_MULTIPLIER: 0.1
29 | CLIP_GRADIENTS:
30 | ENABLED: True
31 | CLIP_TYPE: "full_model"
32 | CLIP_VALUE: 0.01
33 | NORM_TYPE: 2.0
34 | INPUT:
35 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
36 | CROP:
37 | ENABLED: True
38 | TYPE: "absolute_range"
39 | SIZE: (384, 600)
40 | FORMAT: "RGB"
41 | TEST:
42 | EVAL_PERIOD: 4000
43 | DATALOADER:
44 | FILTER_EMPTY_ANNOTATIONS: False
45 | NUM_WORKERS: 4
46 | VERSION: 2
47 |
--------------------------------------------------------------------------------
/demo/detr-main/d2/detr/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | from .config import add_detr_config
3 | from .detr import Detr
4 | from .dataset_mapper import DetrDatasetMapper
5 |
--------------------------------------------------------------------------------
/demo/detr-main/d2/detr/config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3 | from detectron2.config import CfgNode as CN
4 |
5 |
6 | def add_detr_config(cfg):
7 | """
8 | Add config for DETR.
9 | """
10 | cfg.MODEL.DETR = CN()
11 | cfg.MODEL.DETR.NUM_CLASSES = 80
12 |
13 | # For Segmentation
14 | cfg.MODEL.DETR.FROZEN_WEIGHTS = ''
15 |
16 | # LOSS
17 | cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
18 | cfg.MODEL.DETR.L1_WEIGHT = 5.0
19 | cfg.MODEL.DETR.DEEP_SUPERVISION = True
20 | cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
21 |
22 | # TRANSFORMER
23 | cfg.MODEL.DETR.NHEADS = 8
24 | cfg.MODEL.DETR.DROPOUT = 0.1
25 | cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
26 | cfg.MODEL.DETR.ENC_LAYERS = 6
27 | cfg.MODEL.DETR.DEC_LAYERS = 6
28 | cfg.MODEL.DETR.PRE_NORM = False
29 |
30 | cfg.MODEL.DETR.HIDDEN_DIM = 256
31 | cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100
32 |
33 | cfg.SOLVER.OPTIMIZER = "ADAMW"
34 | cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
35 |
--------------------------------------------------------------------------------
/demo/detr-main/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | import torch.utils.data
3 | import torchvision
4 |
5 | from .coco import build as build_coco
6 |
7 |
8 | def get_coco_api_from_dataset(dataset):
9 | for _ in range(10):
10 | # if isinstance(dataset, torchvision.datasets.CocoDetection):
11 | # break
12 | if isinstance(dataset, torch.utils.data.Subset):
13 | dataset = dataset.dataset
14 | if isinstance(dataset, torchvision.datasets.CocoDetection):
15 | return dataset.coco
16 |
17 |
18 | def build_dataset(image_set, args):
19 | if args.dataset_file == 'coco':
20 | return build_coco(image_set, args)
21 | if args.dataset_file == 'coco_panoptic':
22 | # to avoid making panopticapi required for coco
23 | from .coco_panoptic import build as build_coco_panoptic
24 | return build_coco_panoptic(image_set, args)
25 | raise ValueError(f'dataset {args.dataset_file} not supported')
26 |
--------------------------------------------------------------------------------
/demo/detr-main/datasets/panoptic_eval.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | import json
3 | import os
4 |
5 | import util.misc as utils
6 |
7 | try:
8 | from panopticapi.evaluation import pq_compute
9 | except ImportError:
10 | pass
11 |
12 |
13 | class PanopticEvaluator(object):
14 | def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
15 | self.gt_json = ann_file
16 | self.gt_folder = ann_folder
17 | if utils.is_main_process():
18 | if not os.path.exists(output_dir):
19 | os.mkdir(output_dir)
20 | self.output_dir = output_dir
21 | self.predictions = []
22 |
23 | def update(self, predictions):
24 | for p in predictions:
25 | with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
26 | f.write(p.pop("png_string"))
27 |
28 | self.predictions += predictions
29 |
30 | def synchronize_between_processes(self):
31 | all_predictions = utils.all_gather(self.predictions)
32 | merged_predictions = []
33 | for p in all_predictions:
34 | merged_predictions += p
35 | self.predictions = merged_predictions
36 |
37 | def summarize(self):
38 | if utils.is_main_process():
39 | json_data = {"annotations": self.predictions}
40 | predictions_json = os.path.join(self.output_dir, "predictions.json")
41 | with open(predictions_json, "w") as f:
42 | f.write(json.dumps(json_data))
43 | return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
44 | return None
45 |
--------------------------------------------------------------------------------
/demo/detr-main/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/detr-main/demo.jpg
--------------------------------------------------------------------------------
/demo/detr-main/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 |
2 | import onnxruntime
3 | import cv2
4 | import numpy as np
5 | from torchvision import transforms
6 | from PIL import Image
7 | import torch
8 | from tools import *
9 |
10 |
11 | if __name__ == "__main__":
12 |
13 | data_transform = transforms.Compose([
14 | transforms.Resize(800),
15 | transforms.ToTensor(),
16 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
17 | ])
18 | img_path = "demo.jpg"
19 | img_o = Image.open(img_path)
20 | img = data_transform(img_o).unsqueeze(0)
21 |
22 | image_input = img.numpy()
23 |
24 |
25 | session = onnxruntime.InferenceSession("detr_sim.onnx", providers=["CPUExecutionProvider"])
26 | pred = session.run(["predict"], {"image": image_input})[0]
27 | scores = torch.from_numpy(pred[0][:,0:91])
28 | bboxes = torch.from_numpy(pred[0][:,91:])
29 | keep = scores.max(-1).values > 0.7
30 | scores = scores[keep]
31 | bboxes = bboxes[keep]
32 | print(bboxes)
33 | fin_bboxes = rescale_bboxes(bboxes, img_o.size)
34 | plot_results(img_o, scores, fin_bboxes)
35 |
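rescale_bboxes and plot_results are imported from tools.py, which is not included in this dump. For reference, a sketch of the rescaling step as DETR usually defines it (boxes are predicted as normalized cx, cy, w, h); this mirrors the upstream helper but is reconstructed here, so treat it as an approximation:

```python
import torch

def box_cxcywh_to_xyxy(x):
    cx, cy, w, h = x.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                        cx + 0.5 * w, cy + 0.5 * h], dim=-1)

def rescale_bboxes(out_bbox, size):
    """Map normalized cxcywh boxes to pixel xyxy boxes for a PIL image size (width, height)."""
    img_w, img_h = size
    boxes = box_cxcywh_to_xyxy(out_bbox)
    return boxes * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
```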
--------------------------------------------------------------------------------
/demo/detr-main/load_from_cpp.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 |
4 | # It was found that the converted TensorRT model produced no usable inference results, so the C++ inference output can be saved to a file and inspected here in Python
5 |
6 | def load_tensor(file):
7 |
8 | with open(file, "rb") as f:
9 | binary_data = f.read()
10 |
11 | magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0)
12 | assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file."
13 |
14 | dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4)
15 |
16 | if dtype == 0:
17 | np_dtype = np.float32
18 | elif dtype == 1:
19 | np_dtype = np.float16
20 | else:
21 | assert False, f"Unsupport dtype = {dtype}, can not convert to numpy dtype"
22 |
23 | return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims)
24 |
25 |
26 | a = load_tensor('/home/rex/Desktop/tensorrt_learning/trt_cpp/workspace/detr_cpp.tensor')
27 | print(a)
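The binary layout read above is a uint32 header [magic, ndims, dtype], followed by ndims uint32 dims and then the raw data. A counterpart writer, derived from load_tensor and not part of the repo, can be useful for pushing a Python-side tensor through the same comparison path:

```python
import numpy as np

def save_tensor(file, array):
    """Write a numpy array in the layout that load_tensor expects."""
    dtype_code = {np.float32: 0, np.float16: 1}[array.dtype.type]
    header = np.array([0xFCCFE2E2, array.ndim, dtype_code], dtype=np.uint32)
    with open(file, "wb") as f:
        f.write(header.tobytes())                                  # magic, ndims, dtype
        f.write(np.array(array.shape, dtype=np.uint32).tobytes())  # dims
        f.write(np.ascontiguousarray(array).tobytes())             # payload
```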
--------------------------------------------------------------------------------
/demo/detr-main/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | from .detr import build
3 |
4 |
5 | def build_model(args):
6 | return build(args)
7 |
--------------------------------------------------------------------------------
/demo/detr-main/requirements.txt:
--------------------------------------------------------------------------------
1 | cython
2 | git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools
3 | submitit
4 | torch>=1.5.0
5 | torchvision>=0.6.0
6 | git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi
7 | scipy
8 | onnx
9 | onnxruntime
10 |
--------------------------------------------------------------------------------
/demo/detr-main/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/detr-main/street.jpg
--------------------------------------------------------------------------------
/demo/detr-main/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | ignore = F401,E402,F403,W503,W504
4 |
--------------------------------------------------------------------------------
/demo/detr-main/util/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 |
--------------------------------------------------------------------------------
/demo/detr_demo/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/detr_demo/demo.jpg
--------------------------------------------------------------------------------
/demo/detr_demo/export_onnx.py:
--------------------------------------------------------------------------------
1 | from json import tool
2 | import requests
3 | import matplotlib.pyplot as plt
4 |
5 | from torchvision.models import resnet50
6 | import torchvision.transforms as T
7 |
8 | from tools import *
9 | from model import *
10 |
11 |
12 | if __name__ == '__main__':
13 | detr = DETRdemo(num_classes=91)
14 | state_dict = torch.load('detr_demo.pth')
15 | detr.load_state_dict(state_dict)
16 | detr.eval()
17 |
18 | dummy = torch.zeros(1,3,640,640)
19 | torch.onnx.export(
20 | detr,(dummy,),
21 | "detr.onnx",
22 | input_names=["image"],
23 | output_names=["output"],
24 | opset_version=11
25 | )
26 |
--------------------------------------------------------------------------------
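Note: a quick sanity check of the export above; a sketch that assumes the __main__ block has just run (so detr, dummy and detr.onnx are available) and that this modified DETRdemo returns a single concatenated tensor, as predict.py below suggests:

import numpy as np
import onnxruntime

with torch.no_grad():
    ref = detr(dummy).numpy()                       # PyTorch reference output
sess = onnxruntime.InferenceSession("detr.onnx", providers=["CPUExecutionProvider"])
out = sess.run(["output"], {"image": dummy.numpy()})[0]
print("max abs diff:", np.abs(ref - out).max())     # should be close to zero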
/demo/detr_demo/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 |
2 | import imghdr
3 | import onnxruntime
4 | import cv2
5 | import numpy as np
6 | from torchvision import transforms
7 | from PIL import Image
8 | import os
9 | import torch
10 | from model import *
11 | from tools import *
12 |
13 |
14 | if __name__ == "__main__":
15 |
16 | data_transform = transforms.Compose([
17 | transforms.Resize(640),
18 | transforms.ToTensor(),
19 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
20 | ])
21 | img_path = "demo.jpg"
22 | img_o = Image.open(img_path)
23 | img = data_transform(img_o).unsqueeze(0)
24 |
25 | image_input = img.numpy()
26 |
27 |
28 | session = onnxruntime.InferenceSession("detr_sim.onnx", providers=["CPUExecutionProvider"])
29 | pred = session.run(["output"], {"image": image_input})[0]
30 | print(pred)
31 | scores = torch.from_numpy(pred[0][:,0:91])
32 | bboxes = torch.from_numpy(pred[0][:,91:])
33 | keep = scores.max(-1).values > 0.7
34 | scores = scores[keep]
35 | bboxes = bboxes[keep]
36 | print(bboxes)
37 | fin_bboxes = rescale_bboxes(bboxes, img_o.size)
38 | plot_results(img_o, scores, fin_bboxes)
39 |
--------------------------------------------------------------------------------
/demo/detr_demo/onnx_simplify.py:
--------------------------------------------------------------------------------
1 | from onnxsim import simplify
2 | import onnx
3 | input_path="detr.onnx"
4 | output_path="detr_sim.onnx"
5 | onnx_model = onnx.load(input_path)
6 | model_simp, check = simplify(onnx_model)
7 | assert check, "Simplified ONNX model could not be validated"
8 | onnx.save(model_simp, output_path)
9 | print('finished exporting onnx')
--------------------------------------------------------------------------------
/demo/detr_demo/predict.py:
--------------------------------------------------------------------------------
1 | from json import tool
2 | import requests
3 | import matplotlib.pyplot as plt
4 |
5 |
6 | from tools import *
7 | from model import *
8 | if __name__ == "__main__":
9 |
10 | detr = DETRdemo(num_classes=91)
11 | state_dict = torch.load('detr_demo.pth')
12 | detr.load_state_dict(state_dict)
13 | detr.eval()
14 |
15 | im = Image.open("demo.jpg")
16 |
17 |     # Record the original image size to make post-processing easier
18 | img = transform(im).unsqueeze(0)
19 | pred = detr(img)
20 |     # Push as much of the post-processing as possible into the ONNX graph
21 | scores = pred[0][:,0:91]
22 |     # The boxes need to be rescaled back to the original image size
23 | bboxes = pred[0][:,91:]
24 | keep = scores.max(-1).values > 0.7
25 | scores = scores[keep]
26 | print(scores.shape)
27 | bboxes = bboxes[keep]
28 | fin_bboxes = rescale_bboxes(bboxes, im.size)
29 | plot_results(im, scores, fin_bboxes)
30 |
--------------------------------------------------------------------------------
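Note: rescale_bboxes and plot_results come from tools.py, which is not shown here. In the standard DETR demo the rescale step converts normalized (cx, cy, w, h) boxes into absolute (x0, y0, x1, y1) pixel coordinates; a sketch of that logic, which may differ in detail from the actual tools.py:

import torch

def box_cxcywh_to_xyxy(x):
    # (cx, cy, w, h) -> (x0, y0, x1, y1)
    cx, cy, w, h = x.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                        cx + 0.5 * w, cy + 0.5 * h], dim=-1)

def rescale_bboxes(out_bbox, size):
    # scale normalized boxes back to pixel coordinates of the original image
    img_w, img_h = size
    return box_cxcywh_to_xyxy(out_bbox) * torch.tensor([img_w, img_h, img_w, img_h],
                                                       dtype=torch.float32)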
/demo/swin/__pycache__/model.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/swin/__pycache__/model.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/swin/__pycache__/my_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/swin/__pycache__/my_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/swin/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/swin/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/demo/swin/class_indices.json:
--------------------------------------------------------------------------------
1 | {
2 | "0": "daisy",
3 | "1": "dandelion",
4 | "2": "roses",
5 | "3": "sunflowers",
6 | "4": "tulips"
7 | }
--------------------------------------------------------------------------------
/demo/swin/export_onnx.py:
--------------------------------------------------------------------------------
1 |
2 | from multiprocessing import dummy
3 | import os
4 | import json
5 | import onnx
6 | from psutil import cpu_count
7 | import sys
8 |
9 |
10 | import torch
11 | from PIL import Image
12 | from torchvision import transforms
13 | import matplotlib.pyplot as plt
14 |
15 | from model import swin_tiny_patch4_window7_224 as create_model
16 |
17 |
18 | def main():
19 |
20 | model = create_model(num_classes=5).to("cpu")
21 | # load model weights
22 | model_weight_path = "./weights/model-9.pth"
23 |     model.load_state_dict(torch.load(model_weight_path, map_location="cpu"))  # note: device was not defined in this script, so load directly on the CPU
24 | model.eval()
25 |
26 |
27 | dummy = torch.zeros(1,3,224,224)
28 | torch.onnx.export(
29 | model,(dummy,),
30 | "swin.onnx",
31 | input_names=["image"],
32 | output_names=["predict"],
33 | opset_version=12,
34 | dynamic_axes={"image":{0:"batch"},"predict":{0:"batch"}}
35 | )
36 |
37 |
38 | if __name__ == '__main__':
39 | main()
40 |
--------------------------------------------------------------------------------
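Note: since the export declares a dynamic batch axis, it is worth confirming that the resulting graph really accepts different batch sizes. A minimal sketch, assuming swin.onnx has been written to the working directory:

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession("swin.onnx", providers=["CPUExecutionProvider"])
for batch in (1, 4):
    x = np.random.rand(batch, 3, 224, 224).astype(np.float32)
    y = sess.run(["predict"], {"image": x})[0]
    print(batch, y.shape)   # expect (batch, 5)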
/demo/swin/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 |
2 | import onnxruntime
3 | import cv2
4 | import numpy as np
5 | from torchvision import transforms
6 | from PIL import Image
7 | import os
8 | import torch
9 |
10 |
11 | if __name__ == "__main__":
12 |
13 | img_size = 224
14 | data_transform = transforms.Compose(
15 | [transforms.Resize(int(img_size * 1.14)),
16 | transforms.CenterCrop(img_size),
17 | transforms.ToTensor(),
18 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
19 |
20 | # load image
21 | img_path = "flower_photos/test.jpg"
22 | img = Image.open(img_path)
23 | img = data_transform(img)
24 | # expand batch dimension
25 | img = torch.unsqueeze(img, dim=0)
26 | image_input = img.numpy()
27 | session = onnxruntime.InferenceSession("weights/swin.onnx", providers=["CPUExecutionProvider"])
28 | pred = session.run(["predict"], {"image": image_input})[0]
29 | print(pred)
30 |
31 |     # expected output: 0.51477224 0.28504044 0.05650776 0.08253327 0.06114639
--------------------------------------------------------------------------------
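Note: the raw prediction can be mapped to a flower name with the class_indices.json shown above. A sketch that continues from the script (pred comes from session.run) and assumes class_indices.json sits next to the script; if the exported head already outputs probabilities, the argmax alone is enough:

import json
import numpy as np

with open("class_indices.json") as f:
    class_indict = json.load(f)
logits = pred[0]
probs = np.exp(logits) / np.exp(logits).sum()   # softmax over the 5 classes
cls = int(np.argmax(probs))
print(class_indict[str(cls)], float(probs[cls]))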
/demo/swin/my_dataset.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import torch
3 | from torch.utils.data import Dataset
4 |
5 |
6 | class MyDataSet(Dataset):
7 |     """Custom dataset."""
8 |
9 | def __init__(self, images_path: list, images_class: list, transform=None):
10 | self.images_path = images_path
11 | self.images_class = images_class
12 | self.transform = transform
13 |
14 | def __len__(self):
15 | return len(self.images_path)
16 |
17 | def __getitem__(self, item):
18 | img = Image.open(self.images_path[item])
19 |         # mode 'RGB' means a color image, 'L' means a grayscale image
20 | if img.mode != 'RGB':
21 | raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
22 | label = self.images_class[item]
23 |
24 | if self.transform is not None:
25 | img = self.transform(img)
26 |
27 | return img, label
28 |
29 | @staticmethod
30 | def collate_fn(batch):
31 |         # For reference, the official default_collate implementation:
32 | # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
33 | images, labels = tuple(zip(*batch))
34 |
35 | images = torch.stack(images, dim=0)
36 | labels = torch.as_tensor(labels)
37 | return images, labels
38 |
--------------------------------------------------------------------------------
/demo/swin/onnx_simplify.py:
--------------------------------------------------------------------------------
1 | from onnxsim import simplify
2 | import onnx
3 | input_path="swin.onnx"
4 | output_path="swin_sim.onnx"
5 | onnx_model = onnx.load(input_path)
6 | model_simp, check = simplify(onnx_model, input_shapes={'image': [1, 3, 224, 224]}, dynamic_input_shape=True)
7 | assert check, "Simplified ONNX model could not be validated"
8 | onnx.save(model_simp, output_path)
9 | print('finished exporting onnx')
--------------------------------------------------------------------------------
/demo/swin/read_onnx.py:
--------------------------------------------------------------------------------
1 | import onnx
2 | import onnxruntime
3 | model = onnx.load("swin.onnx")
4 | # validate the exported model and print a readable summary of its graph
5 | onnx.checker.check_model(model)
6 | print(onnx.helper.printable_graph(model.graph))
7 | # confirm onnxruntime can build a session from it
8 | session = onnxruntime.InferenceSession("swin.onnx", providers=["CPUExecutionProvider"])
9 | print([i.name for i in session.get_inputs()], [o.name for o in session.get_outputs()])
--------------------------------------------------------------------------------
/demo/unet/.gitignore:
--------------------------------------------------------------------------------
1 | *.onnx
2 | *.pth
3 | __pycache__
4 |
--------------------------------------------------------------------------------
/demo/unet/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Bubbliiiing
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/ImageSets/Segmentation/README.md:
--------------------------------------------------------------------------------
1 | This folder holds the txt files that list the sample file names.
2 |
3 |
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/ImageSets/Segmentation/train.txt:
--------------------------------------------------------------------------------
1 | 0
2 | 1
3 | 10
4 | 11
5 | 12
6 | 13
7 | 14
8 | 15
9 | 16
10 | 17
11 | 18
12 | 19
13 | 2
14 | 20
15 | 21
16 | 22
17 | 23
18 | 24
19 | 25
20 | 26
21 | 27
22 | 28
23 | 29
24 | 3
25 | 4
26 | 5
27 | 6
28 | 7
29 | 8
30 | 9
31 |
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/ImageSets/Segmentation/trainval.txt:
--------------------------------------------------------------------------------
1 | 0
2 | 1
3 | 10
4 | 11
5 | 12
6 | 13
7 | 14
8 | 15
9 | 16
10 | 17
11 | 18
12 | 19
13 | 2
14 | 20
15 | 21
16 | 22
17 | 23
18 | 24
19 | 25
20 | 26
21 | 27
22 | 28
23 | 29
24 | 3
25 | 4
26 | 5
27 | 6
28 | 7
29 | 8
30 | 9
31 |
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/0.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/1.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/10.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/11.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/12.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/13.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/14.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/15.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/16.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/17.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/18.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/19.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/2.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/20.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/21.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/22.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/23.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/24.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/25.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/26.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/27.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/28.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/29.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/3.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/4.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/5.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/6.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/7.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/8.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Images/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Images/9.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/0.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/1.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/10.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/11.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/12.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/13.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/14.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/15.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/16.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/17.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/18.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/19.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/2.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/20.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/21.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/22.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/23.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/24.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/25.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/26.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/27.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/28.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/29.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/3.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/4.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/5.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/6.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/7.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/8.png
--------------------------------------------------------------------------------
/demo/unet/Medical_Datasets/Labels/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/Medical_Datasets/Labels/9.png
--------------------------------------------------------------------------------
/demo/unet/VOCdevkit/VOC2007/ImageSets/Segmentation/README.md:
--------------------------------------------------------------------------------
1 | This folder holds the txt files that list the sample file names.
2 |
3 |
--------------------------------------------------------------------------------
/demo/unet/VOCdevkit/VOC2007/JPEGImages/README.md:
--------------------------------------------------------------------------------
1 | This folder holds the image files used for training.
2 |
--------------------------------------------------------------------------------
/demo/unet/VOCdevkit/VOC2007/SegmentationClass/README.md:
--------------------------------------------------------------------------------
1 | This folder holds the weights produced during training.
2 |
--------------------------------------------------------------------------------
/demo/unet/datasets/JPEGImages/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/datasets/JPEGImages/1.jpg
--------------------------------------------------------------------------------
/demo/unet/datasets/SegmentationClass/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/datasets/SegmentationClass/1.png
--------------------------------------------------------------------------------
/demo/unet/datasets/before/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/datasets/before/1.jpg
--------------------------------------------------------------------------------
/demo/unet/export.py:
--------------------------------------------------------------------------------
1 | #----------------------------------------------------#
2 | #   Single-image prediction, camera detection and FPS testing are combined
3 | #   into one .py file; the behaviour is switched by setting "mode".
4 | #----------------------------------------------------#
5 | import time
6 |
7 | import cv2
8 | import numpy as np
9 | from PIL import Image
10 | import torch
11 | import torch.onnx
12 | from unet import Unet
13 |
14 | if __name__ == "__main__":
15 | unet = Unet()
16 | dummy = torch.zeros(1, 3, 512, 512).cuda()
17 | torch.onnx.export(
18 | unet.net, (dummy,), "unet.onnx", input_names=["images"], output_names=["output"], opset_version=11,
19 | dynamic_axes={
20 | "images":{0: "batch"},
21 | "output":{0: "batch"}
22 | }
23 | )
24 | print("Done")
--------------------------------------------------------------------------------
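Note: this export runs the dummy tensor on the GPU because Unet() presumably builds its network on CUDA; if no GPU is available, removing the .cuda() call and moving unet.net to the CPU before torch.onnx.export is the usual workaround (an assumption about the Unet wrapper, which is not shown here).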
/demo/unet/img/cell.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/img/cell.png
--------------------------------------------------------------------------------
/demo/unet/img/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/unet/img/street.jpg
--------------------------------------------------------------------------------
/demo/unet/logs/README.MD:
--------------------------------------------------------------------------------
1 | This folder is used to store the files produced after training.
--------------------------------------------------------------------------------
/demo/unet/model_data/README.md:
--------------------------------------------------------------------------------
1 | This folder holds the already-trained weights, which can be downloaded from Baidu Netdisk.
2 |
--------------------------------------------------------------------------------
/demo/unet/nets/__init__.py:
--------------------------------------------------------------------------------
1 | #
--------------------------------------------------------------------------------
/demo/unet/requirements.txt:
--------------------------------------------------------------------------------
1 | scipy
2 | numpy
3 | matplotlib
4 | opencv_python
5 | torch
6 | torchvision
7 | tqdm
8 | Pillow
9 | h5py
--------------------------------------------------------------------------------
/demo/unet/summary.py:
--------------------------------------------------------------------------------
1 | #--------------------------------------------#
2 | #   This code is only used to inspect the network structure; it is not test code.
3 | #--------------------------------------------#
4 | from torchsummary import summary
5 |
6 | from nets.unet import Unet
7 |
8 | if __name__ == "__main__":
9 | model = Unet(num_classes = 21).train().cuda()
10 | # print("model have {} paramerters in total".format(sum(x.numel() for x in model.parameters())))
11 | summary(model, (3, 512, 512))
12 |
--------------------------------------------------------------------------------
/demo/unet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #
--------------------------------------------------------------------------------
/demo/unet/utils/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from PIL import Image
3 |
4 | #---------------------------------------------------------#
5 | #   Convert the image to RGB to avoid errors when predicting on grayscale images.
6 | #   The code only supports prediction on RGB images; all other types are converted to RGB.
7 | #---------------------------------------------------------#
8 | def cvtColor(image):
9 | if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:
10 | return image
11 | else:
12 | image = image.convert('RGB')
13 | return image
14 |
15 | #---------------------------------------------------#
16 | #   Resize the input image (keep the aspect ratio, pad with gray)
17 | #---------------------------------------------------#
18 | def resize_image(image, size):
19 | iw, ih = image.size
20 | w, h = size
21 |
22 | scale = min(w/iw, h/ih)
23 | nw = int(iw*scale)
24 | nh = int(ih*scale)
25 |
26 | image = image.resize((nw,nh), Image.BICUBIC)
27 | new_image = Image.new('RGB', size, (128,128,128))
28 | new_image.paste(image, ((w-nw)//2, (h-nh)//2))
29 |
30 | return new_image, nw, nh
31 |
32 | #---------------------------------------------------#
33 | #   Get the current learning rate
34 | #---------------------------------------------------#
35 | def get_lr(optimizer):
36 | for param_group in optimizer.param_groups:
37 | return param_group['lr']
38 |
39 | def preprocess_input(image):
40 | image /= 255.0
41 | return image
42 |
--------------------------------------------------------------------------------
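Note: together these helpers produce the letterboxed, normalized input the network expects. A minimal usage sketch; the 512x512 size matches the dummy in export.py and the image path is illustrative:

import numpy as np
from PIL import Image
from utils.utils import cvtColor, preprocess_input, resize_image

image = cvtColor(Image.open("img/street.jpg"))
image_data, nw, nh = resize_image(image, (512, 512))
x = preprocess_input(np.array(image_data, dtype=np.float32))
x = np.expand_dims(np.transpose(x, (2, 0, 1)), 0)   # HWC -> 1x3x512x512
print(x.shape)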
/demo/vit/class_indices.json:
--------------------------------------------------------------------------------
1 | {
2 | "0": "daisy",
3 | "1": "dandelion",
4 | "2": "roses",
5 | "3": "sunflowers",
6 | "4": "tulips"
7 | }
--------------------------------------------------------------------------------
/demo/vit/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/vit/demo.jpg
--------------------------------------------------------------------------------
/demo/vit/export_onnx.py:
--------------------------------------------------------------------------------
1 |
2 | from multiprocessing import dummy
3 | import os
4 | import json
5 | import onnx
6 | from psutil import cpu_count
7 | import sys
8 |
9 |
10 | import torch
11 | from PIL import Image
12 | from torchvision import transforms
13 | import matplotlib.pyplot as plt
14 |
15 | from vit_model import vit_base_patch16_224_in21k as create_model
16 |
17 |
18 | def main():
19 |
20 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
21 | device = 'cpu'
22 | model = create_model(num_classes=5, has_logits=False).to(device)
23 | # load model weights
24 | model_weight_path = "vit-model-9.pth"
25 | model.load_state_dict(torch.load(model_weight_path, map_location=device))
26 | model.eval()
27 |
28 |
29 | dummy = torch.zeros(1,3,224,224)
30 | torch.onnx.export(
31 | model,(dummy,),
32 | "vit.onnx",
33 | input_names=["image"],
34 | output_names=["predict"],
35 | opset_version=12,
36 | dynamic_axes={"image":{0:"batch"},"predict":{0:"batch"}}
37 | )
38 |
39 |
40 | if __name__ == '__main__':
41 | main()
42 |
--------------------------------------------------------------------------------
/demo/vit/flops.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from fvcore.nn import FlopCountAnalysis
3 |
4 | from vit_model import Attention
5 |
6 |
7 | def main():
8 | # Self-Attention
9 | a1 = Attention(dim=512, num_heads=1)
10 | a1.proj = torch.nn.Identity() # remove Wo
11 |
12 | # Multi-Head Attention
13 | a2 = Attention(dim=512, num_heads=8)
14 |
15 | # [batch_size, num_tokens, total_embed_dim]
16 | t = (torch.rand(32, 1024, 512),)
17 |
18 | flops1 = FlopCountAnalysis(a1, t)
19 | print("Self-Attention FLOPs:", flops1.total())
20 |
21 | flops2 = FlopCountAnalysis(a2, t)
22 | print("Multi-Head Attention FLOPs:", flops2.total())
23 |
24 |
25 | if __name__ == '__main__':
26 | main()
27 |
28 |
--------------------------------------------------------------------------------
/demo/vit/infer-onnxruntime.py:
--------------------------------------------------------------------------------
1 |
2 | import onnxruntime
3 | import cv2
4 | import numpy as np
5 | from torchvision import transforms
6 | from PIL import Image
7 | import os
8 | import torch
9 |
10 |
11 | if __name__ == "__main__":
12 |
13 | img_size = 224
14 | data_transform = transforms.Compose(
15 | [transforms.Resize(256),
16 | transforms.CenterCrop(224),
17 | transforms.ToTensor(),
18 | transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
19 | # load image
20 | img_path = "demo.jpg"
21 | img = Image.open(img_path)
22 | img = data_transform(img)
23 | # expand batch dimension
24 | img = torch.unsqueeze(img, dim=0)
25 | image_input = img.numpy()
26 | session = onnxruntime.InferenceSession("vit.onnx", providers=["CPUExecutionProvider"])
27 | pred = session.run(["predict"], {"image": image_input})[0]
28 | print(pred)
--------------------------------------------------------------------------------
/demo/vit/my_dataset.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import torch
3 | from torch.utils.data import Dataset
4 |
5 |
6 | class MyDataSet(Dataset):
7 |     """Custom dataset."""
8 |
9 | def __init__(self, images_path: list, images_class: list, transform=None):
10 | self.images_path = images_path
11 | self.images_class = images_class
12 | self.transform = transform
13 |
14 | def __len__(self):
15 | return len(self.images_path)
16 |
17 | def __getitem__(self, item):
18 | img = Image.open(self.images_path[item])
19 |         # mode 'RGB' means a color image, 'L' means a grayscale image
20 | if img.mode != 'RGB':
21 | raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
22 | label = self.images_class[item]
23 |
24 | if self.transform is not None:
25 | img = self.transform(img)
26 |
27 | return img, label
28 |
29 | @staticmethod
30 | def collate_fn(batch):
31 |         # For reference, the official default_collate implementation:
32 | # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
33 | images, labels = tuple(zip(*batch))
34 |
35 | images = torch.stack(images, dim=0)
36 | labels = torch.as_tensor(labels)
37 | return images, labels
38 |
--------------------------------------------------------------------------------
/demo/vit/onnx_simplify.py:
--------------------------------------------------------------------------------
1 | from onnxsim import simplify
2 | import onnx
3 | input_path="vit.onnx"
4 | output_path="vit.onnx"
5 | onnx_model = onnx.load(input_path)
6 | model_simp, check = simplify(onnx_model,input_shapes={'image': [1, 3, 224, 224]})
7 | assert check, "Simplified ONNX model could not be validated"
8 | onnx.save(model_simp, output_path)
9 | print('finished exporting onnx')
--------------------------------------------------------------------------------
/demo/vit/predict.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import torch
5 | from PIL import Image
6 | from torchvision import transforms
7 | import matplotlib.pyplot as plt
8 |
9 | from vit_model import vit_base_patch16_224_in21k as create_model
10 |
11 |
12 | def main():
13 | device = 'cpu'
14 | data_transform = transforms.Compose(
15 | [transforms.Resize(256),
16 | transforms.CenterCrop(224),
17 | transforms.ToTensor(),
18 | transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
19 | img_path = "./demo.jpg"
20 | img = Image.open(img_path)
21 | img = data_transform(img)
22 | img = torch.unsqueeze(img, dim=0)
23 | model = create_model(num_classes=5, has_logits=False).to(device)
24 | model_weight_path = "vit-model-9.pth"
25 | model.load_state_dict(torch.load(model_weight_path, map_location=device))
26 | model.eval()
27 | with torch.no_grad():
28 | # predict class
29 | # output = torch.squeeze(model(img.to(device))).cpu()
30 | output = model(img.to(device))
31 | print(output)
32 | # predict = torch.softmax(output, dim=0)
33 |
34 | if __name__ == '__main__':
35 | main()
36 |
--------------------------------------------------------------------------------
/demo/yolov5/detect-for-yolov5-6.0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd yolov5-6.0
4 |
5 | python detect.py --weights=yolov5s.pt --source=../workspace/car.jpg --iou-thres=0.5 --conf-thres=0.25 --project=../workspace/
6 |
7 | mv ../workspace/exp/car.jpg ../workspace/car-pytorch.jpg
8 | rm -rf ../workspace/exp
--------------------------------------------------------------------------------
/demo/yolov5/export-yolov5-6.0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd yolov5-6.0
4 | python export.py --weights=yolov5s.pt --dynamic --include=onnx --opset=11
5 |
6 | mv yolov5s.onnx ../workspace/
--------------------------------------------------------------------------------
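Note: after running this export script, the input and output signatures of the resulting ONNX file can be checked from Python. A sketch, assuming it is run from demo/yolov5 so the model sits in workspace/:

import onnxruntime

sess = onnxruntime.InferenceSession("workspace/yolov5s.onnx", providers=["CPUExecutionProvider"])
for t in sess.get_inputs() + sess.get_outputs():
    print(t.name, t.shape)   # the batch axis should show as dynamic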
/demo/yolov5/yolov5-6.0/.gitattributes:
--------------------------------------------------------------------------------
1 | # this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: glenn-jocher
4 | patreon: ultralytics
5 | open_collective: ultralytics
6 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🚀 Feature request"
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## 🚀 Feature
11 |
12 |
13 |
14 | ## Motivation
15 |
16 |
18 |
19 | ## Pitch
20 |
21 |
22 |
23 | ## Alternatives
24 |
25 |
26 |
27 | ## Additional context
28 |
29 |
30 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "❓Question"
3 | about: Ask a general question
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## ❔Question
11 |
12 | ## Additional context
13 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: "/"
5 | schedule:
6 | interval: weekly
7 | time: "04:00"
8 | open-pull-requests-limit: 10
9 | reviewers:
10 | - glenn-jocher
11 | labels:
12 | - dependencies
13 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/.github/workflows/rebase.yml:
--------------------------------------------------------------------------------
1 | name: Automatic Rebase
2 | # https://github.com/marketplace/actions/automatic-rebase
3 |
4 | on:
5 | issue_comment:
6 | types: [created]
7 |
8 | jobs:
9 | rebase:
10 | name: Rebase
11 | if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase')
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout the latest code
15 | uses: actions/checkout@v2
16 | with:
17 | fetch-depth: 0
18 | - name: Automatic Rebase
19 | uses: cirrus-actions/rebase@1.3.1
20 | env:
21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/Arial.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov5/yolov5-6.0/Arial.ttf
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov5/yolov5-6.0/models/__init__.py
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,14, 23,27, 37,58] # P4/16
9 | - [81,82, 135,169, 344,319] # P5/32
10 |
11 | # YOLOv3-tiny backbone
12 | backbone:
13 | # [from, number, module, args]
14 | [[-1, 1, Conv, [16, 3, 1]], # 0
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
16 | [-1, 1, Conv, [32, 3, 1]],
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
18 | [-1, 1, Conv, [64, 3, 1]],
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
20 | [-1, 1, Conv, [128, 3, 1]],
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
22 | [-1, 1, Conv, [256, 3, 1]],
23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
24 | [-1, 1, Conv, [512, 3, 1]],
25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
27 | ]
28 |
29 | # YOLOv3-tiny head
30 | head:
31 | [[-1, 1, Conv, [1024, 3, 1]],
32 | [-1, 1, Conv, [256, 1, 1]],
33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
34 |
35 | [-2, 1, Conv, [128, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
39 |
40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
41 | ]
42 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3 head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, Conv, [512, [1, 1]]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov5-bifpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 9, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 |    [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 1, SPP, [1024, [5, 9, 13]]],
24 | [-1, 3, C3, [1024, False]], # 9
25 | ]
26 |
27 | # YOLOv5 BiFPN head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov5-fpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, Bottleneck, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 9, BottleneckCSP, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, BottleneckCSP, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 1, SPP, [1024, [5, 9, 13]]],
24 | [-1, 6, BottleneckCSP, [1024]], # 9
25 | ]
26 |
27 | # YOLOv5 FPN head
28 | head:
29 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large)
30 |
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium)
35 |
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small)
40 |
41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
42 | ]
43 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov5-panet.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, BottleneckCSP, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 9, BottleneckCSP, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, BottleneckCSP, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 1, SPP, [1024, [5, 9, 13]]],
24 | [-1, 3, BottleneckCSP, [1024, False]], # 9
25 | ]
26 |
27 | # YOLOv5 PANet head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, BottleneckCSP, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov5s-ghost.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2
16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3Ghost, [128]],
18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
19 | [-1, 9, C3Ghost, [256]],
20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3Ghost, [512]],
22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 1, SPP, [1024, [5, 9, 13]]],
24 | [-1, 3, C3Ghost, [1024, False]], # 9
25 | ]
26 |
27 | # YOLOv5 head
28 | head:
29 | [[-1, 1, GhostConv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3Ghost, [512, False]], # 13
33 |
34 | [-1, 1, GhostConv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, GhostConv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, GhostConv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/hub/yolov5s-transformer.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 9, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 1, SPP, [1024, [5, 9, 13]]],
24 | [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module
25 | ]
26 |
27 | # YOLOv5 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/yolov5l.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/yolov5m.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/yolov5n.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/yolov5s.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/models/yolov5x.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
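
Note: the yolov5n/s/m/l/x configs above share an identical layer list; only `depth_multiple` and `width_multiple` differ. When the model is built, the `number` column of each `[from, number, module, args]` row is scaled by `depth_multiple`, and the channel argument by `width_multiple` (rounded up to a multiple of 8). A minimal sketch of that scaling, mirroring what `parse_model` in `models/yolo.py` does (for illustration only):

```python
import math

def make_divisible(x, divisor=8):
    # round a channel count up to the nearest multiple of `divisor`
    return math.ceil(x / divisor) * divisor

def scale_row(number, channels, depth_multiple, width_multiple):
    # depth gain: repeat count is scaled and kept >= 1
    n = max(round(number * depth_multiple), 1) if number > 1 else number
    # width gain: output channels are scaled and padded to a multiple of 8
    c = make_divisible(channels * width_multiple, 8)
    return n, c

# yolov5s (depth_multiple=0.33, width_multiple=0.50):
# the backbone row [-1, 9, C3, [512]] becomes 3 x C3 with 256 output channels
print(scale_row(9, 512, 0.33, 0.50))   # (3, 256)
# yolov5n (width_multiple=0.25) shrinks the same row to (3, 128)
print(scale_row(9, 512, 0.33, 0.25))   # (3, 128)
```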
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 |
31 | # Extras --------------------------------------
32 | # albumentations>=1.0.3
33 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
34 | # pycocotools>=2.0 # COCO mAP
35 | # roboflow
36 | thop # FLOPs computation
37 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov5/yolov5-6.0/utils/__init__.py
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov5/yolov5-6.0/utils/aws/__init__.py
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv5 root directory
13 | if str(ROOT) not in sys.path:
14 | sys.path.append(str(ROOT)) # add ROOT to PATH
15 |
16 | port = 0 # --master_port
17 | path = Path('').resolve()
18 | for last in path.rglob('*/**/last.pt'):
19 | ckpt = torch.load(last)
20 | if ckpt['optimizer'] is None:
21 | continue
22 |
23 | # Load opt.yaml
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25 | opt = yaml.safe_load(f)
26 |
27 | # Get device count
28 | d = opt['device'].split(',') # devices
29 | nd = len(d) # number of devices
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31 |
32 | if ddp: # multi-GPU
33 | port += 1
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35 | else: # single-GPU
36 | cmd = f'python train.py --resume {last}'
37 |
38 | cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
39 | print(cmd)
40 | os.system(cmd)
41 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | """Perform test request"""
2 | import pprint
3 |
4 | import requests
5 |
6 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
7 | TEST_IMAGE = "zidane.jpg"
8 |
9 | image_data = open(TEST_IMAGE, "rb").read()
10 |
11 | response = requests.post(DETECTION_URL, files={"image": image_data}).json()
12 |
13 | pprint.pprint(response)
14 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | """
2 | Run a rest API exposing the yolov5s object detection model
3 | """
4 | import argparse
5 | import io
6 |
7 | import torch
8 | from PIL import Image
9 | from flask import Flask, request
10 |
11 | app = Flask(__name__)
12 |
13 | DETECTION_URL = "/v1/object-detection/yolov5s"
14 |
15 |
16 | @app.route(DETECTION_URL, methods=["POST"])
17 | def predict():
18 | if not request.method == "POST":
19 | return
20 |
21 | if request.files.get("image"):
22 | image_file = request.files["image"]
23 | image_bytes = image_file.read()
24 |
25 | img = Image.open(io.BytesIO(image_bytes))
26 |
27 | results = model(img, size=640) # reduce size=320 for faster inference
28 | return results.pandas().xyxy[0].to_json(orient="records")
29 |
30 |
31 | if __name__ == "__main__":
32 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
33 | parser.add_argument("--port", default=5000, type=int, help="port number")
34 | args = parser.parse_args()
35 |
36 | model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache
37 | app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
38 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables is the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==19.2
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov5/yolov5-6.0/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
6 |
7 |
8 | def create_dataset_artifact(opt):
9 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused
10 |
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
15 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
16 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
17 | parser.add_argument('--entity', default=None, help='W&B entity')
18 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')
19 |
20 | opt = parser.parse_args()
21 | opt.resume = False # Explicitly disallow resume check for dataset upload job
22 |
23 | create_dataset_artifact(opt)
24 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5-6.0/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
6 | FILE = Path(__file__).resolve()
7 | ROOT = FILE.parents[3] # YOLOv5 root directory
8 | if str(ROOT) not in sys.path:
9 | sys.path.append(str(ROOT)) # add ROOT to PATH
10 |
11 | from train import train, parse_opt
12 | from utils.general import increment_path
13 | from utils.torch_utils import select_device
14 | from utils.callbacks import Callbacks
15 |
16 |
17 | def sweep():
18 | wandb.init()
19 | # Get hyp dict from sweep agent
20 | hyp_dict = vars(wandb.config).get("_items")
21 |
22 | # Workaround: get necessary opt args
23 | opt = parse_opt(known=True)
24 | opt.batch_size = hyp_dict.get("batch_size")
25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
26 | opt.epochs = hyp_dict.get("epochs")
27 | opt.nosave = True
28 | opt.data = hyp_dict.get("data")
29 | device = select_device(opt.device, batch_size=opt.batch_size)
30 |
31 | # train
32 | train(hyp_dict, opt, device, callbacks=Callbacks())
33 |
34 |
35 | if __name__ == "__main__":
36 | sweep()
37 |
--------------------------------------------------------------------------------
/demo/yolov5/yolov5.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV5_HPP
2 | #define YOLOV5_HPP
3 |
4 | #include <opencv2/opencv.hpp>
5 | #include <vector>
6 | #include <memory>
7 | #include <string>
8 | #include <future>
9 | /////////////////////////////////////////////////////////////////////////////////////////
10 | // Wrapper interface class
11 | namespace YoloV5{
12 |
13 | struct Box{
14 | float left, top, right, bottom, confidence;
15 | int class_label;
16 |
17 | Box() = default;
18 |
19 | Box(float left, float top, float right, float bottom, float confidence, int class_label)
20 | :left(left), top(top), right(right), bottom(bottom), confidence(confidence), class_label(class_label){}
21 | };
22 | typedef std::vector<Box> BoxArray;
23 |
24 | class Infer{
25 | public:
26 | virtual std::shared_future<BoxArray> commit(const cv::Mat& input) = 0;
27 | };
28 |
29 | std::shared_ptr<Infer> create_infer(
30 | const std::string& file,
31 | int gpuid=0, float confidence_threshold=0.25, float nms_threshold=0.45
32 | );
33 | };
34 |
35 | #endif // YOLOV5_HPP
--------------------------------------------------------------------------------
/demo/yolov7/cfg/baseline/r50-csp.yaml:
--------------------------------------------------------------------------------
1 | # parameters
2 | nc: 80 # number of classes
3 | depth_multiple: 1.0 # model depth multiple
4 | width_multiple: 1.0 # layer channel multiple
5 |
6 | # anchors
7 | anchors:
8 | - [12,16, 19,36, 40,28] # P3/8
9 | - [36,75, 76,55, 72,146] # P4/16
10 | - [142,110, 192,243, 459,401] # P5/32
11 |
12 | # CSP-ResNet backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Stem, [128]], # 0-P1/2
16 | [-1, 3, ResCSPC, [128]],
17 | [-1, 1, Conv, [256, 3, 2]], # 2-P3/8
18 | [-1, 4, ResCSPC, [256]],
19 | [-1, 1, Conv, [512, 3, 2]], # 4-P4/16
20 | [-1, 6, ResCSPC, [512]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 6-P5/32
22 | [-1, 3, ResCSPC, [1024]], # 7
23 | ]
24 |
25 | # CSP-Res-PAN head
26 | head:
27 | [[-1, 1, SPPCSPC, [512]], # 8
28 | [-1, 1, Conv, [256, 1, 1]],
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30 | [5, 1, Conv, [256, 1, 1]], # route backbone P4
31 | [[-1, -2], 1, Concat, [1]],
32 | [-1, 2, ResCSPB, [256]], # 13
33 | [-1, 1, Conv, [128, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [3, 1, Conv, [128, 1, 1]], # route backbone P3
36 | [[-1, -2], 1, Concat, [1]],
37 | [-1, 2, ResCSPB, [128]], # 18
38 | [-1, 1, Conv, [256, 3, 1]],
39 | [-2, 1, Conv, [256, 3, 2]],
40 | [[-1, 13], 1, Concat, [1]], # cat
41 | [-1, 2, ResCSPB, [256]], # 22
42 | [-1, 1, Conv, [512, 3, 1]],
43 | [-2, 1, Conv, [512, 3, 2]],
44 | [[-1, 8], 1, Concat, [1]], # cat
45 | [-1, 2, ResCSPB, [512]], # 26
46 | [-1, 1, Conv, [1024, 3, 1]],
47 |
48 | [[19,23,27], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/demo/yolov7/cfg/baseline/x50-csp.yaml:
--------------------------------------------------------------------------------
1 | # parameters
2 | nc: 80 # number of classes
3 | depth_multiple: 1.0 # model depth multiple
4 | width_multiple: 1.0 # layer channel multiple
5 |
6 | # anchors
7 | anchors:
8 | - [12,16, 19,36, 40,28] # P3/8
9 | - [36,75, 76,55, 72,146] # P4/16
10 | - [142,110, 192,243, 459,401] # P5/32
11 |
12 | # CSP-ResNeXt backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Stem, [128]], # 0-P1/2
16 | [-1, 3, ResXCSPC, [128]],
17 | [-1, 1, Conv, [256, 3, 2]], # 2-P3/8
18 | [-1, 4, ResXCSPC, [256]],
19 | [-1, 1, Conv, [512, 3, 2]], # 4-P4/16
20 | [-1, 6, ResXCSPC, [512]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 6-P5/32
22 | [-1, 3, ResXCSPC, [1024]], # 7
23 | ]
24 |
25 | # CSP-ResX-PAN head
26 | head:
27 | [[-1, 1, SPPCSPC, [512]], # 8
28 | [-1, 1, Conv, [256, 1, 1]],
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30 | [5, 1, Conv, [256, 1, 1]], # route backbone P4
31 | [[-1, -2], 1, Concat, [1]],
32 | [-1, 2, ResXCSPB, [256]], # 13
33 | [-1, 1, Conv, [128, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [3, 1, Conv, [128, 1, 1]], # route backbone P3
36 | [[-1, -2], 1, Concat, [1]],
37 | [-1, 2, ResXCSPB, [128]], # 18
38 | [-1, 1, Conv, [256, 3, 1]],
39 | [-2, 1, Conv, [256, 3, 2]],
40 | [[-1, 13], 1, Concat, [1]], # cat
41 | [-1, 2, ResXCSPB, [256]], # 22
42 | [-1, 1, Conv, [512, 3, 1]],
43 | [-2, 1, Conv, [512, 3, 2]],
44 | [[-1, 8], 1, Concat, [1]], # cat
45 | [-1, 2, ResXCSPB, [512]], # 26
46 | [-1, 1, Conv, [1024, 3, 1]],
47 |
48 | [[19,23,27], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/demo/yolov7/cfg/baseline/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # parameters
2 | nc: 80 # number of classes
3 | depth_multiple: 1.0 # model depth multiple
4 | width_multiple: 1.0 # layer channel multiple
5 |
6 | # anchors
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3-SPP head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, SPP, [512, [5, 9, 13]]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/demo/yolov7/cfg/baseline/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # parameters
2 | nc: 80 # number of classes
3 | depth_multiple: 1.0 # model depth multiple
4 | width_multiple: 1.0 # layer channel multiple
5 |
6 | # anchors
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3 head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, Conv, [512, 1, 1]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/demo/yolov7/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # COCO 2017 dataset http://cocodataset.org
2 |
3 | # download command/URL (optional)
4 | download: bash ./scripts/get_coco.sh
5 |
6 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
7 | train: ./coco/train2017.txt # 118287 images
8 | val: ./coco/val2017.txt # 5000 images
9 | test: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
10 |
11 | # number of classes
12 | nc: 80
13 |
14 | # class names
15 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
16 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
17 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
18 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
19 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
20 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
21 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
22 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
23 | 'hair drier', 'toothbrush' ]
24 |
--------------------------------------------------------------------------------
/demo/yolov7/data/hyp.scratch.p5.yaml:
--------------------------------------------------------------------------------
1 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
2 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
3 | momentum: 0.937 # SGD momentum/Adam beta1
4 | weight_decay: 0.0005 # optimizer weight decay 5e-4
5 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
6 | warmup_momentum: 0.8 # warmup initial momentum
7 | warmup_bias_lr: 0.1 # warmup initial bias lr
8 | box: 0.05 # box loss gain
9 | cls: 0.3 # cls loss gain
10 | cls_pw: 1.0 # cls BCELoss positive_weight
11 | obj: 0.7 # obj loss gain (scale with pixels)
12 | obj_pw: 1.0 # obj BCELoss positive_weight
13 | iou_t: 0.20 # IoU training threshold
14 | anchor_t: 4.0 # anchor-multiple threshold
15 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
16 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
17 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
18 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
19 | degrees: 0.0 # image rotation (+/- deg)
20 | translate: 0.2 # image translation (+/- fraction)
21 | scale: 0.9 # image scale (+/- gain)
22 | shear: 0.0 # image shear (+/- deg)
23 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
24 | flipud: 0.0 # image flip up-down (probability)
25 | fliplr: 0.5 # image flip left-right (probability)
26 | mosaic: 1.0 # image mosaic (probability)
27 | mixup: 0.15 # image mixup (probability)
28 | copy_paste: 0.0 # image copy paste (probability)
29 | paste_in: 0.15 # image copy paste (probability)
30 |
--------------------------------------------------------------------------------
/demo/yolov7/data/hyp.scratch.p6.yaml:
--------------------------------------------------------------------------------
1 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
2 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
3 | momentum: 0.937 # SGD momentum/Adam beta1
4 | weight_decay: 0.0005 # optimizer weight decay 5e-4
5 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
6 | warmup_momentum: 0.8 # warmup initial momentum
7 | warmup_bias_lr: 0.1 # warmup initial bias lr
8 | box: 0.05 # box loss gain
9 | cls: 0.3 # cls loss gain
10 | cls_pw: 1.0 # cls BCELoss positive_weight
11 | obj: 0.7 # obj loss gain (scale with pixels)
12 | obj_pw: 1.0 # obj BCELoss positive_weight
13 | iou_t: 0.20 # IoU training threshold
14 | anchor_t: 4.0 # anchor-multiple threshold
15 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
16 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
17 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
18 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
19 | degrees: 0.0 # image rotation (+/- deg)
20 | translate: 0.2 # image translation (+/- fraction)
21 | scale: 0.9 # image scale (+/- gain)
22 | shear: 0.0 # image shear (+/- deg)
23 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
24 | flipud: 0.0 # image flip up-down (probability)
25 | fliplr: 0.5 # image flip left-right (probability)
26 | mosaic: 1.0 # image mosaic (probability)
27 | mixup: 0.15 # image mixup (probability)
28 | copy_paste: 0.0 # image copy paste (probability)
29 | paste_in: 0.15 # image copy paste (probability)
30 |
--------------------------------------------------------------------------------
/demo/yolov7/data/hyp.scratch.tiny.yaml:
--------------------------------------------------------------------------------
1 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
2 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
3 | momentum: 0.937 # SGD momentum/Adam beta1
4 | weight_decay: 0.0005 # optimizer weight decay 5e-4
5 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
6 | warmup_momentum: 0.8 # warmup initial momentum
7 | warmup_bias_lr: 0.1 # warmup initial bias lr
8 | box: 0.05 # box loss gain
9 | cls: 0.5 # cls loss gain
10 | cls_pw: 1.0 # cls BCELoss positive_weight
11 | obj: 1.0 # obj loss gain (scale with pixels)
12 | obj_pw: 1.0 # obj BCELoss positive_weight
13 | iou_t: 0.20 # IoU training threshold
14 | anchor_t: 4.0 # anchor-multiple threshold
15 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
16 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
17 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
18 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
19 | degrees: 0.0 # image rotation (+/- deg)
20 | translate: 0.1 # image translation (+/- fraction)
21 | scale: 0.5 # image scale (+/- gain)
22 | shear: 0.0 # image shear (+/- deg)
23 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
24 | flipud: 0.0 # image flip up-down (probability)
25 | fliplr: 0.5 # image flip left-right (probability)
26 | mosaic: 1.0 # image mosaic (probability)
27 | mixup: 0.05 # image mixup (probability)
28 | copy_paste: 0.0 # image copy paste (probability)
29 | paste_in: 0.05 # image copy paste (probability)
30 |
--------------------------------------------------------------------------------
/demo/yolov7/export_onnx.py:
--------------------------------------------------------------------------------
1 | # from json import tool
2 | # import requests
3 | # import matplotlib.pyplot as plt
4 |
5 | # from torchvision.models import resnet50
6 | # import torchvision.transforms as T
7 |
8 | # from tools import *
9 | # from model import *
10 |
11 |
12 | # if __name__ == '__main__':
13 | # detr = DETRdemo(num_classes=91)
14 | # state_dict = torch.load('detr_demo.pth')
15 | # detr.load_state_dict(state_dict)
16 | # detr.eval()
17 |
18 | # dummy = torch.zeros(1,3,800,1066)
19 | # torch.onnx.export(
20 | # detr,(dummy,),
21 | # "detr.onnx",
22 | # input_names=["image"],
23 | # output_names=["predict"],
24 | # opset_version=12,
25 | # dynamic_axes={"image":{0:"batch"},"predict":{0:"batch"},}
26 | # )
27 |
28 | # To export ONNX correctly, change line 56 of models/yolo.py:
29 |
30 | # ```python
31 | # # x = x if self.training else (torch.cat(z, 1),x) # change this line to
32 | # x = x if self.training else (torch.cat(z, 1))
33 | # ```
34 |
35 | # Then export from detect.py as follows:
36 |
37 | # ```
38 | # dummy = torch.zeros(1,3,640,640)
39 | # torch.onnx.export(
40 | # model,(dummy,),
41 | # "yolov7.onnx",
42 | # input_names=["image"],
43 | # output_names=["predict"],
44 | # opset_version=13,
45 | # dynamic_axes={"image":{0:"batch"},"predict":{0:"batch"},}
46 | # )
47 | # ```
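
Putting the two steps above together, a minimal export sketch (it assumes the yolov7 repo layout with its `attempt_load` helper in `models/experimental.py`, that the `models/yolo.py` edit above has already been applied, and uses a placeholder weight path):

```python
import torch
from models.experimental import attempt_load  # yolov7 repo helper

if __name__ == '__main__':
    device = torch.device('cpu')
    model = attempt_load('yolov7.pt', map_location=device)  # placeholder weight file
    model.eval()

    dummy = torch.zeros(1, 3, 640, 640, device=device)
    torch.onnx.export(
        model, (dummy,),
        "yolov7.onnx",
        input_names=["image"],
        output_names=["predict"],
        opset_version=13,
        dynamic_axes={"image": {0: "batch"}, "predict": {0: "batch"}},
    )
    print("exported yolov7.onnx")
```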
--------------------------------------------------------------------------------
/demo/yolov7/figure/performance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/demo/yolov7/figure/performance.png
--------------------------------------------------------------------------------
/demo/yolov7/models/__init__.py:
--------------------------------------------------------------------------------
1 | # init
--------------------------------------------------------------------------------
/demo/yolov7/onnx_simplify.py:
--------------------------------------------------------------------------------
1 | from onnxsim import simplify
2 | import onnx
3 | input_path="yolov7.onnx"
4 | output_path="yolov7_sim.onnx"
5 | onnx_model = onnx.load(input_path)
6 | model_simp, check = simplify(onnx_model,input_shapes={'image': [0, 3, 640, 640]},dynamic_input_shape=True)
7 | assert check, "Simplified ONNX model could not be validated"
8 | onnx.save(model_simp, output_path)
9 | print('finished simplifying onnx')
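
A quick way to confirm the simplified model still loads and runs is an onnxruntime smoke test (a minimal sketch; it assumes onnxruntime is installed and reuses the file name produced by the script above):

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("yolov7_sim.onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name  # "image" in the export above
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])
```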
--------------------------------------------------------------------------------
/demo/yolov7/scripts/get_coco.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # COCO 2017 dataset http://cocodataset.org
3 | # Download command: bash ./scripts/get_coco.sh
4 |
5 | # Download/unzip labels
6 | d='./' # unzip directory
7 | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
8 | f='coco2017labels-segments.zip' # or 'coco2017labels.zip', 68 MB
9 | echo 'Downloading' $url$f ' ...'
10 | curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
11 |
12 | # Download/unzip images
13 | d='./coco/images' # unzip directory
14 | url=http://images.cocodataset.org/zips/
15 | f1='train2017.zip' # 19G, 118k images
16 | f2='val2017.zip' # 1G, 5k images
17 | f3='test2017.zip' # 7G, 41k images (optional)
18 | for f in $f1 $f2 $f3; do
19 | echo 'Downloading' $url$f '...'
20 | curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
21 | done
22 | wait # finish background tasks
23 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # init
--------------------------------------------------------------------------------
/demo/yolov7/utils/aws/__init__.py:
--------------------------------------------------------------------------------
1 | #init
--------------------------------------------------------------------------------
/demo/yolov7/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolor/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories
12 |
13 | port = 0 # --master_port
14 | path = Path('').resolve()
15 | for last in path.rglob('*/**/last.pt'):
16 | ckpt = torch.load(last)
17 | if ckpt['optimizer'] is None:
18 | continue
19 |
20 | # Load opt.yaml
21 | with open(last.parent.parent / 'opt.yaml') as f:
22 | opt = yaml.load(f, Loader=yaml.SafeLoader)
23 |
24 | # Get device count
25 | d = opt['device'].split(',') # devices
26 | nd = len(d) # number of devices
27 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
28 |
29 | if ddp: # multi-GPU
30 | port += 1
31 | cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
32 | else: # single-GPU
33 | cmd = f'python train.py --resume {last}'
34 |
35 | cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
36 | print(cmd)
37 | os.system(cmd)
38 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolor ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone -b paper https://github.com/WongKinYiu/yolor && sudo chmod -R 777 yolor
11 | cd yolor
12 | bash data/scripts/get_coco.sh && echo "Data done." &
13 | sudo docker pull nvcr.io/nvidia/pytorch:21.08-py3 && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables is the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==18.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/demo/yolov7/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolorapp
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
--------------------------------------------------------------------------------
/demo/yolov7/utils/wandb_logging/__init__.py:
--------------------------------------------------------------------------------
1 | # init
--------------------------------------------------------------------------------
/demo/yolov7/utils/wandb_logging/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import yaml
4 |
5 | from wandb_utils import WandbLogger
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
10 | def create_dataset_artifact(opt):
11 | with open(opt.data) as f:
12 | data = yaml.load(f, Loader=yaml.SafeLoader) # data dict
13 | logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
20 | parser.add_argument('--project', type=str, default='YOLOR', help='name of W&B Project')
21 | opt = parser.parse_args()
22 | opt.resume = False # Explicitly disallow resume check for dataset upload job
23 |
24 | create_dataset_artifact(opt)
25 |
--------------------------------------------------------------------------------
/sideline_learn/cpp-move/forward.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <utility>
3 |
4 |
5 | void Print(int& val) {
6 | std::cout << "lvalue refrence: val=" << val << std::endl;
7 | }
8 |
9 | void Print(int&& val) {
10 | std::cout << "rvalue refrence: val=" << val << std::endl;
11 | }
12 |
13 | // template <typename T>
14 | // void TPrint(T &&t) {
15 | //     // when an lvalue is passed in, t is an lvalue reference
16 | //     // when an rvalue is passed in, t is an rvalue reference (reference collapsing)
17 | //     // in theory an rvalue argument should end up calling Print(int&& val), but this actually calls Print(int& val),
18 | //     // because the named rvalue reference t is itself an lvalue
19 | //     return Print(t);
20 | // }
21 | template <typename T>
22 | void TPrint(T&& t) {
23 |     // deduce from the argument whether t was bound to an lvalue or an rvalue,
24 |     // then use std::forward to preserve that value category:
25 |     // if t is an lvalue reference, forward returns an lvalue and Print(int&) is called
26 |     // if t is an rvalue reference, forward returns an rvalue and Print(int&&) is called
27 |     // this achieves perfect forwarding
28 |     return Print(std::forward<T>(t));
29 | }
30 |
31 |
32 | // template <typename _Tp>
33 | // constexpr _Tp&&
34 | // forward(typename std::remove_reference<_Tp>::type&& __t) noexcept
35 | // {
36 | // static_assert(!std::is_lvalue_reference<_Tp>::value, "template argument"
37 | // " substituting _Tp is an lvalue reference type");
38 | //     forced cast back to _Tp&&
39 | // return static_cast<_Tp&&>(__t);
40 | // }
41 |
42 |
43 | // lvalues and rvalues
44 | // lvalue references and rvalue references
45 | // named lvalue/rvalue reference variables are themselves all lvalues
46 |
47 | int main() {
48 | int date = 1021;
49 | TPrint(date); // pass an lvalue
50 | TPrint(501); // pass an rvalue; inside TPrint the named "rvalue reference" is itself an lvalue
51 |
52 | return 0;
53 | }
54 |
--------------------------------------------------------------------------------
/sideline_learn/cpp-move/move.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <cstring>
3 | #include <vector>
4 | #include <utility>
5 | using namespace std;
6 | class String
7 | {
8 | public:
9 | String(const char* buf,const char* buf2)
10 | {
11 | _buf = new char[strlen(buf) + 1];
12 | strcpy(_buf, buf); // memory copy
13 | _buf2 = new char[strlen(buf2) + 1];
14 | strcpy(_buf2, buf2); // memory copy
15 | cout << "ordinary constructor called" << endl;
2 | using namespace std;
3 |
4 | template <typename T>
5 |
6 | void func(T& val)
7 | {
8 | cout << "lvalue: " << val << endl;
9 | }
10 | template <typename T>
11 | void func(T&& val)
12 | {
13 | cout << "rvalue: " << val << endl;
14 | }
15 |
16 | int main()
17 | {
18 | //demo1
19 | int year = 2020;
20 | func(year); // pass an lvalue (reference collapsing binds it to func(T&))
21 | func(2020); // pass an rvalue (binds to func(T&&))
22 | return 0;
23 | }
24 |
--------------------------------------------------------------------------------
/sideline_learn/multi-thread/async.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <thread>
3 | #include <mutex>
4 | #include <condition_variable>
5 | #include <future>
6 | #include <functional>
7 | #include <chrono>
8 | #include <vector>
9 | #include <string>
10 |
11 | using namespace std;
12 |
13 |
14 | int task(int a,int b){
15 | int ret_a = a * a;
16 | int ret_b = b * 2;
17 | return ret_a+ret_b;
18 | }
19 |
20 |
21 | int main(){
22 |
23 |
24 | // wrap the callable in a packaged_task and pass the arguments when invoking it
25 | // packaged_task<int(int, int)> t(task);
26 | // run the task
27 | // t(1,2);
28 |
29 | // or bind the arguments at wrap time
30 |
31 | packaged_task<int()> t(bind(task, 1, 2));
32 | t();
33 |
34 | // fetch the return value
35 | int res = t.get_future().get();
36 |
37 | cout <<"res:"<
2 | #include <iostream>
3 | #include <thread>
4 | #include <mutex>
5 | #include <condition_variable>
6 | #include <future>
7 | #include <functional>
8 | #include <chrono>
9 | #include <string>
10 |
11 | using namespace std;
12 |
13 | mutex mtx;
14 | condition_variable cv;
15 |
16 | void task(int a, int b, int& ret){
17 | int ret_a = a * a;
18 | int ret_b = b * 2;
19 | ret = ret_a + ret_b;
20 | cv.notify_one();
21 | }
22 |
23 | //this version needs a lot of extra code; std::future solves it more elegantly
24 |
25 | int main(){
26 | int ret = 0;
27 | //to pass a reference into std::thread, wrap the argument with ref
28 | 
29 | //hand the result variable to the worker thread
30 | thread t(task,1,2,ref(ret));
31 | 
32 | unique_lock<mutex> lock(mtx);
33 | //wait for the worker's notification, then continue (a notify that fires before this wait would be missed)
34 | cv.wait(lock);
35 | cout<< "return value is" << ret<
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | using namespace std;
12 |
13 |
14 | int task(int a,int b){
15 | int ret_a = a * a;
16 | //wait for the main thread to set the value of p_in before continuing
17 | int ret_b = b * 2;
18 | return ret_a+ret_b;
19 | }
20 |
21 | //to simplify the approach above, std::async can be used
22 |
23 | int main(){
24 |
25 | //with async the return value of the task can be retrieved directly
26 | //async does not necessarily create a new thread for the computation; with launch::async it always starts one
27 | //launch::deferred defers the call: the task only runs when fu.get() is called
28 | future<int> fu = async(launch::async,task,1,2);
29 | cout <<"return ret is :" << fu.get() <
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | using namespace std;
12 |
13 |
14 | void task(int a,int b, promise<int>& ret){
15 | int ret_a = a * a;
16 | int ret_b = b * 2;
17 |
18 | ret.set_value(ret_a+ret_b);
19 |
20 | }
21 |
22 | //the condition_variable version needs too much code; promise/future solves it more elegantly
23 |
24 | int main(){
25 | int ret = 0;
26 |
27 | promise<int> p;
28 | future<int> f = p.get_future(); //associate f with p
29 |
30 | thread t(task,1,2,ref(p));
31 |
32 | //f.get() blocks here until p passes a value to f, and f.get() can only be called once
33 | cout <<"return ret is :" << f.get() <
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | using namespace std;
12 |
13 |
14 | void task(int a, future<int>& b, promise<int>& ret){
15 | int ret_a = a * a;
16 | //wait until the main thread has set the value of p_in before continuing
17 | int ret_b = b.get()* 2;
18 |
19 | ret.set_value(ret_a+ret_b);
20 |
21 | }
22 |
23 | //sometimes the value handed to a thread is not available immediately and only arrives some time later
24 |
25 | int main(){
26 | int ret = 0;
27 |
28 | promise<int> p_ret;
29 | future<int> f_ret = p_ret.get_future();
30 | 
31 | promise<int> p_in;
32 | future<int> f_in = p_in.get_future();
33 |
34 | thread t(task,1,ref(f_in),ref(p_ret));
35 |
36 | //do
37 |
38 | p_in.set_value(8);
39 |
40 | //
41 |
42 | cout <<"return ret is :" << f_ret.get() <
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | using namespace std;
13 |
14 |
15 | void task(int a, shared_future<int>& b, promise<int>& ret){
16 | int ret_a = a * a;
17 | //wait until the main thread has set the value of p_in before continuing
18 | int ret_b = b.get()* 2;
19 |
20 | ret.set_value(ret_a+ret_b);
21 |
22 | }
23 |
24 |
25 | int main(){
26 | int ret = 0;
27 | //a promise cannot be copy-assigned
28 | promise<int> p_tmp;
29 | //but its ownership can be transferred with std::move
30 | promise<int> p_tmp2 = move(p_tmp);
31 | 
32 | promise<int> p_ret;
33 | future<int> f_ret = p_ret.get_future();
34 | 
35 | promise<int> p_in;
36 | future<int> f_in = p_in.get_future();
37 |
38 | //when many threads need the same input value
39 | //b.get() on a plain future can only be called once, so sharing one future across threads throws
40 | //the fix: turn it into a shared_future
41 | shared_future<int> s_f = f_in.share();
42 | 
43 | thread t1(task, 1, ref(s_f), ref(p_ret));
44 | //more threads can read s_f the same way, but each one needs its own promise
45 | //for its result (p_ret.set_value can only be called once), e.g.
46 | //thread t2(task, 2, ref(s_f), ref(p_ret_2));
47 |
48 | p_in.set_value(8);
49 |
50 | cout <<"return ret is :" << f_ret.get() <
3 | #include "fastdet.h"
4 | #include <opencv2/opencv.hpp>
6 | using namespace fastdet;
7 |
8 | int main() {
9 | string img_path = "/data/rex/ncnn_proj/ncnn_multi_thread/data/imgs/3.jpg";
10 | cv::Mat img = cv::imread(img_path);
11 | int img_width = img.cols;
12 | int img_height = img.rows;
13 | string param_path =
14 | "/data/rex/ncnn_proj/ncnn_multi_thread/data/model/FastestDet.param";
15 | string model_path =
16 | "/data/rex/ncnn_proj/ncnn_multi_thread/data/model/FastestDet.bin";
17 | FastDet* pred = new FastDet(352, 352, param_path, model_path);
18 | int class_num = sizeof(pred->class_names) / sizeof(pred->class_names[0]);
19 | pred->prepare_input(img);
20 | pred->infrence("input.1", "758", 6);
21 | pred->postprocess(img_width, img_height, class_num, 0.65);
22 | for (size_t i = 0; i < pred->nms_boxes.size(); i++) {
23 | TargetBox box = pred->nms_boxes[i];
24 | cv::rectangle(img, cv::Point(box.x1, box.y1), cv::Point(box.x2, box.y2),
25 | cv::Scalar(0, 0, 255), 2);
26 | cv::putText(img, pred->class_names[box.category], cv::Point(box.x1, box.y1),
27 | cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 0), 2);
28 | }
29 | cv::imwrite("result_test.jpg", img);
30 | return 0;
31 | }
32 |
33 | // single model, multiple videos
34 | // multiple models, single video
35 | // multiple models, multiple videos
--------------------------------------------------------------------------------
/sideline_learn/ncnn_multi_thread/include/infer.hpp:
--------------------------------------------------------------------------------
1 | #ifndef INFER_HPP
2 | #define INFER_HPP
3 |
4 | #include <future>
5 | #include <memory>
6 | #include <string>
7 | #include <vector>
8 |
9 | #include "fastdet.h"
10 | // 封装接口类
11 | class Infer
12 | {
13 | public:
14 |     virtual std::shared_future<std::vector<TargetBox>> commit(
15 |         cv::Mat &input) = 0;
16 | };
17 |
18 | std::shared_ptr<Infer> create_infer(const std::string &param_path,
19 | const std::string &model_path);
20 |
21 | #endif // INFER_HPP
--------------------------------------------------------------------------------
/sideline_learn/ncnn_multi_thread/multi_thead_infer.cpp:
--------------------------------------------------------------------------------
1 | #include <cstdio>
2 | #include <opencv2/opencv.hpp>
3 |
4 | #include "infer.hpp"
5 |
6 | using namespace cv;
7 | using namespace std;
8 | using namespace fastdet;
9 |
10 | int main()
11 | {
12 | string param_path =
13 | "/home/rex/Desktop/ncnn_multi_thread/data/model/FastestDet.param";
14 | string model_path =
15 | "/home/rex/Desktop/ncnn_multi_thread/data/model/FastestDet.bin";
16 |
17 | auto infer = create_infer(
18 | param_path,
19 | model_path); // create and initialise the inference engine
20 |
21 | if (infer == nullptr)
22 | {
23 | printf("Infer is nullptr.\n");
24 | return 0;
25 | }
26 |
27 | string img_path = "/home/rex/Desktop/ncnn_multi_thread/data/imgs/3.jpg";
28 | Mat img = cv::imread(img_path);
29 | auto fut = infer->commit(img); // submit the task to the inference engine
30 | vector<TargetBox> res = fut.get(); // wait for the result
31 |
32 | for (size_t i = 0; i < res.size(); i++)
33 | {
34 | TargetBox box = res[i];
35 | rectangle(img, cv::Point(box.x1, box.y1), cv::Point(box.x2, box.y2),
36 | cv::Scalar(0, 0, 255), 2);
37 | // cv::putText(img, pred->class_names[box.category], cv::Point(box.x1,
38 | // box.y1),
39 | // cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 0), 2);
40 | }
41 | cv::imwrite("result_test.jpg", img);
42 |
43 | return 0;
44 | }
45 |
--------------------------------------------------------------------------------
/sideline_learn/tools/pic2video2.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | from PIL import Image
4 |
5 |
6 | def PicToVideo(imgPath, videoPath):
7 |     images = sorted(os.listdir(imgPath))  # sort so frames appear in a stable order
8 |     print(len(images))
9 |     fps = 10  # frame rate
10 |     fourcc = cv2.VideoWriter_fourcc(*"MJPG")
11 |     im = Image.open(os.path.join(imgPath, images[0]))
12 |     videoWriter = cv2.VideoWriter(videoPath, fourcc, fps, im.size)  # im.size is (width, height)
13 |     for im_name in images:
14 |         frame = cv2.imread(os.path.join(imgPath, im_name))
15 |         for i in range(90):  # hold each picture for 90 frames (9 s at 10 fps)
16 |             videoWriter.write(frame)
17 |     videoWriter.release()
18 |
19 |
20 | if __name__ == "__main__":
21 |
22 | imgPath = "/home/rex/Pictures/Wallpapers/"
23 | videoPath = "/home/rex/Desktop/test.avi"
24 | PicToVideo(imgPath, videoPath)
25 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.6)
2 |
3 | project(warpaffine-cuda-test)
4 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED")
5 | add_definitions(-std=c++11)
6 | add_definitions(-DAPI_EXPORTS)
7 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
8 | set(CMAKE_CXX_STANDARD 11)
9 | set(CMAKE_BUILD_TYPE Debug)
10 |
11 | find_package(CUDA REQUIRED)
12 | find_package(OpenCV)
13 | include_directories(${OpenCV_INCLUDE_DIRS})
14 |
15 | if(WIN32)
16 | enable_language(CUDA)
17 | endif(WIN32)
18 |
19 | include_directories(${PROJECT_SOURCE_DIR}/include)
20 |
21 | # cuda
22 | include_directories(/usr/local/cuda/include)
23 | link_directories(/usr/local/cuda/lib64)
24 | # tensorrt
25 | include_directories(/home/rex/TensorRT-8.2.0.6/include/)
26 | link_directories(/home/rex/TensorRT-8.2.0.6/targets/x86_64-linux-gnu/lib)
27 |
28 | cuda_add_library(warpaffine SHARED src/warpaffine.cu)
29 | target_link_libraries(warpaffine)
30 |
31 | cuda_add_executable(test src/test.cpp)
32 | target_link_libraries(test warpaffine ${OpenCV_LIBS})
33 |
34 | if(UNIX)
35 | add_definitions(-O2 -pthread)
36 | endif(UNIX)
37 |
38 |
39 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CMakeDetermineCompilerABI_C.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CMakeDetermineCompilerABI_C.bin
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CMakeDetermineCompilerABI_CXX.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CMakeDetermineCompilerABI_CXX.bin
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CompilerIdC/a.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CompilerIdC/a.out
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CompilerIdCXX/a.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/CMakeFiles/3.16.3/CompilerIdCXX/a.out
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/CMakeRuleHashes.txt:
--------------------------------------------------------------------------------
1 | # Hashes of file build rules.
2 | cb598fc354e6b0a852644dd25bf90882 CMakeFiles/warpaffine.dir/src/warpaffine_generated_warpaffine.cu.o
3 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/TargetDirectories.txt:
--------------------------------------------------------------------------------
1 | /home/rex/Desktop/deeplearning_rex/warpaffine-cuda/build/CMakeFiles/rebuild_cache.dir
2 | /home/rex/Desktop/deeplearning_rex/warpaffine-cuda/build/CMakeFiles/edit_cache.dir
3 | /home/rex/Desktop/deeplearning_rex/warpaffine-cuda/build/CMakeFiles/test.dir
4 | /home/rex/Desktop/deeplearning_rex/warpaffine-cuda/build/CMakeFiles/warpaffine.dir
5 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/cmake.check_cache:
--------------------------------------------------------------------------------
1 | # This file is generated by cmake for dependency checking of the CMakeCache.txt file
2 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/progress.marks:
--------------------------------------------------------------------------------
1 | 4
2 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/test.dir/flags.make:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.16
3 |
4 | # compile CXX with /usr/bin/c++
5 | CXX_FLAGS = -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -std=c++11 -O2 -pthread -std=gnu++11
6 |
7 | CXX_DEFINES = -DAPI_EXPORTS
8 |
9 | CXX_INCLUDES = -I/home/rex/Desktop/deeplearning_rex/warpaffine-cuda/include -I/usr/local/cuda/include -I/home/rex/TensorRT-8.2.0.6/include -isystem /usr/local/include/opencv4
10 |
11 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/test.dir/progress.make:
--------------------------------------------------------------------------------
1 | CMAKE_PROGRESS_1 = 1
2 | CMAKE_PROGRESS_2 = 2
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/warpaffine.dir/depend.internal:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.16
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/warpaffine.dir/depend.make:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.16
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/warpaffine.dir/flags.make:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.16
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/warpaffine.dir/link.txt:
--------------------------------------------------------------------------------
1 | /usr/bin/c++ -fPIC -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -shared -Wl,-soname,libwarpaffine.so -o libwarpaffine.so CMakeFiles/warpaffine.dir/src/warpaffine_generated_warpaffine.cu.o -L/usr/local/cuda/lib64 -L/home/rex/TensorRT-8.2.0.6/targets/x86_64-linux-gnu/lib -Wl,-rpath,/usr/local/cuda/lib64:/home/rex/TensorRT-8.2.0.6/targets/x86_64-linux-gnu/lib /usr/local/cuda/lib64/libcudart.so
2 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/CMakeFiles/warpaffine.dir/progress.make:
--------------------------------------------------------------------------------
1 | CMAKE_PROGRESS_1 = 3
2 | CMAKE_PROGRESS_2 = 4
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/libwarpaffine.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/libwarpaffine.so
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/build/test:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/warpaffine-cuda/build/test
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/include/warpaffine.h:
--------------------------------------------------------------------------------
1 |
2 | #include
3 | #include
4 | #include
5 | #include
6 | #define min(a, b) ((a) < (b) ? (a) : (b))
7 | #define num_threads 512
8 |
9 | typedef unsigned char uint8_t;
10 |
11 | struct MySize{
12 | int width = 0, height = 0;
13 |
14 | MySize() = default;
15 | MySize(int w, int h)
16 | :width(w), height(h){}
17 | };
18 |
19 |
20 | struct AffineMatrix{
21 |     // i2d: the forward affine matrix M
22 |     float i2d[6];
23 |     // d2i: the inverse of M
24 |     float d2i[6];
25 |     // compute the inverse matrix
26 | void invertAffineTransform(float imat[6], float omat[6]);
27 |
28 | void compute(const MySize& from, const MySize& to);
29 | };
30 |
31 | __device__ void affine_project(float* matrix, int x, int y, float* proj_x, float* proj_y);
32 |
33 | __global__ void warp_affine_bilinear_kernel(
34 | uint8_t* src, int src_line_size, int src_width, int src_height,
35 | uint8_t* dst, int dst_line_size, int dst_width, int dst_height,
36 | uint8_t fill_value, AffineMatrix matrix
37 | );
38 |
39 | void warp_affine_bilinear(
40 | uint8_t* src, int src_line_size, int src_width, int src_height,
41 | uint8_t* dst, int dst_line_size, int dst_width, int dst_height,
42 | uint8_t fill_value
43 | );
44 |
45 | cv::Mat warpaffine_to_center_align(const cv::Mat& image, const cv::Size& size);
46 |
47 | #define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
48 | bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line);
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/scripts/warpaffine.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | def inv_mat(M):
5 |     # invert a 2x3 affine matrix of the form [[k, 0, b1], [0, k, b2]]
6 |     k = M[0, 0]
7 |     b1 = M[0, 2]
8 |     b2 = M[1, 2]
9 |     return np.array([[1/k, 0, -b1/k],
10 |                      [0, 1/k, -b2/k]])
11 |
12 | def transform(image,dst_size):
13 | oh,ow = image.shape[:2]
14 | dh,dw = dst_size
15 | scale = min(dw/ow,dh/oh)
16 |
17 | M = np.array([
18 | [scale,0,-scale * ow * 0.5 + dw * 0.5],
19 | [0,scale,-scale * oh * 0.5 + dh * 0.5]
20 | ])
21 | return cv2.warpAffine(image,M,dst_size),M,inv_mat(M)
22 |
23 | img = cv2.imread("/home/rex/Desktop/notebook/warpaffine/keji2.jpeg")
24 | img_d,M,inv= transform(img,(640,640))
25 |
26 | plt.subplot(1,2,1)
27 | plt.title("WarpAffine")
28 | plt.imshow(img_d[...,::-1])
29 |
30 | img_s = cv2.warpAffine(img_d,inv,img.shape[:2][::-1])
31 | print(img_s)
32 | plt.subplot(1,2,2)
33 | plt.title("or")
34 | plt.imshow(img_s[...,::-1])
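A quick check of the closed form used by inv_mat above: the letterbox transform applies the same scale on both axes, so

$$
M = \begin{pmatrix} s & 0 & b_1 \\ 0 & s & b_2 \end{pmatrix},
\qquad
M^{-1} = \begin{pmatrix} 1/s & 0 & -b_1/s \\ 0 & 1/s & -b_2/s \end{pmatrix},
$$

since $x' = s\,x + b_1$ implies $x = (x' - b_1)/s$, and likewise for $y$.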
--------------------------------------------------------------------------------
/sideline_learn/warpaffine-cuda/src/test.cpp:
--------------------------------------------------------------------------------
1 | #include "warpaffine.h"
2 | using namespace cv;
3 |
4 | int main(){
5 | Mat image = imread("/home/rex/Desktop/rex_extra/notebook/warpaffine/keji1.jpeg");
6 | Mat test_image = warpaffine_to_center_align(image, Size(640, 640));
7 | imwrite("test.jpg", test_image);
8 | imshow("1",test_image);
9 | waitKey(0);
10 | return 0;
11 | }
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/.catkin_workspace:
--------------------------------------------------------------------------------
1 | # This file currently only serves to mark the location of a catkin workspace for tool integration
2 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/Screenshot from 2022-07-30 09-39-36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/sideline_learn/yolo5-6.0-ros/Screenshot from 2022-07-30 09-39-36.png
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | /opt/ros/melodic/share/catkin/cmake/toplevel.cmake
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.20)
2 | project(base)
3 |
4 | add_compile_options(-std=c++11)
5 |
6 |
7 | find_package(catkin REQUIRED COMPONENTS
8 | nodelet
9 | roscpp
10 | rospy
11 | sensor_msgs
12 | cv_bridge
13 | std_msgs
14 | message_generation
15 |
16 | )
17 |
18 | find_package(CUDA REQUIRED)
19 | find_package(PkgConfig REQUIRED)
20 | pkg_check_modules(JSONCPP jsoncpp)
21 | link_libraries(${JSONCPP_LIBRARIES})
22 |
23 | add_service_files(
24 | FILES
25 | RosImage.srv
26 | )
27 |
28 | generate_messages(
29 | DEPENDENCIES
30 | std_msgs
31 | sensor_msgs
32 | )
33 |
34 |
35 | catkin_package(
36 | INCLUDE_DIRS include
37 | LIBRARIES base
38 | CATKIN_DEPENDS nodelet roscpp rospy sensor_msgs cv_bridge std_msgs message_runtime
39 |
40 | )
41 |
42 | include_directories(
43 | include
44 | ${catkin_INCLUDE_DIRS}
45 | )
46 |
47 | cuda_add_library(base SHARED src/base.cpp)
48 |
49 | add_dependencies(base ${PROJECT_NAME}_gencpp)
50 |
51 | target_link_libraries(base
52 | ${catkin_LIBRARIES}
53 | -lpthread
54 | )
55 |
56 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/code_info.txt:
--------------------------------------------------------------------------------
1 | warning:
2 | 001 active_safety
3 | 002 active_safety cps status
4 | 003 active_safety top lidar loading
5 | 004 active_safety path check
6 | 005 active_safety junction speed limit
7 | 006 could not get matched cps info
8 | 007 active_safety end check
9 | 008 active_safety slow down
10 | 009 active_safety top safety
11 | 010 GPS or Chassis info may delay
12 | 011 trailer angle init error, could not get correct path
13 | 012 control fail
14 | 013 fms suspend
15 | 014 emergency stop not triggered in TS area
16 | 015 emergency stop not released in TS area
17 | 016 no pass area stop
18 | 017 guarding not open
19 | 018 active_safety lane change
20 | 019 could not get stop dis rtgc
21 | 020 could not get stop dis qc
22 | 021 active_safety stereo_check fail
23 | 022 active_safety lidar_check fail
24 | 023 precise stop over 1.5 limit
25 | 024 lane change blocked by light tower
26 | 025 max steer arrive
27 | 026 routing block in cutout
28 | 027 cps info delay
29 | 028 precise stop timeout
30 | 029 too close for routing
31 | 030 vehicle param not init
32 | 031 lock task get
33 | 032 task abort
34 | 033 overtake stop
35 |
36 | error
37 | 001 routing_fail
38 | 002 top lidar scenario not calibed, require calib
39 | 003 websocket lost connection
40 | 004 stop dis config file open fail
41 | 006 cps_fetch crash
42 | 007 fms server disconnect
43 | 008 motion obstacle abnormal
44 | 009 active safety abnormal
45 | 010 module crash
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/include/base/base.h:
--------------------------------------------------------------------------------
1 | #ifndef _BASE_H
2 | #define _BASE_H
3 |
4 | #include <sstream>
5 | #include <string>
6 | #include <unordered_map>
7 | #include <vector>
8 | #include "boost/type_index.hpp"
9 |
10 | #include "ros/ros.h"
11 | #include
12 | #include
13 | #include
14 | #include "std_msgs/String.h"
15 |
16 |
17 | namespace base
18 | {
19 |
20 | class Base : public nodelet::Nodelet
21 | {
22 | protected:
23 | ros::NodeHandle nh_;
24 | ros::NodeHandle pnh_;
25 | std::unordered_map<std::string, std::string> params_;
26 |
27 |
28 | public:
29 | Base(){};
30 | ~Base(){};
31 |
32 | void init()
33 | {
34 | nh_ = getNodeHandle();
35 | pnh_ = getPrivateNodeHandle();
36 | std::string node_name = getName();
37 |
38 | std::vector<std::string> param_names;
39 | if (pnh_.getParamNames(param_names))
40 | {
41 | for (std::string name : param_names)
42 | {
43 | std::string param_name;
44 | bool valid = get_param(node_name, name, param_name);
45 | if (valid)
46 | {
47 | std::string param_value;
48 | pnh_.getParam(name, param_value);
49 | ROS_INFO("settings: %s,%s", param_name.c_str(), param_value.c_str());
50 | params_[param_name] = param_value;
51 | }
52 | }
53 | }
54 | }
55 |
56 | bool get_param(std::string &node_name, std::string &name,
57 | std::string ¶m_name)
58 | {
59 | std::stringstream ss(name);
60 | bool valid = false;
61 | while (getline(ss, param_name, '/'))
62 | {
63 | if ("/" + param_name == node_name)
64 | {
65 | valid = true;
66 | }
67 | }
68 | return valid;
69 | }
70 | };
71 |
72 | }
73 |
74 | #endif
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/nodelet_plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/src/base.cpp:
--------------------------------------------------------------------------------
1 | #include "base/base.h"
2 |
3 | namespace base
4 | {
5 |
6 | }
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/base/srv/RosImage.srv:
--------------------------------------------------------------------------------
1 | sensor_msgs/Image image
2 | ---
3 | string result
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/client/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(client_nodelet)
3 |
4 | add_compile_options(-std=c++11)
5 |
6 |
7 | find_package(catkin REQUIRED COMPONENTS
8 | base
9 | nodelet
10 | roscpp
11 | rospy
12 | std_msgs
13 | )
14 |
15 |
16 | find_package(OpenCV REQUIRED)
17 | find_package(CUDA REQUIRED)
18 | find_package(PkgConfig REQUIRED)
19 | pkg_check_modules(JSONCPP jsoncpp)
20 | link_libraries(${JSONCPP_LIBRARIES})
21 |
22 | catkin_package(
23 | INCLUDE_DIRS include
24 | LIBRARIES client_nodelet
25 | CATKIN_DEPENDS base nodelet roscpp rospy std_msgs message_runtime
26 | )
27 |
28 | include_directories(
29 | include
30 | ${catkin_INCLUDE_DIRS}
31 | )
32 |
33 |
34 | add_library(client_nodelet SHARED src/client_nodelet.cpp)
35 | target_link_libraries(client_nodelet
36 | ${catkin_LIBRARIES}
37 | ${OpenCV_LIBS}
38 | -lpthread
39 | ${OpenCV_LIBS}
40 | )
41 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/client/include/client_nodelet/client_nodelet.h:
--------------------------------------------------------------------------------
1 | #ifndef CLIENT_CLIENT_NODELET_H
2 | #define CLIENT_CLIENT_NODELET_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #include
10 | #include
11 | #include "base/base.h"
12 | #include "base/RosImage.h"
13 | using namespace std;
14 |
15 | namespace client_nodelet
16 | {
17 | class ClientNodelet : public base::Base
18 | {
19 | public:
20 | ~ClientNodelet();
21 |
22 | virtual void onInit();
23 |
24 | private:
25 | void run();
26 |
27 | private:
28 | thread thread_;
29 | string image_path;
30 | string client_name;
31 | };
32 | }
33 |
34 | #endif
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/client/launch/client_nodelet.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/client/nodelet_plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/client/src/client_nodelet.cpp:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | #include "client_nodelet/client_nodelet.h"
4 | #include
5 | #include
6 |
7 | using namespace std;
8 | using namespace cv;
9 |
10 | namespace client_nodelet
11 | {
12 | ClientNodelet::~ClientNodelet() {}
13 |
14 | void ClientNodelet::onInit()
15 | {
16 | init();
17 | image_path = params_["image_path"];
18 | client_name = params_["client_name"];
19 | thread_ = thread(boost::bind(&ClientNodelet::run, this));
20 | }
21 |
22 | void ClientNodelet::run()
23 | {
24 |
25 | setlocale(LC_ALL, "");
26 | ros::service::waitForService(client_name);
27 | ros::ServiceClient client = nh_.serviceClient<base::RosImage>(client_name);
28 |
29 | Mat image = cv::imread(image_path);
30 | sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
31 | sensor_msgs::Image msg1 = *msg;
32 |
33 | base::RosImage ai;
34 | ai.request.image = *msg;
35 | bool flag = client.call(ai);
36 | if (flag)
37 | {
38 | ROS_INFO("detected target,result = %s", ai.response.result.c_str());
39 | }
40 | else
41 | {
42 | ROS_INFO("failed,no target");
43 | }
44 | ros::Duration(0.2).sleep();
45 |
46 | }
47 |
48 | } // namespace client_nodelet
49 | PLUGINLIB_EXPORT_CLASS(client_nodelet::ClientNodelet,
50 | nodelet::Nodelet)
51 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.20)
2 |
3 | project(yolov5_trt)
4 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED")
5 | add_definitions(-std=c++11)
6 | add_definitions(-DAPI_EXPORTS)
7 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
8 | set(CMAKE_CXX_STANDARD 11)
9 | set(CMAKE_BUILD_TYPE Debug)
10 |
11 | find_package(CUDA REQUIRED)
12 | find_package(OpenCV)
13 | include_directories(${OpenCV_INCLUDE_DIRS})
14 |
15 | if(WIN32)
16 | enable_language(CUDA)
17 | endif(WIN32)
18 |
19 | include_directories(${PROJECT_SOURCE_DIR}/include)
20 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different
21 | # cuda
22 | include_directories(/usr/local/cuda/include)
23 | link_directories(/usr/local/cuda/lib64)
24 | # tensorrt
25 | include_directories(/home/rex/TensorRT-8.2.0.6/include/)
26 | link_directories(/home/rex/TensorRT-8.2.0.6/targets/x86_64-linux-gnu/lib)
27 |
28 |
29 | cuda_add_library(myplugins SHARED src/yololayer.cu src/basic_transform.cu)
30 | target_link_libraries(myplugins nvinfer cudart)
31 |
32 |
33 |
34 | cuda_add_executable(yolov5 src/calibrator.cpp src/yolov5.cpp src/preprocess.cu)
35 | target_link_libraries(yolov5 nvinfer cudart myplugins ${OpenCV_LIBS})
36 |
37 | cuda_add_executable(detect src/detect.cpp src/preprocess.cu)
38 | target_link_libraries(detect nvinfer cudart myplugins ${OpenCV_LIBS})
39 |
40 | if(UNIX)
41 | add_definitions(-O2 -pthread)
42 | endif(UNIX)
43 |
44 |
45 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/basic_transform.h:
--------------------------------------------------------------------------------
1 | #ifndef BASIC_TRANSFORM_H
2 | #define BASIC_TRANSFORM_H
3 | #include <cstdint>
4 | void PicResize(uint8_t*, uint8_t*, uint32_t, uint32_t, uint32_t, uint32_t);
5 | void PicNormalize(uint8_t*, float*, uint32_t, uint32_t);
6 | #endif
7 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/calibrator.h:
--------------------------------------------------------------------------------
1 | #ifndef ENTROPY_CALIBRATOR_H
2 | #define ENTROPY_CALIBRATOR_H
3 |
4 | #include <NvInfer.h>
5 | #include <string>
6 | #include <vector>
7 | #include "macros.h"
8 |
9 | //! \class Int8EntropyCalibrator2
10 | //!
11 | //! \brief Implements Entropy calibrator 2.
12 | //! CalibrationAlgoType is kENTROPY_CALIBRATION_2.
13 | //!
14 | class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
15 | {
16 | public:
17 | Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
18 |
19 | virtual ~Int8EntropyCalibrator2();
20 | int getBatchSize() const TRT_NOEXCEPT override;
21 | bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
22 | const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
23 | void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
24 |
25 | private:
26 | int batchsize_;
27 | int input_w_;
28 | int input_h_;
29 | int img_idx_;
30 | std::string img_dir_;
31 | std::vector<std::string> img_files_;
32 | size_t input_count_;
33 | std::string calib_table_name_;
34 | const char* input_blob_name_;
35 | bool read_cache_;
36 | void* device_input_;
37 | std::vector<char> calib_cache_;
38 | };
39 |
40 | #endif // ENTROPY_CALIBRATOR_H
41 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef TRTX_CUDA_UTILS_H_
2 | #define TRTX_CUDA_UTILS_H_
3 |
4 | #include
5 |
6 | #ifndef CUDA_CHECK
7 | #define CUDA_CHECK(callstr)\
8 | {\
9 | cudaError_t error_code = callstr;\
10 | if (error_code != cudaSuccess) {\
11 | std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
12 | assert(0);\
13 | }\
14 | }
15 | #endif // CUDA_CHECK
16 |
17 | #endif // TRTX_CUDA_UTILS_H_
18 |
19 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/macros.h:
--------------------------------------------------------------------------------
1 | #ifndef __MACROS_H
2 | #define __MACROS_H
3 |
4 | #ifdef API_EXPORTS
5 | #if defined(_MSC_VER)
6 | #define API __declspec(dllexport)
7 | #else
8 | #define API __attribute__((visibility("default")))
9 | #endif
10 | #else
11 |
12 | #if defined(_MSC_VER)
13 | #define API __declspec(dllimport)
14 | #else
15 | #define API
16 | #endif
17 | #endif // API_EXPORTS
18 |
19 | #if NV_TENSORRT_MAJOR >= 8
20 | #define TRT_NOEXCEPT noexcept
21 | #define TRT_CONST_ENQUEUE const
22 | #else
23 | #define TRT_NOEXCEPT
24 | #define TRT_CONST_ENQUEUE
25 | #endif
26 |
27 | #endif // __MACROS_H
28 |
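A minimal sketch of how these macros are meant to be used (the class name is illustrative): when the library itself is built, API_EXPORTS is defined (see -DAPI_EXPORTS in the CMakeLists above), so API expands to an export attribute; consumers of the prebuilt library get the import/empty form instead.

#include "macros.h"

class API ExamplePlugin
{
public:
    // expands to noexcept when building against TensorRT 8+, to nothing otherwise
    void configure() TRT_NOEXCEPT;
};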
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/preprocess.h:
--------------------------------------------------------------------------------
1 | #ifndef __PREPROCESS_H
2 | #define __PREPROCESS_H
3 |
4 | #include <cuda_runtime.h>
5 | #include <cstdint>
6 |
7 |
8 | struct AffineMatrix{
9 | float value[6];
10 | };
11 |
12 |
13 | void preprocess_kernel_img(uint8_t* src, int src_width, int src_height,
14 | float* dst, int dst_width, int dst_height,
15 | cudaStream_t stream);
16 | #endif // __PREPROCESS_H
17 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/include/utils.h:
--------------------------------------------------------------------------------
1 | #ifndef TRTX_YOLOV5_UTILS_H_
2 | #define TRTX_YOLOV5_UTILS_H_
3 |
4 | #include <dirent.h>
5 | #include <opencv2/opencv.hpp>
6 |
7 | static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
8 | int w, h, x, y;
9 | float r_w = input_w / (img.cols*1.0);
10 | float r_h = input_h / (img.rows*1.0);
11 | if (r_h > r_w) {
12 | w = input_w;
13 | h = r_w * img.rows;
14 | x = 0;
15 | y = (input_h - h) / 2;
16 | } else {
17 | w = r_h * img.cols;
18 | h = input_h;
19 | x = (input_w - w) / 2;
20 | y = 0;
21 | }
22 | cv::Mat re(h, w, CV_8UC3);
23 | cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
24 | cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
25 | re.copyTo(out(cv::Rect(x, y, re.cols, re.rows)));
26 | return out;
27 | }
28 |
29 | static inline int read_files_in_dir(const char *p_dir_name, std::vector &file_names) {
30 | DIR *p_dir = opendir(p_dir_name);
31 | if (p_dir == nullptr) {
32 | return -1;
33 | }
34 |
35 | struct dirent* p_file = nullptr;
36 | while ((p_file = readdir(p_dir)) != nullptr) {
37 | if (strcmp(p_file->d_name, ".") != 0 &&
38 | strcmp(p_file->d_name, "..") != 0) {
39 | //std::string cur_file_name(p_dir_name);
40 | //cur_file_name += "/";
41 | //cur_file_name += p_file->d_name;
42 | std::string cur_file_name(p_file->d_name);
43 | file_names.push_back(cur_file_name);
44 | }
45 | }
46 |
47 | closedir(p_dir);
48 | return 0;
49 | }
50 |
51 | #endif // TRTX_YOLOV5_UTILS_H_
52 |
53 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-6.0/src/detect.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include "cuda_utils.h"
5 | #include "logging.h"
6 | #include "common.hpp"
7 | #include "utils.h"
8 | #include "preprocess.h"
9 | #include "yolov5-detect.h"
10 |
11 | int main(int argc, char **argv)
12 | {
13 | cudaSetDevice(0);
14 |
15 | std::string engine_name = "yolov5s.engine";
16 | yolov5 *det = new yolov5(engine_name);
17 |
18 | cv::Mat img = cv::imread("1.jpg");
19 | int w = img.cols;
20 | int h = img.rows;
21 | unsigned char *d_image;
22 | cudaMalloc((void **)&d_image, sizeof(unsigned char) * w * h * 3);
23 | cudaMemcpy(d_image, img.data, w * h * 3 * sizeof(unsigned char),cudaMemcpyHostToDevice);
24 |
25 | bool flag = det->detect(d_image, w, h,img);
26 |
27 | cudaFree(d_image);
28 | return 0;
29 | }
30 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-infer/include/yolov5_infer_nodelet/yolov5_infer_nodelet.h:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV5_INFER_NODELET_H
2 | #define YOLOV5_INFER_NODELET_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include "base/base.h"
9 | #include
10 | #include "yolov5-detect.h"
11 |
12 | using namespace cv;
13 | using namespace std;
14 |
15 | namespace yolov5_infer_nodelet
16 | {
17 | class Yolov5InferNodelet : public base::Base
18 | {
19 | public:
20 | ~Yolov5InferNodelet();
21 |
22 | virtual void onInit();
23 |
24 | private:
25 | void run();
26 |
27 | private:
28 | int gpu_id;
29 | string engine_path;
30 | string image_path;
31 | thread thread_;
32 | yolov5 *infer;
33 | };
34 | }
35 |
36 | #endif // YOLOV5_INFER_NODELET_H
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-infer/launch/yolov5_infer_nodelet.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-infer/nodelet_plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-infer/src/yolov5_infer_nodelet.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include "yolov5_infer_nodelet/yolov5_infer_nodelet.h"
4 | #include "cuda_runtime.h"
5 | #include "dirent.h"
6 |
7 | using namespace std;
8 | using namespace cv;
9 |
10 | namespace yolov5_infer_nodelet
11 | {
12 | Yolov5InferNodelet::~Yolov5InferNodelet() {}
13 |
14 | void Yolov5InferNodelet::onInit()
15 | {
16 | init();
17 | gpu_id = stoi(params_["gpu_id"]);
18 | engine_path = params_["engine_path"];
19 | image_path = params_["image_path"];
20 | thread_ = thread(boost::bind(&Yolov5InferNodelet::run, this));
21 | }
22 |
23 | void Yolov5InferNodelet::run()
24 | {
25 | cudaSetDevice(gpu_id);
26 | infer = new yolov5(engine_path);
27 | cv::Mat img = cv::imread(image_path);
28 | int w = img.cols;
29 | int h = img.rows;
30 | unsigned char *d_image;
31 | cudaMalloc((void **)&d_image, sizeof(unsigned char) * w * h * 3);
32 | cudaMemcpy(d_image, img.data, w * h * 3 * sizeof(unsigned char),cudaMemcpyHostToDevice);
33 | infer->detect(d_image, w, h,img);
34 | cv::imshow("show_image", img);
35 | cv::waitKey(0);
36 | cudaFree(d_image);
37 | }
38 | }
39 | PLUGINLIB_EXPORT_CLASS(yolov5_infer_nodelet::Yolov5InferNodelet,
40 | nodelet::Nodelet)
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-server/include/yolov5_server_nodelet/yolov5_server_nodelet.h:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV5_SERVER_NODELET_H
2 | #define YOLOV5_SERVER_NODELET_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include "base/base.h"
9 | #include "base/RosImage.h"
10 | #include "yolov5-detect.h"
11 |
12 | using namespace cv;
13 | using namespace std;
14 |
15 | namespace yolov5_server_nodelet
16 | {
17 | class Yolov5ServerNodelet : public base::Base
18 | {
19 | public:
20 | ~Yolov5ServerNodelet();
21 |
22 | virtual void onInit();
23 | void run();
24 |
25 | private:
26 | bool inference(base::RosImage::Request &request,
27 | base::RosImage::Response &response);
28 |
29 | int gpu_id;
30 | unsigned char *d_image;
31 | yolov5 *infer;
32 | std::string engine_path;
33 | std::string server_name;
34 | };
35 | }
36 |
37 | #endif // YOLOV5_SERVER_NODELET_H
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-server/launch/yolov5_server_nodelet.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/sideline_learn/yolo5-6.0-ros/src/yolov5-server/nodelet_plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/trt_cpp/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.6)
2 |
3 | project(demo_trt)
4 |
5 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
6 | set(CMAKE_CXX_STANDARD 17)
7 | set(CMAKE_BUILD_TYPE Debug)
8 | set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/workspace)
9 | set(HAS_PYTHON OFF)
10 |
11 | set(TENSORRT_DIR "/data/app/TensorRT-8.4.3.1")
12 |
13 |
14 | find_package(CUDA REQUIRED)
15 | find_package(OpenCV)
16 |
17 | include_directories(
18 | ${PROJECT_SOURCE_DIR}/src
19 | ${PROJECT_SOURCE_DIR}/src/trt
20 | ${PROJECT_SOURCE_DIR}/src/trt/common
21 | ${OpenCV_INCLUDE_DIRS}
22 | ${CUDA_TOOLKIT_ROOT_DIR}/include
23 | ${TENSORRT_DIR}/include
24 | ${CUDNN_DIR}/include
25 | )
26 | link_directories(
27 | ${TENSORRT_DIR}/lib
28 | ${CUDA_TOOLKIT_ROOT_DIR}/lib64
29 | ${CUDNN_DIR}/lib
30 | )
31 |
32 |
33 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -O0 -Wfatal-errors -pthread -w -g")
34 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -O0 -Xcompiler -fPIC -g -w ${CUDA_GEN_CODE}")
35 | file(GLOB_RECURSE cpp_srcs ${PROJECT_SOURCE_DIR}/src/*.cpp)
36 | file(GLOB_RECURSE cuda_srcs ${PROJECT_SOURCE_DIR}/src/*.cu)
37 | cuda_add_library(plugin_list SHARED ${cuda_srcs})
38 | target_link_libraries(plugin_list nvinfer nvinfer_plugin nvonnxparser)
39 | target_link_libraries(plugin_list cuda cublas cudart cudnn)
40 | target_link_libraries(plugin_list protobuf pthread)
41 | target_link_libraries(plugin_list ${OpenCV_LIBS})
42 |
43 | add_executable(demo_infer ${cpp_srcs})
44 |
45 | target_link_libraries(demo_infer plugin_list)
46 |
47 |
48 |
--------------------------------------------------------------------------------
/trt_cpp/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | #### 1. The demo projects
4 | 
5 | Start with the projects under demo: download the corresponding model weights there, or grab them directly from the provided Baidu Netdisk link.
6 | 
7 | #### 2. export_onnx
8 | 
9 | Once a model is downloaded, export_onnx.py or predict.py inside onnx2trt/demo can usually produce the corresponding ONNX model; infer-onnxruntime runs inference with that ONNX model, and onnx_simplify simplifies it.
10 | 
11 | #### 3. tensorrt
12 | 
13 | Put the ONNX models to be converted, such as vit.onnx or centernet.onnx, under workspace.
14 | 
15 | Adjust the Makefile to your local environment (the automatic environment setup provided by 手写AI can serve as a reference), or edit the paths in CMakeLists.txt instead.
16 | 
17 | src/main.cpp contains the build_engine and inference code. Running make run directly in the onnx2trt folder starts the inference.
18 | 
19 | Alternatively, set the TensorRT and protobuf paths in CMakeLists.txt to match your machine, change the model and image paths to absolute paths, set the demo name in src/main.cpp, and then:
20 | 
21 | mkdir build && cd build
22 | 
23 | cmake .. && make -j
24 | 
25 | Run the executable ./demo_infer inside workspace to launch the corresponding demo.
26 | 
27 | #### 4. Summary
28 | 
29 | This course covered the torch2onnx2trt workflow, revisited several classic deep-learning models, reimplemented some models' post-processing in C++, and improved my C++ skills along the way.
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/trt_cpp/src/main.cpp:
--------------------------------------------------------------------------------
1 |
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 |
8 | using namespace std;
9 |
10 | int main()
11 | {
12 | demoInfer demo;
13 | string demo_name = "yolov5seg";
14 | demo.do_infer(demo_name);
15 | return 0;
16 | }
17 |
--------------------------------------------------------------------------------
/trt_cpp/src/trt/common/cuda-tools.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CUDA_TOOLS_HPP
2 | #define CUDA_TOOLS_HPP
3 |
4 | #include <cuda_runtime.h>
5 | #include <string>
6 |
7 | #define checkRuntime(call) CUDATools::check_runtime(call, #call, __LINE__, __FILE__)
8 |
9 | #define checkKernel(...) \
10 | __VA_ARGS__; \
11 | do{cudaError_t cudaStatus = cudaPeekAtLastError(); \
12 | if (cudaStatus != cudaSuccess){ \
13 | INFOE("launch failed: %s", cudaGetErrorString(cudaStatus)); \
14 | }} while(0);
15 |
16 | namespace CUDATools{
17 |
18 | bool check_runtime(cudaError_t e, const char* call, int iLine, const char *szFile);
19 | bool check_device_id(int device_id);
20 | int current_device_id();
21 | std::string device_description();
22 |
23 | // automatically switch the current device id, and switch back on destruction
24 | class AutoDevice{
25 | public:
26 | AutoDevice(int device_id = 0);
27 | virtual ~AutoDevice();
28 |
29 | private:
30 | int old_ = -1;
31 | };
32 | }
33 |
34 |
35 | #endif // CUDA_TOOLS_HPP
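A minimal host-side sketch of how these helpers are used (buffer size is illustrative); checkKernel wraps a kernel launch the same way and reports launch failures through INFOE.

#include "cuda-tools.hpp"
#include <cuda_runtime.h>

int main() {
    CUDATools::AutoDevice guard(0);  // switch to GPU 0, restore the previous device on scope exit

    float* d_data = nullptr;
    checkRuntime(cudaMalloc(&d_data, 1024 * sizeof(float)));   // any error is reported with file/line
    checkRuntime(cudaMemset(d_data, 0, 1024 * sizeof(float)));
    checkRuntime(cudaFree(d_data));
    return 0;
}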
--------------------------------------------------------------------------------
/trt_cpp/src/trt/common/matrix.hpp:
--------------------------------------------------------------------------------
1 | #ifndef GEMM_HPP
2 | #define GEMM_HPP
3 |
4 | #include <functional>
5 | #include <initializer_list>
6 | #include <iostream>
7 | #include <vector>
8 | /* a small custom Matrix class */
9 | class Matrix{
10 | public:
11 | Matrix();
12 | Matrix(int rows, int cols, const std::initializer_list<float>& pdata={});
13 | Matrix(int rows, int cols, const std::vector<float>& v);
14 |
15 | const float& operator()(int irow, int icol)const {return data_[irow * cols_ + icol];}
16 | float& operator()(int irow, int icol){return data_[irow * cols_ + icol];}
17 | Matrix element_wise(const std::function<float(float)>& func) const;
18 | Matrix operator*(const Matrix &value) const;
19 | Matrix operator*(float value) const;
20 |
21 | Matrix operator+(float value) const;
22 |
23 | Matrix operator-(float value) const;
24 | Matrix operator/(float value) const;
25 |
26 | int rows() const{return rows_;}
27 | int cols() const{return cols_;}
28 | Matrix view(int rows, int cols) const;
29 | Matrix power(float y) const;
30 | float reduce_sum() const;
31 | float* ptr() const{return (float*)data_.data();}
32 | Matrix exp(float value);
33 |
34 | int rows_ = 0;
35 | int cols_ = 0;
36 | std::vector<float> data_;
37 | };
38 |
39 | /* global operator overload so that "cout << m;" works */
40 | std::ostream& operator << (std::ostream& out, const Matrix& m);
41 |
42 | #endif // GEMM_HPP
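A minimal usage sketch of the Matrix class above, assuming the definitions live in the corresponding matrix.cpp; the numbers are illustrative.

#include "matrix.hpp"
#include <iostream>

int main() {
    Matrix a(2, 3, {1, 2, 3,
                    4, 5, 6});
    Matrix b(3, 2, {1, 0,
                    0, 1,
                    1, 1});
    Matrix c = a * b;                                          // 2x3 * 3x2 -> 2x2
    Matrix d = c.element_wise([](float x){ return x * 2; });   // apply a function to every element
    std::cout << c << std::endl;
    std::cout << "sum of d = " << d.reduce_sum() << std::endl;
    return 0;
}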
--------------------------------------------------------------------------------
/trt_cpp/src/trt/common/simple-logger.hpp:
--------------------------------------------------------------------------------
1 | #ifndef SIMPLE_LOGGER_HPP
2 | #define SIMPLE_LOGGER_HPP
3 |
4 | #include
5 |
6 | #define INFOD(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Debug, __VA_ARGS__)
7 | #define INFOV(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Verbose, __VA_ARGS__)
8 | #define INFO(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Info, __VA_ARGS__)
9 | #define INFOW(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Warning, __VA_ARGS__)
10 | #define INFOE(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Error, __VA_ARGS__)
11 | #define INFOF(...) SimpleLogger::__log_func(__FILE__, __LINE__, SimpleLogger::LogLevel::Fatal, __VA_ARGS__)
12 |
13 |
14 | namespace SimpleLogger{
15 |
16 | enum class LogLevel : int{
17 | Debug = 5,
18 | Verbose = 4,
19 | Info = 3,
20 | Warning = 2,
21 | Error = 1,
22 | Fatal = 0
23 | };
24 |
25 | void set_log_level(LogLevel level);
26 | LogLevel get_log_level();
27 | void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...);
28 |
29 | }; // SimpleLogger
30 |
31 | #endif // SIMPLE_LOGGER_HPP
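A minimal usage sketch of the logging macros above; the format strings are printf-style, and it assumes the usual convention (implemented in the corresponding .cpp) that messages whose level value exceeds the configured level are suppressed.

#include "simple-logger.hpp"

int main() {
    SimpleLogger::set_log_level(SimpleLogger::LogLevel::Info);
    INFO("engine loaded with %d bindings", 2);   // Info (3) <= configured level, printed
    INFOD("layer time %.2f ms", 0.53);           // Debug (5) > Info (3), suppressed
    INFOE("failed to open %s", "model.onnx");    // Error (1), printed at Info level
    return 0;
}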
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/centernet/centernet.h:
--------------------------------------------------------------------------------
1 | #ifndef CENTERNET_HPP
2 | #define CENTERNET_HPP
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | struct bbox{
9 | float left, top, right, bottom, confidence;
10 | int label;
11 |
12 | bbox() = default;
13 | bbox(float left, float top, float right, float bottom, float confidence, int label):
14 | left(left), top(top), right(right), bottom(bottom), confidence(confidence), label(label){}
15 | };
16 |
17 | namespace Centernet{
18 | void centernet_inference();
19 | void centernet_inference_gpu();
20 | };
21 |
22 |
23 |
24 |
25 | #endif // CENTERNET_HPP
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/detr/detr.h:
--------------------------------------------------------------------------------
1 | #ifndef DETR_HPP
2 | #define DETR_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | namespace Detr{
10 | void detr_inference();
11 | };
12 |
13 | struct Pred
14 | {
15 | std::vector<std::vector<float>> bbox;
16 | std::vector<int> label;
17 | };
18 |
19 |
20 |
21 |
22 | #endif // DETR_HPP
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/hrnet/hrnet.h:
--------------------------------------------------------------------------------
1 | #ifndef HRNET_HPP
2 | #define HRNET_HPP
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | namespace Hrnet{
9 | void hrnet_inference();
10 | void hrnet_inference_gpu();
11 | };
12 |
13 |
14 |
15 | #endif // HRNET_HPP
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/unet/unet.h:
--------------------------------------------------------------------------------
1 | #ifndef UNET_HPP
2 | #define UNET_HPP
3 | #include <opencv2/opencv.hpp>
4 | #include <tuple>
5 |
6 | namespace Unet{
7 | void unet_inference();
8 | void render(cv::Mat& image, const cv::Mat& prob, const cv::Mat& iclass);
9 | std::tuple<cv::Mat, cv::Mat> post_process(float* output, int output_width, int output_height, int num_class, int ibatch);
10 | };
11 |
12 | #endif // UNET_HPP
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/vit/vit.h:
--------------------------------------------------------------------------------
1 | #ifndef VIT_HPP
2 | #define VIT_HPP
3 |
4 | namespace Vit{
5 | void vit_inference();
6 | };
7 |
8 | #endif // Vit_HPP
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/yolov5/yolov5.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV5_HPP
2 | #define YOLOV5_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | /////////////////////////////////////////////////////////////////////////////////////////
10 | // 封装接口类
11 | namespace YoloV5{
12 |
13 | struct Box{
14 | float left, top, right, bottom, confidence;
15 | int class_label;
16 |
17 | Box() = default;
18 |
19 | Box(float left, float top, float right, float bottom, float confidence, int class_label)
20 | :left(left), top(top), right(right), bottom(bottom), confidence(confidence), class_label(class_label){}
21 | };
22 | typedef std::vector<Box> BoxArray;
23 |
24 | class Infer{
25 | public:
26 | virtual std::shared_future<BoxArray> commit(const cv::Mat& input) = 0;
27 | };
28 |
29 | std::shared_ptr<Infer> create_infer(
30 | const std::string& file,
31 | int gpuid=0, float confidence_threshold=0.25, float nms_threshold=0.45
32 | );
33 | };
34 |
35 | #endif // YOLOV5_HPP
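A minimal caller-side sketch of the interface above; the engine file, input image and output name are illustrative, not taken from the project.

#include "yolov5.hpp"
#include <opencv2/opencv.hpp>

int main() {
    auto infer = YoloV5::create_infer("yolov5s.engine", 0, 0.25f, 0.45f);
    if (infer == nullptr) return -1;

    cv::Mat image = cv::imread("street.jpg");
    auto fut = infer->commit(image);      // enqueue the frame; returns a future immediately
    YoloV5::BoxArray boxes = fut.get();   // block until the worker thread has the result
    for (auto& box : boxes)
        cv::rectangle(image, cv::Point(box.left, box.top),
                      cv::Point(box.right, box.bottom), cv::Scalar(0, 0, 255), 2);
    cv::imwrite("yolov5-pred.jpg", image);
    return 0;
}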
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/yolov5seg/yolact.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/src/trt/demo-infer/yolov5seg/yolact.png
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/yolov5seg/yolov5seg.h:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV5SEG
2 | #define YOLOV5SEG
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | namespace Yolov5Seg
9 | {
10 | void yolov5Seg_inference();
11 | };
12 |
13 | #endif
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/yolov5seg/yolov5seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/src/trt/demo-infer/yolov5seg/yolov5seg.png
--------------------------------------------------------------------------------
/trt_cpp/src/trt/demo-infer/yolov7/yolov7.h:
--------------------------------------------------------------------------------
1 | #ifndef YOLOV7NET_HPP
2 | #define YOLOV7NET_HPP
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | namespace Yolov7
9 | {
10 | // void yolov7_inference();
11 | void yolov7_inference();
12 |
13 | void yolov7_inference_gpu();
14 | };
15 |
16 | #endif // YOLOV7NET_HPP
--------------------------------------------------------------------------------
/trt_cpp/workspace/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/cat.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/centernet-gpu—pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/centernet-gpu—pred.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/centernet-pred-street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/centernet-pred-street.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/centernet-pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/centernet-pred.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/detr-pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/detr-pred.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/flower.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/flower.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/hrnet-cuda-pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/hrnet-cuda-pred.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/hrnet-pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/hrnet-pred.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/person.png
--------------------------------------------------------------------------------
/trt_cpp/workspace/person1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/person1.jpeg
--------------------------------------------------------------------------------
/trt_cpp/workspace/person2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/person2.jpeg
--------------------------------------------------------------------------------
/trt_cpp/workspace/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/street.jpg
--------------------------------------------------------------------------------
/trt_cpp/workspace/warp-affine.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_cpp/workspace/warp-affine.jpg
--------------------------------------------------------------------------------
/trt_cpp/模型权重.md:
--------------------------------------------------------------------------------
1 | Model weights
2 |
3 | The Baidu Cloud (pan.baidu.com) link is:
4 |
5 | Link: https://pan.baidu.com/s/1JEW9qy0BGvyzCM_qZ9gPKw  Extraction code: dus7
6 |
--------------------------------------------------------------------------------
/trt_py/basic_infer/__pycache__/infer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/basic_infer/__pycache__/infer.cpython-39.pyc
--------------------------------------------------------------------------------
/trt_py/basic_infer/_init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/basic_infer/_init__.py
--------------------------------------------------------------------------------
/trt_py/basic_infer/warpaffine.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | def inv_mat(M):
5 |     # Invert a 2x3 affine matrix of the form [[s, 0, tx], [0, s, ty]]
6 |     k = M[0, 0]
7 |     b1 = M[0, 2]
8 |     b2 = M[1, 2]
9 |     return np.array([[1 / k, 0, -b1 / k],
10 |                      [0, 1 / k, -b2 / k]])
11 |
12 |
13 | def image_transfer(image, dst_size):
14 |     # Letterbox-style resize: scale the image into dst_size while keeping the
15 |     # aspect ratio and centering it; returns the warped image, M and its inverse
16 |     oh, ow = image.shape[:2]
17 |     dw, dh = dst_size  # dst_size follows the cv2 (width, height) convention
18 |     scale = min(dw / ow, dh / oh)
19 |
20 |     M = np.array([
21 |         [scale, 0, -scale * ow * 0.5 + dw * 0.5],
22 |         [0, scale, -scale * oh * 0.5 + dh * 0.5]
23 |     ])
24 |     return cv2.warpAffine(image, M, dst_size), M, inv_mat(M)
25 |
26 |
27 | if __name__ == '__main__':
28 |     img = cv2.imread("/hrnet/images/person2.png")
29 |     cv2.imshow("img_o", img)
30 |     print(img.shape)
31 |     img_d, M, inv = image_transfer(img, (192, 256))
32 |     print(img_d.shape)
33 |     cv2.imshow("img_d", img_d)
34 |     # Warp back to the original resolution with the inverse matrix
35 |     img_s = cv2.warpAffine(img_d, inv, img.shape[:2][::-1])
36 |     cv2.imshow("img_s", img_s)
37 |     cv2.waitKey(0)
--------------------------------------------------------------------------------
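Note: a minimal usage sketch for warpaffine.py above (not a file in the repo); the image path and the sample keypoint are made up for illustration. It shows how the inverse matrix from inv_mat maps a point found in the 192x256 crop back to original-image coordinates:

    import cv2
    import numpy as np
    from warpaffine import image_transfer  # assumes warpaffine.py is on sys.path

    img = cv2.imread("person.png")                   # hypothetical test image
    img_d, M, inv = image_transfer(img, (192, 256))  # (width, height), as passed to cv2.warpAffine
    pt_crop = np.array([96.0, 128.0, 1.0])           # hypothetical keypoint (x, y, 1) in the crop
    pt_orig = inv @ pt_crop                          # 2x3 inverse affine applied to a homogeneous point
    print(pt_orig)                                   # keypoint location in the original image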
/trt_py/build_engine/__pycache__/common.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/build_engine/__pycache__/common.cpython-39.pyc
--------------------------------------------------------------------------------
/trt_py/build_engine/batch_dynamic/build_engine_batch_image.py:
--------------------------------------------------------------------------------
1 | import tensorrt as trt
2 |
3 | # Build the logger, builder and network
4 | logger = trt.Logger(trt.Logger.WARNING)
5 | builder = trt.Builder(logger)
6 | builder.max_batch_size = 1  # ignored for explicit-batch networks
7 |
8 | network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
9 | parser = trt.OnnxParser(network, logger)
10 | # Parse the ONNX file and print any parser errors
11 | success = parser.parse_from_file("/home/rex/Desktop/tensorrt_learning/trt_py/build_engine/batch_dynamic/detr_sim.onnx")
12 | for idx in range(parser.num_errors):
13 |     print(parser.get_error(idx))
14 | if success:
15 |     print('Construction success!!!')
16 | # error handling for a failed parse would go in an else branch
17 |
18 | profile = builder.create_optimization_profile()
19 | # profile.set_shape("images", (1, 3, 640, 640), (8, 3, 640, 640), (16, 3, 640, 640))
20 | profile.set_shape("images", (1, 3, 800, 1066), (8, 3, 800, 1066), (16, 3, 800, 1066))
21 |
22 | # profile = builder.create_optimization_profile()
23 | # profile.set_shape("foo", (1, 3, 512, 512), (20, 3, 640, 640), (10, 3, 640, 640))
24 | config = builder.create_builder_config()
25 | config.add_optimization_profile(profile)
26 | config.max_workspace_size = 1 << 30  # 1 GiB workspace
27 | serialized_engine = builder.build_serialized_network(network, config)
28 | with open("/home/rex/Desktop/tensorrt_learning/trt_py/build_engine/batch_dynamic/hrnet.pyengine", "wb") as f:
29 |     print('Writing engine file...')
30 |     f.write(serialized_engine)
31 |     print('Engine built successfully!!!')
--------------------------------------------------------------------------------
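Note: a hedged sketch (not a file in the repo) of how the engine serialized by the script above could be loaded and given a concrete batch size inside its (1, 8, 16) optimization profile before inference; the engine path and the binding index 0 for the "images" input are assumptions carried over from the build script:

    import tensorrt as trt

    logger = trt.Logger(trt.Logger.WARNING)
    engine_path = "/home/rex/Desktop/tensorrt_learning/trt_py/build_engine/batch_dynamic/hrnet.pyengine"
    with open(engine_path, "rb") as f:
        engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())

    context = engine.create_execution_context()
    # pick a batch size between the profile's min (1) and max (16)
    context.set_binding_shape(0, (4, 3, 800, 1066))
    print(context.get_binding_shape(0))  # -> (4, 3, 800, 1066)
    # device buffers (e.g. via pycuda) and context.execute_v2(bindings) would follow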
/trt_py/centernet/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/centernet/images/cat.jpg
--------------------------------------------------------------------------------
/trt_py/centernet/images/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/centernet/images/street.jpg
--------------------------------------------------------------------------------
/trt_py/detr/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/detr/images/cat.jpg
--------------------------------------------------------------------------------
/trt_py/detr/images/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/detr/images/street.jpg
--------------------------------------------------------------------------------
/trt_py/hrnet/hrnet-pred.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/hrnet/hrnet-pred.jpg
--------------------------------------------------------------------------------
/trt_py/hrnet/images/person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/hrnet/images/person.png
--------------------------------------------------------------------------------
/trt_py/hrnet/images/person1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/hrnet/images/person1.png
--------------------------------------------------------------------------------
/trt_py/hrnet/images/person2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/hrnet/images/person2.png
--------------------------------------------------------------------------------
/trt_py/yolov5/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/yolov5/images/cat.jpg
--------------------------------------------------------------------------------
/trt_py/yolov5/images/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/yolov5/images/street.jpg
--------------------------------------------------------------------------------
/trt_py/yolov7/images/street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rex-LK/tensorrt_learning/45df1f33cab6a66a0d75c0bb26a3978d539dc888/trt_py/yolov7/images/street.jpg
--------------------------------------------------------------------------------