├── .gitignore ├── DETR_py ├── data │ └── 000000039769.jpg ├── inference.py └── weights_list.txt ├── Data_calib ├── 000000000139.jpg ├── 000000000285.jpg ├── 000000000632.jpg ├── 000000000724.jpg ├── 000000000776.jpg ├── 000000000785.jpg ├── 000000000802.jpg ├── 000000000872.jpg ├── 000000000885.jpg ├── 000000001000.jpg ├── 000000001268.jpg ├── 000000001296.jpg ├── 000000001353.jpg ├── 000000001425.jpg ├── 000000001490.jpg ├── 000000001503.jpg ├── 000000001532.jpg ├── 000000001584.jpg ├── 000000001675.jpg ├── 000000001761.jpg ├── 000000001818.jpg ├── 000000001993.jpg ├── 000000002006.jpg ├── 000000002149.jpg ├── 000000002153.jpg ├── 000000002157.jpg ├── 000000002261.jpg ├── 000000002299.jpg ├── 000000002431.jpg ├── 000000002473.jpg ├── 000000002532.jpg ├── 000000002587.jpg ├── 000000002592.jpg ├── 000000002685.jpg ├── 000000002923.jpg ├── 000000003156.jpg ├── 000000003255.jpg ├── 000000003501.jpg ├── 000000003553.jpg ├── 000000003661.jpg ├── 000000003845.jpg ├── 000000003934.jpg ├── 000000004134.jpg ├── 000000004395.jpg ├── 000000004495.jpg ├── 000000004765.jpg ├── 000000004795.jpg ├── 000000005001.jpg ├── 000000005037.jpg ├── 000000005060.jpg ├── 000000005193.jpg ├── 000000005477.jpg ├── 000000005503.jpg ├── 000000005529.jpg ├── 000000005586.jpg ├── 000000005600.jpg ├── 000000005992.jpg ├── 000000006012.jpg ├── 000000006040.jpg ├── 000000006213.jpg ├── 000000006460.jpg ├── 000000006471.jpg ├── 000000006614.jpg ├── 000000006723.jpg ├── 000000006763.jpg ├── 000000006771.jpg ├── 000000006818.jpg ├── 000000006894.jpg ├── 000000006954.jpg ├── 000000007088.jpg ├── 000000007108.jpg ├── 000000007278.jpg ├── 000000007281.jpg ├── 000000007386.jpg ├── 000000007511.jpg ├── 000000007574.jpg ├── 000000007784.jpg ├── 000000007795.jpg ├── 000000007816.jpg ├── 000000007818.jpg ├── 000000007888.jpg ├── 000000007977.jpg ├── 000000007991.jpg ├── 000000008021.jpg ├── 000000008211.jpg ├── 000000008277.jpg ├── 000000008532.jpg ├── 000000008629.jpg ├── 000000008690.jpg ├── 000000008762.jpg ├── 000000008844.jpg ├── 000000008899.jpg ├── 000000009378.jpg ├── 000000009400.jpg ├── 000000009448.jpg ├── 000000009483.jpg ├── 000000009590.jpg ├── 000000009769.jpg ├── 000000009772.jpg └── 000000009891.jpg ├── LICENSE ├── README.md ├── Real-ESRGAN_py ├── data │ └── 0030.jpg ├── experiments │ └── pretrained_models │ │ └── README.md ├── gen_wts.py ├── inference_realesrgan.py ├── inputs │ ├── 00003.png │ ├── 0014.jpg │ ├── 0030.jpg │ ├── ADE_val_00000114.jpg │ ├── OST_009.png │ └── tree_alpha_16bit.png ├── model_structure.txt ├── model_structure2.txt ├── realesrgan │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── utils.cpython-37.pyc │ │ └── version.cpython-37.pyc │ ├── archs │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── discriminator_arch.cpython-37.pyc │ │ │ └── srvgg_arch.cpython-37.pyc │ │ ├── discriminator_arch.py │ │ └── srvgg_arch.py │ ├── data │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── realesrgan_dataset.cpython-37.pyc │ │ │ └── realesrgan_paired_dataset.cpython-37.pyc │ │ ├── realesrgan_dataset.py │ │ └── realesrgan_paired_dataset.py │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── realesrgan_model.cpython-37.pyc │ │ │ └── realesrnet_model.cpython-37.pyc │ │ ├── realesrgan_model.py │ │ └── realesrnet_model.py │ ├── train.py │ ├── utils.py │ ├── version.py │ └── weights │ │ └── README.md ├── weight_list.txt └── weight_list2.txt ├── Resnet18_py ├── data │ └── 
panda0.jpg ├── inference.py ├── model_structure.txt ├── resnet18.py └── weight_list.txt ├── TPS_Motion_py ├── models structure │ ├── avd_network.txt │ ├── dense_motion_network.txt │ ├── inpainting.txt │ └── kp_detector.txt └── models weights │ ├── avd_network.txt │ ├── dense_motion_network.txt │ ├── inpainting.txt │ └── kp_detector.txt ├── TensorRT.sln ├── TensorRT ├── TensorRT.vcxproj ├── TensorRT.vcxproj.filters ├── TensorRT.vcxproj.user ├── calibrator.cpp ├── calibrator.h ├── common.cpp ├── common.hpp ├── detr_trt.cpp ├── logging.hpp ├── plugin_ex1.cpp ├── postprocess.cu ├── postprocess.hpp ├── preprocess.cu ├── preprocess.hpp ├── real-esrgan.cpp ├── resnet18.cpp ├── unet.cpp ├── utils.cpp ├── utils.hpp ├── vgg11.cpp ├── yololayer.cu ├── yololayer.hpp ├── yolov5s.cpp ├── yolov6.cpp └── yolov7.cpp ├── TestData3 └── OST_009.png ├── Unet_py ├── car0.jpg ├── data │ └── 00ad56bf7ee6_03.jpg ├── inference.py ├── input_data ├── model_structure.txt ├── unet │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── unet_model.cpython-37.pyc │ │ ├── unet_model.cpython-38.pyc │ │ ├── unet_parts.cpython-37.pyc │ │ └── unet_parts.cpython-38.pyc │ ├── unet_model.py │ └── unet_parts.py └── weight_list.txt ├── VGG11_py ├── data │ └── panda0.jpg ├── inference.py ├── requirements.txt └── vgg.py ├── Validation_py ├── __pycache__ │ ├── compare.cpython-37.pyc │ ├── compare.cpython-38.pyc │ └── compare.cpython-39.pyc ├── compare.py ├── valide.py ├── valide_preproc.py └── valideint.py ├── yolov5s_py ├── data │ └── zidane.jpg ├── gen_wts.py ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── common.cpython-37.pyc │ │ ├── experimental.cpython-37.pyc │ │ └── yolo.cpython-37.pyc │ ├── common.py │ ├── experimental.py │ ├── hub │ │ ├── anchors.yaml │ │ ├── yolov3-spp.yaml │ │ ├── yolov3-tiny.yaml │ │ ├── yolov3.yaml │ │ ├── yolov5-bifpn.yaml │ │ ├── yolov5-fpn.yaml │ │ ├── yolov5-p2.yaml │ │ ├── yolov5-p6.yaml │ │ ├── yolov5-p7.yaml │ │ ├── yolov5-panet.yaml │ │ ├── yolov5l6.yaml │ │ ├── yolov5m6.yaml │ │ ├── yolov5n6.yaml │ │ ├── yolov5s-ghost.yaml │ │ ├── yolov5s-transformer.yaml │ │ ├── yolov5s6.yaml │ │ └── yolov5x6.yaml │ ├── tf.py │ ├── yolo.py │ ├── yolov5l.yaml │ ├── yolov5m.yaml │ ├── yolov5n.yaml │ ├── yolov5s.yaml │ └── yolov5x.yaml ├── utils │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── augmentations.cpython-37.pyc │ │ ├── autoanchor.cpython-37.pyc │ │ ├── datasets.cpython-37.pyc │ │ ├── downloads.cpython-37.pyc │ │ ├── general.cpython-37.pyc │ │ ├── metrics.cpython-37.pyc │ │ ├── plots.cpython-37.pyc │ │ └── torch_utils.cpython-37.pyc │ ├── activations.py │ ├── augmentations.py │ ├── autoanchor.py │ ├── autobatch.py │ ├── aws │ │ ├── __init__.py │ │ ├── mime.sh │ │ ├── resume.py │ │ └── userdata.sh │ ├── callbacks.py │ ├── datasets.py │ ├── downloads.py │ ├── flask_rest_api │ │ ├── README.md │ │ ├── example_request.py │ │ └── restapi.py │ ├── general.py │ ├── google_app_engine │ │ ├── Dockerfile │ │ ├── additional_requirements.txt │ │ └── app.yaml │ ├── loggers │ │ ├── __init__.py │ │ └── wandb │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── log_dataset.py │ │ │ ├── sweep.py │ │ │ ├── sweep.yaml │ │ │ └── wandb_utils.py │ ├── loss.py │ ├── metrics.py │ ├── plots.py │ └── torch_utils.py └── yolov5s.pt ├── yolov6s_py ├── data │ └── image1.jpg ├── gen_wts.py ├── model_structure.txt ├── weight_structure.txt └── yolov6 │ ├── core │ ├── __pycache__ │ │ └── inferer.cpython-37.pyc │ ├── 
engine.py │ ├── evaler.py │ └── inferer.py │ ├── data │ ├── __pycache__ │ │ ├── data_augment.cpython-37.pyc │ │ └── datasets.cpython-37.pyc │ ├── data_augment.py │ ├── data_load.py │ └── datasets.py │ ├── layers │ ├── __pycache__ │ │ ├── common.cpython-37.pyc │ │ └── common.cpython-39.pyc │ └── common.py │ ├── models │ ├── __pycache__ │ │ ├── efficientrep.cpython-39.pyc │ │ ├── effidehead.cpython-39.pyc │ │ ├── reppan.cpython-39.pyc │ │ └── yolo.cpython-39.pyc │ ├── efficientrep.py │ ├── effidehead.py │ ├── loss.py │ ├── reppan.py │ └── yolo.py │ ├── solver │ └── build.py │ └── utils │ ├── Arial.ttf │ ├── __pycache__ │ ├── checkpoint.cpython-37.pyc │ ├── checkpoint.cpython-39.pyc │ ├── events.cpython-37.pyc │ ├── events.cpython-39.pyc │ ├── nms.cpython-37.pyc │ ├── torch_utils.cpython-37.pyc │ └── torch_utils.cpython-39.pyc │ ├── checkpoint.py │ ├── config.py │ ├── ema.py │ ├── envs.py │ ├── events.py │ ├── figure_iou.py │ ├── nms.py │ └── torch_utils.py └── yolov7_py ├── model_structure.txt └── weight_lists.txt /.gitignore: -------------------------------------------------------------------------------- 1 | x64 2 | .vs 3 | *.idea 4 | *.pth 5 | *.pt 6 | *.wts 7 | *.weights 8 | *.engine 9 | *.table 10 | C_Tensor 11 | py 12 | trt 13 | c 14 | p 15 | result.png 16 | TPS_Motion_py/assets -------------------------------------------------------------------------------- /DETR_py/data/000000039769.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/DETR_py/data/000000039769.jpg -------------------------------------------------------------------------------- /Data_calib/000000000139.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000139.jpg -------------------------------------------------------------------------------- /Data_calib/000000000285.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000285.jpg -------------------------------------------------------------------------------- /Data_calib/000000000632.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000632.jpg -------------------------------------------------------------------------------- /Data_calib/000000000724.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000724.jpg -------------------------------------------------------------------------------- /Data_calib/000000000776.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000776.jpg -------------------------------------------------------------------------------- /Data_calib/000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000785.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000000802.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000802.jpg -------------------------------------------------------------------------------- /Data_calib/000000000872.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000872.jpg -------------------------------------------------------------------------------- /Data_calib/000000000885.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000000885.jpg -------------------------------------------------------------------------------- /Data_calib/000000001000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001000.jpg -------------------------------------------------------------------------------- /Data_calib/000000001268.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001268.jpg -------------------------------------------------------------------------------- /Data_calib/000000001296.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001296.jpg -------------------------------------------------------------------------------- /Data_calib/000000001353.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001353.jpg -------------------------------------------------------------------------------- /Data_calib/000000001425.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001425.jpg -------------------------------------------------------------------------------- /Data_calib/000000001490.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001490.jpg -------------------------------------------------------------------------------- /Data_calib/000000001503.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001503.jpg -------------------------------------------------------------------------------- /Data_calib/000000001532.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001532.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000001584.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001584.jpg -------------------------------------------------------------------------------- /Data_calib/000000001675.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001675.jpg -------------------------------------------------------------------------------- /Data_calib/000000001761.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001761.jpg -------------------------------------------------------------------------------- /Data_calib/000000001818.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001818.jpg -------------------------------------------------------------------------------- /Data_calib/000000001993.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000001993.jpg -------------------------------------------------------------------------------- /Data_calib/000000002006.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002006.jpg -------------------------------------------------------------------------------- /Data_calib/000000002149.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002149.jpg -------------------------------------------------------------------------------- /Data_calib/000000002153.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002153.jpg -------------------------------------------------------------------------------- /Data_calib/000000002157.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002157.jpg -------------------------------------------------------------------------------- /Data_calib/000000002261.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002261.jpg -------------------------------------------------------------------------------- /Data_calib/000000002299.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002299.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000002431.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002431.jpg -------------------------------------------------------------------------------- /Data_calib/000000002473.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002473.jpg -------------------------------------------------------------------------------- /Data_calib/000000002532.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002532.jpg -------------------------------------------------------------------------------- /Data_calib/000000002587.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002587.jpg -------------------------------------------------------------------------------- /Data_calib/000000002592.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002592.jpg -------------------------------------------------------------------------------- /Data_calib/000000002685.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002685.jpg -------------------------------------------------------------------------------- /Data_calib/000000002923.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000002923.jpg -------------------------------------------------------------------------------- /Data_calib/000000003156.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003156.jpg -------------------------------------------------------------------------------- /Data_calib/000000003255.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003255.jpg -------------------------------------------------------------------------------- /Data_calib/000000003501.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003501.jpg -------------------------------------------------------------------------------- /Data_calib/000000003553.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003553.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000003661.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003661.jpg -------------------------------------------------------------------------------- /Data_calib/000000003845.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003845.jpg -------------------------------------------------------------------------------- /Data_calib/000000003934.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000003934.jpg -------------------------------------------------------------------------------- /Data_calib/000000004134.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000004134.jpg -------------------------------------------------------------------------------- /Data_calib/000000004395.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000004395.jpg -------------------------------------------------------------------------------- /Data_calib/000000004495.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000004495.jpg -------------------------------------------------------------------------------- /Data_calib/000000004765.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000004765.jpg -------------------------------------------------------------------------------- /Data_calib/000000004795.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000004795.jpg -------------------------------------------------------------------------------- /Data_calib/000000005001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005001.jpg -------------------------------------------------------------------------------- /Data_calib/000000005037.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005037.jpg -------------------------------------------------------------------------------- /Data_calib/000000005060.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005060.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000005193.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005193.jpg -------------------------------------------------------------------------------- /Data_calib/000000005477.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005477.jpg -------------------------------------------------------------------------------- /Data_calib/000000005503.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005503.jpg -------------------------------------------------------------------------------- /Data_calib/000000005529.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005529.jpg -------------------------------------------------------------------------------- /Data_calib/000000005586.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005586.jpg -------------------------------------------------------------------------------- /Data_calib/000000005600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005600.jpg -------------------------------------------------------------------------------- /Data_calib/000000005992.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000005992.jpg -------------------------------------------------------------------------------- /Data_calib/000000006012.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006012.jpg -------------------------------------------------------------------------------- /Data_calib/000000006040.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006040.jpg -------------------------------------------------------------------------------- /Data_calib/000000006213.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006213.jpg -------------------------------------------------------------------------------- /Data_calib/000000006460.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006460.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000006471.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006471.jpg -------------------------------------------------------------------------------- /Data_calib/000000006614.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006614.jpg -------------------------------------------------------------------------------- /Data_calib/000000006723.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006723.jpg -------------------------------------------------------------------------------- /Data_calib/000000006763.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006763.jpg -------------------------------------------------------------------------------- /Data_calib/000000006771.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006771.jpg -------------------------------------------------------------------------------- /Data_calib/000000006818.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006818.jpg -------------------------------------------------------------------------------- /Data_calib/000000006894.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006894.jpg -------------------------------------------------------------------------------- /Data_calib/000000006954.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000006954.jpg -------------------------------------------------------------------------------- /Data_calib/000000007088.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007088.jpg -------------------------------------------------------------------------------- /Data_calib/000000007108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007108.jpg -------------------------------------------------------------------------------- /Data_calib/000000007278.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007278.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000007281.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007281.jpg -------------------------------------------------------------------------------- /Data_calib/000000007386.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007386.jpg -------------------------------------------------------------------------------- /Data_calib/000000007511.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007511.jpg -------------------------------------------------------------------------------- /Data_calib/000000007574.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007574.jpg -------------------------------------------------------------------------------- /Data_calib/000000007784.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007784.jpg -------------------------------------------------------------------------------- /Data_calib/000000007795.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007795.jpg -------------------------------------------------------------------------------- /Data_calib/000000007816.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007816.jpg -------------------------------------------------------------------------------- /Data_calib/000000007818.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007818.jpg -------------------------------------------------------------------------------- /Data_calib/000000007888.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007888.jpg -------------------------------------------------------------------------------- /Data_calib/000000007977.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007977.jpg -------------------------------------------------------------------------------- /Data_calib/000000007991.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000007991.jpg 
-------------------------------------------------------------------------------- /Data_calib/000000008021.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008021.jpg -------------------------------------------------------------------------------- /Data_calib/000000008211.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008211.jpg -------------------------------------------------------------------------------- /Data_calib/000000008277.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008277.jpg -------------------------------------------------------------------------------- /Data_calib/000000008532.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008532.jpg -------------------------------------------------------------------------------- /Data_calib/000000008629.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008629.jpg -------------------------------------------------------------------------------- /Data_calib/000000008690.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008690.jpg -------------------------------------------------------------------------------- /Data_calib/000000008762.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008762.jpg -------------------------------------------------------------------------------- /Data_calib/000000008844.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008844.jpg -------------------------------------------------------------------------------- /Data_calib/000000008899.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000008899.jpg -------------------------------------------------------------------------------- /Data_calib/000000009378.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009378.jpg -------------------------------------------------------------------------------- /Data_calib/000000009400.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009400.jpg 
--------------------------------------------------------------------------------
/Data_calib/000000009448.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009448.jpg
--------------------------------------------------------------------------------
/Data_calib/000000009483.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009483.jpg
--------------------------------------------------------------------------------
/Data_calib/000000009590.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009590.jpg
--------------------------------------------------------------------------------
/Data_calib/000000009769.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009769.jpg
--------------------------------------------------------------------------------
/Data_calib/000000009772.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009772.jpg
--------------------------------------------------------------------------------
/Data_calib/000000009891.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Data_calib/000000009891.jpg
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 yhpark
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/data/0030.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/data/0030.jpg
--------------------------------------------------------------------------------
/Real-ESRGAN_py/experiments/pretrained_models/README.md:
--------------------------------------------------------------------------------
1 | # Put downloaded pre-trained models here
2 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/gen_wts.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import struct
4 | from basicsr.archs.rrdbnet_arch import RRDBNet
5 | from realesrgan import RealESRGANer
6 | from realesrgan.archs.srvgg_arch import SRVGGNetCompact
7 |
8 | def main():
9 |     """Generate a real-esrgan.wts weight file for the TensorRT API.
10 |     """
11 |     parser = argparse.ArgumentParser()
12 |     #parser.add_argument('-i', '--input', type=str, default='../TestData3', help='Input image or folder')
13 |     parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
14 |     parser.add_argument(
15 |         '-n',
16 |         '--model_name',
17 |         type=str,
18 |         default='RealESRGAN_x4plus',
19 |         help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
20 |               'realesr-animevideov3'))
21 |     parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
22 |     parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
23 |     parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
24 |     parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
25 |     parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
26 |     parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
27 |     parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
28 |     parser.add_argument(
29 |         '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
30 |     parser.add_argument(
31 |         '--alpha_upsampler',
32 |         type=str,
33 |         default='realesrgan',
34 |         help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
35 |     parser.add_argument(
36 |         '--ext',
37 |         type=str,
38 |         default='auto',
39 |         help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
40 |     args = parser.parse_args()
41 |
42 |     # determine models according to model names
43 |     args.model_name = args.model_name.split('.')[0]
44 |     if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']:  # x4 RRDBNet model
45 |         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
46 |         netscale = 4
47 |     elif args.model_name in ['RealESRGAN_x4plus_anime_6B']:  # x4 RRDBNet model with 6 blocks
48 |         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
49 |         netscale = 4
50 |     elif args.model_name in ['RealESRGAN_x2plus']:  # x2 RRDBNet model
51 |         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
52 |         netscale = 2
53 |     elif args.model_name in ['realesr-animevideov3']:  # x4 VGG-style model (XS size)
54 |         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
55 |         netscale = 4
56 |
57 |     # determine model paths
58 |     model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
59 |     if not os.path.isfile(model_path):
60 |         model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
61 |     if not os.path.isfile(model_path):
62 |         raise ValueError(f'Model {args.model_name} does not exist.')
63 |
64 |     # restorer
65 |     upsampler = RealESRGANer(
66 |         scale=netscale,
67 |         model_path=model_path,
68 |         model=model,
69 |         tile=args.tile,
70 |         tile_pad=args.tile_pad,
71 |         pre_pad=args.pre_pad,
72 |         half=not args.fp32)  # fp16 unless --fp32 is given
73 |
74 |     if os.path.isfile('real-esrgan.wts'):
75 |         print('real-esrgan.wts file already exists.')
76 |     else:
77 |         print('making real-esrgan.wts file ...')
78 |         f = open("real-esrgan.wts", 'w')
79 |         f.write("{}\n".format(len(upsampler.model.state_dict().keys())))
80 |         for k, v in upsampler.model.state_dict().items():
81 |             print('key: ', k)
82 |             print('value: ', v.shape)
83 |             vr = v.reshape(-1).cpu().numpy()
84 |             f.write("{} {}".format(k, len(vr)))
85 |             for vv in vr:
86 |                 f.write(" ")
87 |                 f.write(struct.pack(">f", float(vv)).hex())
88 |             f.write("\n")
89 |         print('Completed real-esrgan.wts file!')
90 |
91 | if __name__ == '__main__':
92 |     main()
93 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/inputs/00003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/00003.png
--------------------------------------------------------------------------------
/Real-ESRGAN_py/inputs/0014.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/0014.jpg
--------------------------------------------------------------------------------
/Real-ESRGAN_py/inputs/0030.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/0030.jpg
--------------------------------------------------------------------------------
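Note: gen_wts.py above serializes each state_dict tensor as one text line: the tensor name, its element count, then each float32 value as 8 hex characters in big-endian byte order, with the total tensor count on the first line. A minimal reader for sanity-checking a generated .wts file might look like the sketch below (the parse_wts helper is illustrative and not part of this repository):

# wts_check.py -- sketch: parse a .wts file written by gen_wts.py and
# report the tensor count (hypothetical helper, not part of this repo).
import struct

def parse_wts(path):
    weights = {}
    with open(path, 'r') as f:
        count = int(f.readline())          # first line: number of tensors
        for _ in range(count):
            parts = f.readline().split()   # "name length hex1 hex2 ..."
            name, length = parts[0], int(parts[1])
            assert len(parts) == 2 + length
            # each value is a big-endian float32 encoded as 8 hex chars
            weights[name] = [struct.unpack('>f', bytes.fromhex(h))[0] for h in parts[2:]]
    return weights

if __name__ == '__main__':
    w = parse_wts('real-esrgan.wts')
    print(len(w), 'tensors parsed')

--------------------------------------------------------------------------------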
/Real-ESRGAN_py/inputs/ADE_val_00000114.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/ADE_val_00000114.jpg
--------------------------------------------------------------------------------
/Real-ESRGAN_py/inputs/OST_009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/OST_009.png
--------------------------------------------------------------------------------
/Real-ESRGAN_py/inputs/tree_alpha_16bit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/inputs/tree_alpha_16bit.png
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from .archs import *
3 | from .data import *
4 | from .models import *
5 | from .utils import *
6 | from .version import *
7 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/__pycache__/version.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/__pycache__/version.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/archs/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from basicsr.utils import scandir
3 | from os import path as osp
4 |
5 | # automatically scan and import arch modules for registry
6 | # scan all the files that end with '_arch.py' under the archs folder
7 | arch_folder = osp.dirname(osp.abspath(__file__))
8 | arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
9 | # import all the arch modules
10 | _arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]
11 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/archs/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/archs/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
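Note: archs/__init__.py above relies on BasicSR's registry pattern: importing every *_arch.py module at package load runs each @ARCH_REGISTRY.register() decorator, after which an architecture class can be looked up by name. A self-contained toy sketch of the mechanism (simplified illustration, not the basicsr implementation):

# registry_demo.py -- toy version of the decorator-registry pattern used by
# basicsr/realesrgan (illustrative sketch, not part of this repository).

class Registry:
    """Minimal name -> class map."""
    def __init__(self, name):
        self.name = name
        self._obj_map = {}

    def register(self):
        # used as a decorator: @ARCH_REGISTRY.register()
        def deco(cls):
            self._obj_map[cls.__name__] = cls
            return cls
        return deco

    def get(self, name):
        return self._obj_map[name]

ARCH_REGISTRY = Registry('arch')

@ARCH_REGISTRY.register()
class TinyArch:
    pass

# importing every *_arch.py module (as archs/__init__.py does) triggers the
# decorators, so architectures can then be constructed by name:
arch_cls = ARCH_REGISTRY.get('TinyArch')
print(arch_cls().__class__.__name__)  # -> TinyArch

--------------------------------------------------------------------------------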
/Real-ESRGAN_py/realesrgan/archs/__pycache__/discriminator_arch.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/archs/__pycache__/discriminator_arch.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/archs/__pycache__/srvgg_arch.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/archs/__pycache__/srvgg_arch.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/archs/discriminator_arch.py:
--------------------------------------------------------------------------------
1 | from basicsr.utils.registry import ARCH_REGISTRY
2 | from torch import nn as nn
3 | from torch.nn import functional as F
4 | from torch.nn.utils import spectral_norm
5 |
6 |
7 | @ARCH_REGISTRY.register()
8 | class UNetDiscriminatorSN(nn.Module):
9 |     """Defines a U-Net discriminator with spectral normalization (SN)
10 |
11 |     It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
12 |
13 |     Args:
14 |         num_in_ch (int): Channel number of inputs. Default: 3.
15 |         num_feat (int): Channel number of base intermediate features. Default: 64.
16 |         skip_connection (bool): Whether to use skip connections between U-Net blocks. Default: True.
17 |     """
18 |
19 |     def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
20 |         super(UNetDiscriminatorSN, self).__init__()
21 |         self.skip_connection = skip_connection
22 |         norm = spectral_norm
23 |         # the first convolution
24 |         self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
25 |         # downsample
26 |         self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
27 |         self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
28 |         self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
29 |         # upsample
30 |         self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
31 |         self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
32 |         self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
33 |         # extra convolutions
34 |         self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
35 |         self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
36 |         self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
37 |
38 |     def forward(self, x):
39 |         # downsample
40 |         x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
41 |         x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
42 |         x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
43 |         x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
44 |
45 |         # upsample
46 |         x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)
47 |         x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
48 |
49 |         if self.skip_connection:
50 |             x4 = x4 + x2
51 |         x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)
52 |         x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
53 |
54 |         if self.skip_connection:
55 |             x5 = x5 + x1
56 |         x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)
57 |         x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
58 |
59 |         if self.skip_connection:
60 |             x6 = x6 + x0
61 |
62 |         # extra convolutions
63 |         out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
64 |         out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
65 |         out = self.conv9(out)
66 |
67 |         return out
68 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/archs/srvgg_arch.py:
--------------------------------------------------------------------------------
1 | from basicsr.utils.registry import ARCH_REGISTRY
2 | from torch import nn as nn
3 | from torch.nn import functional as F
4 |
5 |
6 | @ARCH_REGISTRY.register()
7 | class SRVGGNetCompact(nn.Module):
8 |     """A compact VGG-style network structure for super-resolution.
9 |
10 |     It is a compact network structure, which performs upsampling in the last layer and no convolution is
11 |     conducted on the HR feature space.
12 |
13 |     Args:
14 |         num_in_ch (int): Channel number of inputs. Default: 3.
15 |         num_out_ch (int): Channel number of outputs. Default: 3.
16 |         num_feat (int): Channel number of intermediate features. Default: 64.
17 |         num_conv (int): Number of convolution layers in the body network. Default: 16.
18 |         upscale (int): Upsampling factor. Default: 4.
19 |         act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
20 |     """
21 |
22 |     def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
23 |         super(SRVGGNetCompact, self).__init__()
24 |         self.num_in_ch = num_in_ch
25 |         self.num_out_ch = num_out_ch
26 |         self.num_feat = num_feat
27 |         self.num_conv = num_conv
28 |         self.upscale = upscale
29 |         self.act_type = act_type
30 |
31 |         self.body = nn.ModuleList()
32 |         # the first conv
33 |         self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
34 |         # the first activation
35 |         if act_type == 'relu':
36 |             activation = nn.ReLU(inplace=True)
37 |         elif act_type == 'prelu':
38 |             activation = nn.PReLU(num_parameters=num_feat)
39 |         elif act_type == 'leakyrelu':
40 |             activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
41 |         self.body.append(activation)
42 |
43 |         # the body structure
44 |         for _ in range(num_conv):
45 |             self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
46 |             # activation
47 |             if act_type == 'relu':
48 |                 activation = nn.ReLU(inplace=True)
49 |             elif act_type == 'prelu':
50 |                 activation = nn.PReLU(num_parameters=num_feat)
51 |             elif act_type == 'leakyrelu':
52 |                 activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
53 |             self.body.append(activation)
54 |
55 |         # the last conv
56 |         self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
57 |         # upsample
58 |         self.upsampler = nn.PixelShuffle(upscale)
59 |
60 |     def forward(self, x):
61 |         out = x
62 |         for i in range(0, len(self.body)):
63 |             out = self.body[i](out)
64 |
65 |         out = self.upsampler(out)
66 |         # add the nearest upsampled image, so that the network learns the residual
67 |         base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
68 |         out += base
69 |         return out
70 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/data/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from basicsr.utils import scandir
3 | from os import path as osp
4 |
5 | # automatically scan and import dataset modules for registry
6 | # scan all the files that end with '_dataset.py' under the data folder
7 | data_folder = osp.dirname(osp.abspath(__file__))
8 | dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
9 | # import all the dataset modules
10 | _dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames]
11 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/data/__pycache__/realesrgan_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/data/__pycache__/realesrgan_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/data/__pycache__/realesrgan_paired_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/data/__pycache__/realesrgan_paired_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/models/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from basicsr.utils import scandir
3 | from os import path as osp
4 |
5 | # automatically scan and import model modules for registry
6 | # scan all the files that end with '_model.py' under the model folder
7 | model_folder = osp.dirname(osp.abspath(__file__))
8 | model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
9 | # import all the model modules
10 | _model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames]
11 |
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Real-ESRGAN_py/realesrgan/models/__pycache__/realesrgan_model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/models/__pycache__/realesrgan_model.cpython-37.pyc
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Real-ESRGAN_py/realesrgan/models/__pycache__/realesrnet_model.cpython-37.pyc -------------------------------------------------------------------------------- /Real-ESRGAN_py/realesrgan/train.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | import os.path as osp 3 | from basicsr.train import train_pipeline 4 | 5 | import realesrgan.archs 6 | import realesrgan.data 7 | import realesrgan.models 8 | 9 | if __name__ == '__main__': 10 | root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) 11 | train_pipeline(root_path) 12 | -------------------------------------------------------------------------------- /Real-ESRGAN_py/realesrgan/version.py: -------------------------------------------------------------------------------- 1 | # GENERATED VERSION FILE 2 | # TIME: Sun May 8 00:58:37 2022 3 | __version__ = '0.2.5.0' 4 | __gitsha__ = '23d180f' 5 | version_info = (0, 2, 5, 0) 6 | -------------------------------------------------------------------------------- /Real-ESRGAN_py/realesrgan/weights/README.md: -------------------------------------------------------------------------------- 1 | # Weights 2 | 3 | Put the downloaded weights to this folder. 4 | -------------------------------------------------------------------------------- /Resnet18_py/data/panda0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Resnet18_py/data/panda0.jpg -------------------------------------------------------------------------------- /Resnet18_py/model_structure.txt: -------------------------------------------------------------------------------- 1 | ResNet( 2 | (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) 3 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 4 | (relu): ReLU(inplace=True) 5 | (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) 6 | (layer1): Sequential( 7 | (0): BasicBlock( 8 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 9 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 10 | (relu): ReLU(inplace=True) 11 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 12 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 13 | ) 14 | (1): BasicBlock( 15 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 16 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 17 | (relu): ReLU(inplace=True) 18 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 19 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 20 | ) 21 | ) 22 | (layer2): Sequential( 23 | (0): BasicBlock( 24 | (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 25 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 26 | (relu): ReLU(inplace=True) 27 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 28 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 29 | (downsample): Sequential( 30 | (0): Conv2d(64, 128, 
kernel_size=(1, 1), stride=(2, 2), bias=False) 31 | (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 32 | ) 33 | ) 34 | (1): BasicBlock( 35 | (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 36 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 37 | (relu): ReLU(inplace=True) 38 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 39 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 40 | ) 41 | ) 42 | (layer3): Sequential( 43 | (0): BasicBlock( 44 | (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 45 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 46 | (relu): ReLU(inplace=True) 47 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 48 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 49 | (downsample): Sequential( 50 | (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False) 51 | (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 52 | ) 53 | ) 54 | (1): BasicBlock( 55 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 56 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 57 | (relu): ReLU(inplace=True) 58 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 59 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 60 | ) 61 | ) 62 | (layer4): Sequential( 63 | (0): BasicBlock( 64 | (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 65 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 66 | (relu): ReLU(inplace=True) 67 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 68 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 69 | (downsample): Sequential( 70 | (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) 71 | (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 72 | ) 73 | ) 74 | (1): BasicBlock( 75 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 76 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 77 | (relu): ReLU(inplace=True) 78 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 79 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 80 | ) 81 | ) 82 | (avgpool): AdaptiveAvgPool2d(output_size=(1, 1)) 83 | (fc): Linear(in_features=512, out_features=1000, bias=True) 84 | ) -------------------------------------------------------------------------------- /Resnet18_py/resnet18.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | import torchvision 5 | import cv2 6 | import numpy as np 7 | 8 | def tofile(img, weight_path = "input2"): 9 | with open(weight_path, 'wb') as f: 10 | img.tofile(f) 11 | f.close() 12 | 13 | def main(): 14 | print('cuda device count: ', torch.cuda.device_count()) 15 | net = torchvision.models.resnet18(pretrained=True) 16 | net = net.eval() 17 | 
net = net.to('cuda:0') 18 | #print(net) 19 | 20 | img = cv2.imread("./data/panda0.jpg") 21 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 22 | img = img.transpose(2, 0, 1) 23 | img = img.astype(np.float32) 24 | #tofile(img) 25 | 26 | img = torch.from_numpy(img) 27 | img = img.unsqueeze(0) 28 | img = img.to('cuda:0') 29 | 30 | out = net(img) 31 | max_index = out.argmax(dim=1) 32 | max_value = out.max() 33 | print('resnet18 max index : {} , value : {}'.format(max_index.item(), max_value.item())) 34 | print('resnet18 out:', out.shape) 35 | torch.save(net, "resnet18.pth") 36 | 37 | if __name__ == '__main__': 38 | main() 39 | 40 | -------------------------------------------------------------------------------- /TPS_Motion_py/models structure/avd_network.txt: -------------------------------------------------------------------------------- 1 | AVDNetwork( 2 | (id_encoder): Sequential( 3 | (0): Linear(in_features=100, out_features=256, bias=True) 4 | (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 5 | (2): ReLU(inplace=True) 6 | (3): Linear(in_features=256, out_features=512, bias=True) 7 | (4): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 8 | (5): ReLU(inplace=True) 9 | (6): Linear(in_features=512, out_features=1024, bias=True) 10 | (7): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 11 | (8): ReLU(inplace=True) 12 | (9): Linear(in_features=1024, out_features=128, bias=True) 13 | ) 14 | (pose_encoder): Sequential( 15 | (0): Linear(in_features=100, out_features=256, bias=True) 16 | (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 17 | (2): ReLU(inplace=True) 18 | (3): Linear(in_features=256, out_features=512, bias=True) 19 | (4): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 20 | (5): ReLU(inplace=True) 21 | (6): Linear(in_features=512, out_features=1024, bias=True) 22 | (7): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 23 | (8): ReLU(inplace=True) 24 | (9): Linear(in_features=1024, out_features=128, bias=True) 25 | ) 26 | (decoder): Sequential( 27 | (0): Linear(in_features=256, out_features=1024, bias=True) 28 | (1): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 29 | (2): ReLU() 30 | (3): Linear(in_features=1024, out_features=512, bias=True) 31 | (4): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 32 | (5): ReLU() 33 | (6): Linear(in_features=512, out_features=256, bias=True) 34 | (7): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 35 | (8): ReLU() 36 | (9): Linear(in_features=256, out_features=100, bias=True) 37 | ) 38 | ) -------------------------------------------------------------------------------- /TPS_Motion_py/models structure/dense_motion_network.txt: -------------------------------------------------------------------------------- 1 | DenseMotionNetwork( 2 | (down): AntiAliasInterpolation2d() 3 | (hourglass): Hourglass( 4 | (encoder): Encoder( 5 | (down_blocks): ModuleList( 6 | (0): DownBlock2d( 7 | (conv): Conv2d(84, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 8 | (norm): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 9 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 10 | ) 11 | (1): DownBlock2d( 12 | (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 13 | (norm): InstanceNorm2d(256, eps=1e-05,
momentum=0.1, affine=True, track_running_stats=False) 14 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 15 | ) 16 | (2): DownBlock2d( 17 | (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 18 | (norm): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 19 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 20 | ) 21 | (3): DownBlock2d( 22 | (conv): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 23 | (norm): InstanceNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 24 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 25 | ) 26 | (4): DownBlock2d( 27 | (conv): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 28 | (norm): InstanceNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 29 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 30 | ) 31 | ) 32 | ) 33 | (decoder): Decoder( 34 | (up_blocks): ModuleList( 35 | (0): UpBlock2d( 36 | (conv): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 37 | (norm): InstanceNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 38 | ) 39 | (1): UpBlock2d( 40 | (conv): Conv2d(2048, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 41 | (norm): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 42 | ) 43 | (2): UpBlock2d( 44 | (conv): Conv2d(1024, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 45 | (norm): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 46 | ) 47 | (3): UpBlock2d( 48 | (conv): Conv2d(512, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 49 | (norm): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 50 | ) 51 | (4): UpBlock2d( 52 | (conv): Conv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 53 | (norm): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 54 | ) 55 | ) 56 | ) 57 | ) 58 | (maps): Conv2d(148, 11, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 59 | (up): ModuleList( 60 | (0): UpBlock2d( 61 | (conv): Conv2d(148, 74, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 62 | (norm): InstanceNorm2d(74, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 63 | ) 64 | (1): UpBlock2d( 65 | (conv): Conv2d(74, 37, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 66 | (norm): InstanceNorm2d(37, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 67 | ) 68 | ) 69 | (occlusion): ModuleList( 70 | (0): Conv2d(256, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 71 | (1): Conv2d(148, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 72 | (2): Conv2d(74, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 73 | (3): Conv2d(37, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 74 | ) 75 | ) -------------------------------------------------------------------------------- /TPS_Motion_py/models structure/inpainting.txt: -------------------------------------------------------------------------------- 1 | InpaintingNetwork( 2 | (first): SameBlock2d( 3 | (conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 4 | (norm): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 5 | ) 6 | (down_blocks): ModuleList( 7 | (0): DownBlock2d( 8 | (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 9 | (norm): 
InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 10 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 11 | ) 12 | (1): DownBlock2d( 13 | (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 14 | (norm): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 15 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 16 | ) 17 | (2): DownBlock2d( 18 | (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 19 | (norm): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 20 | (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 21 | ) 22 | ) 23 | (up_blocks): ModuleList( 24 | (0): UpBlock2d( 25 | (conv): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 26 | (norm): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 27 | ) 28 | (1): UpBlock2d( 29 | (conv): Conv2d(512, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 30 | (norm): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 31 | ) 32 | (2): UpBlock2d( 33 | (conv): Conv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 34 | (norm): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 35 | ) 36 | ) 37 | (resblock): ModuleList( 38 | (0): ResBlock2d( 39 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 40 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 41 | (norm1): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 42 | (norm2): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 43 | ) 44 | (1): ResBlock2d( 45 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 46 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 47 | (norm1): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 48 | (norm2): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 49 | ) 50 | (2): ResBlock2d( 51 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 52 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 53 | (norm1): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 54 | (norm2): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 55 | ) 56 | (3): ResBlock2d( 57 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 58 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 59 | (norm1): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 60 | (norm2): InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 61 | ) 62 | (4): ResBlock2d( 63 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 64 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 65 | (norm1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 66 | (norm2): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 67 | ) 68 | (5): ResBlock2d( 69 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 70 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) 71 | 
(norm1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 72 | (norm2): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) 73 | ) 74 | ) 75 | (final): Conv2d(64, 3, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)) 76 | ) -------------------------------------------------------------------------------- /TPS_Motion_py/models structure/kp_detector.txt: -------------------------------------------------------------------------------- 1 | KPDetector( 2 | (fg_encoder): ResNet( 3 | (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) 4 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 5 | (relu): ReLU(inplace=True) 6 | (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) 7 | (layer1): Sequential( 8 | (0): BasicBlock( 9 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 10 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 11 | (relu): ReLU(inplace=True) 12 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 13 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 14 | ) 15 | (1): BasicBlock( 16 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 17 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 18 | (relu): ReLU(inplace=True) 19 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 20 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 21 | ) 22 | ) 23 | (layer2): Sequential( 24 | (0): BasicBlock( 25 | (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 26 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 27 | (relu): ReLU(inplace=True) 28 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 29 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 30 | (downsample): Sequential( 31 | (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False) 32 | (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 33 | ) 34 | ) 35 | (1): BasicBlock( 36 | (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 37 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 38 | (relu): ReLU(inplace=True) 39 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 40 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 41 | ) 42 | ) 43 | (layer3): Sequential( 44 | (0): BasicBlock( 45 | (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 46 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 47 | (relu): ReLU(inplace=True) 48 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 49 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 50 | (downsample): Sequential( 51 | (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False) 52 | (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 53 | ) 54 | ) 55 | (1): BasicBlock( 56 | (conv1): Conv2d(256, 
256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 57 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 58 | (relu): ReLU(inplace=True) 59 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 60 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 61 | ) 62 | ) 63 | (layer4): Sequential( 64 | (0): BasicBlock( 65 | (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 66 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 67 | (relu): ReLU(inplace=True) 68 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 69 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 70 | (downsample): Sequential( 71 | (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) 72 | (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 73 | ) 74 | ) 75 | (1): BasicBlock( 76 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 77 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 78 | (relu): ReLU(inplace=True) 79 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 80 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) 81 | ) 82 | ) 83 | (avgpool): AvgPool2d(kernel_size=(8, 8), stride=(1, 1), padding=0) 84 | (fc): Linear(in_features=512, out_features=100, bias=True) 85 | ) 86 | ) -------------------------------------------------------------------------------- /TensorRT.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 15 4 | VisualStudioVersion = 15.0.28307.1525 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "TensorRT", "TensorRT\TensorRT.vcxproj", "{6AF45D49-E7C2-41F4-995D-5714C4EB1254}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|x64 = Debug|x64 11 | Debug|x86 = Debug|x86 12 | Release|x64 = Release|x64 13 | Release|x86 = Release|x86 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Debug|x64.ActiveCfg = Debug|x64 17 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Debug|x64.Build.0 = Debug|x64 18 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Debug|x86.ActiveCfg = Debug|x64 19 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Release|x64.ActiveCfg = Release|x64 20 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Release|x64.Build.0 = Release|x64 21 | {6AF45D49-E7C2-41F4-995D-5714C4EB1254}.Release|x86.ActiveCfg = Release|x64 22 | EndGlobalSection 23 | GlobalSection(SolutionProperties) = preSolution 24 | HideSolutionNode = FALSE 25 | EndGlobalSection 26 | GlobalSection(ExtensibilityGlobals) = postSolution 27 | SolutionGuid = {C3A8A302-BA2D-4A77-AF74-E22632B7499C} 28 | EndGlobalSection 29 | EndGlobal 30 | -------------------------------------------------------------------------------- /TensorRT/TensorRT.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | utils 6 | 7 | 8 | 9 | 10 | calibrate 11 | 12 | 13 | calibrate 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | plugin 26 | 27 | 28 | 
utils 29 | 30 | 31 | utils 32 | 33 | 34 | calibrate 35 | 36 | 37 | calibrate 38 | 39 | 40 | plugin 41 | 42 | 43 | plugin 44 | 45 | 46 | 47 | 48 | {b08e34f0-7631-4bd6-b666-41b735a20001} 49 | 50 | 51 | {1f6f354d-a2df-47f0-af86-7d80e4dc7112} 52 | 53 | 54 | {ee66ee0e-fda5-4789-9e05-a50d9fe108f7} 55 | 56 | 57 | 58 | 59 | plugin 60 | 61 | 62 | plugin 63 | 64 | 65 | plugin 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /TensorRT/TensorRT.vcxproj.user: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | -------------------------------------------------------------------------------- /TensorRT/calibrator.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/calibrator.cpp -------------------------------------------------------------------------------- /TensorRT/calibrator.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "NvInfer.h" 3 | #include <string> 4 | #include <vector> 5 | 6 | //! \class Int8EntropyCalibrator2 7 | //! 8 | //! \brief Implements Entropy calibrator 2. 9 | //! CalibrationAlgoType is kENTROPY_CALIBRATION_2. 10 | //! 11 | class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 12 | { 13 | public: 14 | Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, int process_type, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true); 15 | 16 | virtual ~Int8EntropyCalibrator2(); 17 | int getBatchSize() const noexcept override; 18 | bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept override; 19 | const void* readCalibrationCache(size_t& length) noexcept override; 20 | void writeCalibrationCache(const void* cache, size_t length) noexcept override; 21 | 22 | private: 23 | int batchsize_; 24 | int input_w_; 25 | int input_h_; 26 | int img_idx_; 27 | int process_type_; 28 | std::string img_dir_; 29 | std::vector<std::string> img_files_; 30 | size_t input_count_; 31 | size_t input_size_; 32 | std::string calib_table_name_; 33 | const char* input_blob_name_; 34 | bool read_cache_; 35 | void* device_input_; 36 | std::vector<char> calib_cache_; 37 | }; 38 |
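39 | // Usage sketch (illustrative; argument values are placeholders, process_type meaning depends on the .cpp): 40 | // Int8EntropyCalibrator2 calibrator(1, 640, 640, 0, "../Data_calib/", "calib_table", "data"); 41 | // config->setFlag(nvinfer1::BuilderFlag::kINT8); config->setInt8Calibrator(&calibrator); -------------------------------------------------------------------------------- /TensorRT/common.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/common.cpp -------------------------------------------------------------------------------- /TensorRT/common.hpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/common.hpp -------------------------------------------------------------------------------- /TensorRT/logging.hpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/logging.hpp -------------------------------------------------------------------------------- /TensorRT/plugin_ex1.cpp: --------------------------------------------------------------------------------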
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/plugin_ex1.cpp -------------------------------------------------------------------------------- /TensorRT/postprocess.cu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/postprocess.cu -------------------------------------------------------------------------------- /TensorRT/postprocess.hpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/postprocess.hpp -------------------------------------------------------------------------------- /TensorRT/preprocess.cu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/preprocess.cu -------------------------------------------------------------------------------- /TensorRT/preprocess.hpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/preprocess.hpp -------------------------------------------------------------------------------- /TensorRT/resnet18.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/resnet18.cpp -------------------------------------------------------------------------------- /TensorRT/utils.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "NvInfer.h" 3 | #include "cuda_runtime_api.h" 4 | #include <io.h> // access 5 | #include <fstream> 6 | #include <iostream> 7 | #include <sstream> 8 | #include <string> 9 | #include <vector> 10 | #include <map> 11 | #include <algorithm> 12 | #include <chrono> 13 | #include <random> 14 | #include <numeric> 15 | 16 | #include "NvInferRuntime.h" 17 | #include "NvInferPlugin.h" 18 | 19 | 20 | // Collect file names under a folder (DFS); Windows-only 21 | int SearchFile(const std::string& folder_path, std::vector<std::string> &file_names, bool recursive = false); 22 | 23 | // Print the values of a vector 24 | void valueCheck(std::vector<float>& Input, int IN = 1, int IC = 1, int IH = 1, int IW = 1, bool one = false); 25 | 26 | // Tensor initialization (scalar or arithmetic sequence) 27 | void initTensor(std::vector<float>& output, float start = 1, float step = 0); 28 | 29 | // Tensor initialization (random values) 30 | void initTensor(std::vector<float>& output, std::string random, float min = -10.f, float max = 10.f); 31 | 32 | // Tensor initialization (with explicit dimensions) 33 | void initTensor(std::vector<float>& output, int N, int C, int H, int W, float start = 1, float step = 0); 34 | 35 | // Write a buffer to a binary file (serialize) 36 | template <typename T> 37 | void tofile(std::vector<T> &Buffer, std::string fname = "../Validation_py/C_Tensor") { 38 | std::ofstream fs(fname, std::ios::binary); 39 | if (fs.is_open()) 40 | fs.write((const char*)Buffer.data(), Buffer.size() * sizeof(T)); 41 | fs.close(); 42 | std::cout << "Done!
file production to " << fname << std::endl; 43 | } 44 | // Load a buffer from a binary file (deserialize) 45 | // Usage example: 46 | // fromfile(input, "../Unet_py/input_data"); // load the Python preprocessing result 47 | template <typename T> 48 | void fromfile(std::vector<T>& Buffer, std::string fname = "../Validation_py/C_Tensor") { 49 | std::ifstream ifs(fname, std::ios::binary); 50 | if (ifs.is_open()) 51 | ifs.read((char*)Buffer.data(), Buffer.size() * sizeof(T)); 52 | ifs.close(); 53 | std::cout << "Done! file load from " << fname << std::endl; 54 | } 55 | // Returns the index of the maximum value 56 | // Usage example: 57 | // std::cout << "index : "<< argMax(output) << " , label name : " << class_names[argMax(output) ] << " , prob : " << output[argMax(output) ] << std::endl; 58 | int argMax(std::vector<float> &output); 59 | 60 | // colors table 61 | //std::vector<std::vector<int>> colors_table{ {244,67,54},{233,30,99},{156,39,176},{103,58,183},{63,81,181},{33,150,243},{3,169,244}, 62 | //{0,188,212}, {0,150,136}, {76,175,80}, {139,195,74}, {205,220,57}, {255,235,59}, {255,193,7}, 63 | //{255,152,0}, {255,87,34}, {121,85,72}, {158,158,158}, {96,125,139} }; 64 | 65 | // Load weights from files shared with TensorRT samples. 66 | // TensorRT weight files have a simple space delimited format: 67 | // [type] [size] <data x size in hex> 68 | std::map<std::string, nvinfer1::Weights> loadWeights(const std::string file); 69 | 70 | // Print Tensor dimensions information 71 | void show_dims(nvinfer1::ITensor* tensor); -------------------------------------------------------------------------------- /TensorRT/vgg11.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/vgg11.cpp -------------------------------------------------------------------------------- /TensorRT/yololayer.cu: -------------------------------------------------------------------------------- 1 | #include <cuda_runtime.h> 2 | #include "device_launch_parameters.h" 3 | #include <stdio.h> 4 | #include <math.h> 5 | #include <vector> 6 | #include <iostream> 7 | #include <algorithm> 8 | 9 | using namespace std; 10 | 11 | __global__ void kernel_yololayer_cu( 12 | float* output, 13 | float* input, 14 | float* anchor_grid, 15 | int height, int width, int channel, int grid_stride, int out_size, 16 | int tcount) 17 | { 18 | int pos = threadIdx.x + blockIdx.x * blockDim.x; 19 | if (pos >= tcount) return; 20 | 21 | int c_idx = pos % channel; 22 | int idx = pos / channel; 23 | int o_idx = idx % out_size; 24 | int b_idx = idx / out_size; 25 | 26 | int w_idx = idx % width; 27 | idx /= width; 28 | int h_idx = idx % height; 29 | idx /= height; 30 | int ic_idx = (idx % 3) * 2; 31 | 32 | int g_idx = b_idx * out_size * channel + o_idx * channel; 33 | int g_idx2 = b_idx * out_size * 6 + o_idx * 6; 34 | 35 | output[g_idx2] = (input[g_idx] * 2 - 0.5 + w_idx) * grid_stride; // x = (2*px - 0.5 + grid_x) * stride 36 | output[g_idx2 + 1] = (input[g_idx + 1] * 2 - 0.5 + h_idx) * grid_stride; // y = (2*py - 0.5 + grid_y) * stride 37 | output[g_idx2 + 2] = input[g_idx + 2] * input[g_idx + 2] * 4 * anchor_grid[ic_idx] * grid_stride; // w = (2*pw)^2 * anchor_w * stride 38 | output[g_idx2 + 3] = input[g_idx + 3] * input[g_idx + 3] * 4 * anchor_grid[ic_idx + 1] * grid_stride; // h = (2*ph)^2 * anchor_h * stride 39 | 40 | float box_prob = input[g_idx + 4]; 41 | if (box_prob < 0.1f) { // drop low-objectness candidates early 42 | output[g_idx2 + 4] = 0; 43 | output[g_idx2 + 5] = -1; 44 | } 45 | else { 46 | int class_id = 0; 47 | float max_cls_prob = 0.0; 48 | for (int i = 5; i < channel; ++i) { 49 | float p = input[g_idx + i]; 50 | if (p > max_cls_prob) { 51 | max_cls_prob = p; 52 | class_id = i - 5; 53 | } 54 | } 55 | output[g_idx2 + 4] = box_prob * max_cls_prob; // confidence = objectness * best class prob 56 | output[g_idx2 + 5] = class_id; 57 | } 58 | 59 | } 60 | void yololayer_cu(float*
output, float* input, float* anchor_grid, int batchSize, int height, int width, int CLASS_NUM, int Grid_stride, cudaStream_t stream) 61 | { 62 | int tcount = batchSize * height * width * 3 * (CLASS_NUM + 5); 63 | int blocks = 512; 64 | int grids = (tcount - 1) / blocks + 1; // ceil(tcount / blocks) 65 | 66 | kernel_yololayer_cu<<<grids, blocks, 0, stream>>>(output, input, anchor_grid, height, width, CLASS_NUM + 5, Grid_stride, height * width * 3, tcount); 67 | }
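68 | // Example dimensions (illustrative): a 640x640 input at stride 8 gives height = width = 80; 69 | // with CLASS_NUM = 80, tcount = batchSize * 80 * 80 * 3 * 85 = 1,632,000 per batch item, i.e. 3,188 blocks of 512 threads. -------------------------------------------------------------------------------- /TensorRT/yololayer.hpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/yololayer.hpp -------------------------------------------------------------------------------- /TensorRT/yolov5s.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TensorRT/yolov5s.cpp -------------------------------------------------------------------------------- /TestData3/OST_009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/TestData3/OST_009.png -------------------------------------------------------------------------------- /Unet_py/car0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/car0.jpg -------------------------------------------------------------------------------- /Unet_py/data/00ad56bf7ee6_03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/data/00ad56bf7ee6_03.jpg -------------------------------------------------------------------------------- /Unet_py/input_data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/input_data -------------------------------------------------------------------------------- /Unet_py/unet/__init__.py: -------------------------------------------------------------------------------- 1 | from .unet_model import UNet 2 | -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/unet_model.cpython-37.pyc: --------------------------------------------------------------------------------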
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/unet_model.cpython-37.pyc -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/unet_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/unet_model.cpython-38.pyc -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/unet_parts.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/unet_parts.cpython-37.pyc -------------------------------------------------------------------------------- /Unet_py/unet/__pycache__/unet_parts.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Unet_py/unet/__pycache__/unet_parts.cpython-38.pyc -------------------------------------------------------------------------------- /Unet_py/unet/unet_model.py: -------------------------------------------------------------------------------- 1 | """ Full assembly of the parts to form the complete network """ 2 | 3 | from .unet_parts import * 4 | 5 | 6 | class UNet(nn.Module): 7 | def __init__(self, n_channels, n_classes, bilinear=True): 8 | super(UNet, self).__init__() 9 | self.n_channels = n_channels 10 | self.n_classes = n_classes 11 | self.bilinear = bilinear 12 | 13 | self.inc = DoubleConv(n_channels, 64) 14 | self.down1 = Down(64, 128) 15 | self.down2 = Down(128, 256) 16 | self.down3 = Down(256, 512) 17 | factor = 2 if bilinear else 1 18 | self.down4 = Down(512, 1024 // factor) 19 | self.up1 = Up(1024, 512 // factor, bilinear) 20 | self.up2 = Up(512, 256 // factor, bilinear) 21 | self.up3 = Up(256, 128 // factor, bilinear) 22 | self.up4 = Up(128, 64, bilinear) 23 | self.outc = OutConv(64, n_classes) 24 | 25 | def forward(self, x): 26 | x1 = self.inc(x) 27 | x2 = self.down1(x1) 28 | x3 = self.down2(x2) 29 | x4 = self.down3(x3) 30 | x5 = self.down4(x4) 31 | x = self.up1(x5, x4) 32 | x = self.up2(x, x3) 33 | x = self.up3(x, x2) 34 | x = self.up4(x, x1) 35 | logits = self.outc(x) 36 | return logits 37 | -------------------------------------------------------------------------------- /Unet_py/unet/unet_parts.py: -------------------------------------------------------------------------------- 1 | """ Parts of the U-Net model """ 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | class DoubleConv(nn.Module): 9 | """(convolution => [BN] => ReLU) * 2""" 10 | 11 | def __init__(self, in_channels, out_channels, mid_channels=None): 12 | super().__init__() 13 | if not mid_channels: 14 | mid_channels = out_channels 15 | self.double_conv = nn.Sequential( 16 | nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1), 17 | nn.BatchNorm2d(mid_channels), 18 | nn.ReLU(inplace=True), 19 | nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1), 20 | nn.BatchNorm2d(out_channels), 21 | nn.ReLU(inplace=True) 22 | ) 23 | 24 | def forward(self, x): 25 | return self.double_conv(x) 26 | 27 | 28 | class Down(nn.Module): 29 | """Downscaling with maxpool then double conv""" 30 | 31 | def 
__init__(self, in_channels, out_channels): 32 | super().__init__() 33 | self.maxpool_conv = nn.Sequential( 34 | nn.MaxPool2d(2), 35 | DoubleConv(in_channels, out_channels) 36 | ) 37 | 38 | def forward(self, x): 39 | return self.maxpool_conv(x) 40 | 41 | 42 | class Up(nn.Module): 43 | """Upscaling then double conv""" 44 | 45 | def __init__(self, in_channels, out_channels, bilinear=True): 46 | super().__init__() 47 | 48 | # if bilinear, use the normal convolutions to reduce the number of channels 49 | if bilinear: 50 | self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 51 | self.conv = DoubleConv(in_channels, out_channels, in_channels // 2) 52 | else: 53 | self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2) 54 | self.conv = DoubleConv(in_channels, out_channels) 55 | 56 | def forward(self, x1, x2): 57 | x1 = self.up(x1) 58 | # input is CHW 59 | diffY = x2.size()[2] - x1.size()[2] 60 | diffX = x2.size()[3] - x1.size()[3] 61 | 62 | x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, 63 | diffY // 2, diffY - diffY // 2]) 64 | # if you have padding issues, see 65 | # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a 66 | # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd 67 | x = torch.cat([x2, x1], dim=1) 68 | return self.conv(x) 69 | 70 | 71 | class OutConv(nn.Module): 72 | def __init__(self, in_channels, out_channels): 73 | super(OutConv, self).__init__() 74 | self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) 75 | 76 | def forward(self, x): 77 | return self.conv(x) 78 | -------------------------------------------------------------------------------- /VGG11_py/data/panda0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/VGG11_py/data/panda0.jpg -------------------------------------------------------------------------------- /VGG11_py/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.9.0+cu111 2 | torchvision==0.10.0+cu111 3 | torchaudio===0.9.0 4 | torchsummary 5 | opencv-python -------------------------------------------------------------------------------- /VGG11_py/vgg.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | import torchvision 5 | import cv2 6 | import numpy as np 7 | 8 | def tofile(img, weight_path = "input2"): 9 | with open(weight_path, 'wb') as f: 10 | img.tofile(f) 11 | f.close() 12 | 13 | def main(): 14 | print('cuda device count: ', torch.cuda.device_count()) 15 | net = torchvision.models.vgg11(pretrained=True) 16 | net = net.eval() 17 | net = net.to('cuda:0') 18 | #print(net) 19 | 20 | img = cv2.imread("./data/panda0.jpg") 21 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 22 | img = img.transpose(2, 0, 1) 23 | img = img.astype(np.float32) 24 | #tofile(img) 25 | 26 | img = torch.from_numpy(img) 27 | img = img.unsqueeze(0) 28 | img = img.to('cuda:0') 29 | 30 | out = net(img) 31 | max_index = out.argmax(dim=1) 32 | max_value = out.max() 33 | print('vgg11 max index : {} , value : {}'.format(max_index.item(), max_value.item())) 34 | print('vgg11 out:', out.shape) 35 | torch.save(net, "vgg11.pth") 36 | 37 | if __name__ == '__main__': 38 | main() 39 | 40 |
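41 | # NOTE: the image is fed as raw 0-255 RGB floats (no /255 scaling or mean/std normalization); 42 | # any runtime whose output is compared against this script must preprocess identically.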
-------------------------------------------------------------------------------- /Validation_py/__pycache__/compare.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Validation_py/__pycache__/compare.cpython-37.pyc -------------------------------------------------------------------------------- /Validation_py/__pycache__/compare.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Validation_py/__pycache__/compare.cpython-38.pyc -------------------------------------------------------------------------------- /Validation_py/__pycache__/compare.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/Validation_py/__pycache__/compare.cpython-39.pyc -------------------------------------------------------------------------------- /Validation_py/compare.py: -------------------------------------------------------------------------------- 1 | 2 | def compare_two_tensor(output, output_c): 3 | if len(output) != len(output_c): 4 | print("Tensor size is not same : output_py=%d output_c=%d" % (len(output), len(output_c))) 5 | exit() 6 | 7 | max_idx = -1 8 | max_diff = 0 9 | cnt_diff = 0 10 | r0 = 0 11 | r = 0 12 | for idx in range(len(output)): 13 | if output[idx] != output_c[idx]: 14 | cnt_diff += 1 15 | diff = abs(output[idx] - output_c[idx]) 16 | if diff > max_diff: 17 | max_diff = diff 18 | max_idx = idx 19 | r0 = output[idx] 20 | r = output_c[idx] 21 | print("%6d output_py=%10.6f output_c=%10.6f diff=%.6f" % (idx, output[idx], output_c[idx], diff)) 22 | if max_diff > 0: 23 | print("cnt_total=%d cnt_diff=%d max_idx=%6d output_py=%10.6f output_c=%10.6f diff=%.6f" % ( 24 | len(output), cnt_diff, max_idx, r0, r, max_diff)) 25 | else: 26 | print("All data is same!") 27 | 28 | 29 | def compare_two_tensor_uint8(output, output_c): 30 | if len(output) != len(output_c): 31 | print("Tensor size is not same : output_py=%d output_c=%d" % (len(output), len(output_c))) 32 | exit() 33 | 34 | max_idx = -1 35 | max_diff = 0 36 | cnt_diff = 0 37 | r0 = 0 38 | r = 0 39 | for idx in range(len(output)): 40 | if output[idx] != output_c[idx]: 41 | cnt_diff += 1 42 | diff = abs(output[idx] - output_c[idx]) 43 | if diff > max_diff: 44 | max_diff = diff 45 | max_idx = idx 46 | r0 = output[idx] 47 | r = output_c[idx] 48 | print("%6d output_py=%d output_c=%d diff=%d" % (idx, output[idx], output_c[idx], diff)) 49 | if max_diff > 0: 50 | print("cnt_total=%d cnt_diff=%d max_idx=%6d output_py=%d output_c=%d diff=%d" % ( 51 | len(output), cnt_diff, max_idx, r0, r, max_diff)) 52 | else: 53 | print("All data is same!") 54 | 55 | def compare_two_tensor_uint8_2(output, output_c): 56 | if len(output) != len(output_c): 57 | print("Tensor size is not same : output_py=%d output_c=%d" % (len(output), len(output_c))) 58 | exit() 59 | 60 | max_idx = -1 61 | max_diff = 0 62 | cnt_diff = 0 63 | r0 = 0 64 | r = 0 65 | for idx in range(len(output)): 66 | diff = 0 67 | if output[idx] != output_c[idx]: 68 | cnt_diff += 1 69 | diff = abs(output[idx] - output_c[idx]) 70 | if diff > max_diff: 71 | max_diff = diff 72 | max_idx = idx 73 | r0 = output[idx] 74 | r = output_c[idx] 75 | print("%6d output_py=%d output_c=%d diff=%d" % (idx, 
output[idx], output_c[idx], diff)) 76 | if max_diff > 0: 77 | print("cnt_total=%d cnt_diff=%d max_idx=%6d output_py=%d output_c=%d diff=%d" % ( 78 | len(output), cnt_diff, max_idx, r0, r, max_diff)) 79 | else: 80 | print("All data is same!") 81 | 82 | def compare_two_tensor2(output, output_c): 83 | if len(output) != len(output_c): 84 | print("Tensor size is not same : output_py=%d output_c=%d" % (len(output), len(output_c))) 85 | exit() 86 | 87 | max_idx = -1 88 | max_diff = 0 89 | cnt_diff = 0 90 | r0 = 0 91 | r = 0 92 | for idx in range(len(output)): 93 | diff = 0 94 | if output[idx] != output_c[idx]: 95 | cnt_diff += 1 96 | diff = abs(output[idx] - output_c[idx]) 97 | if diff > max_diff: 98 | max_diff = diff 99 | max_idx = idx 100 | r0 = output[idx] 101 | r = output_c[idx] 102 | print("%6d output_py=%10.6f output_c=%10.6f diff=%.6f" % (idx, output[idx], output_c[idx], diff)) 103 | if max_diff > 0: 104 | print("cnt_total=%d cnt_diff=%d max_idx=%6d output_py=%10.6f output_c=%10.6f diff=%.6f" % ( 105 | len(output), cnt_diff, max_idx, r0, r, max_diff)) 106 | else: 107 | print("perfect match and done!") -------------------------------------------------------------------------------- /Validation_py/valide.py: -------------------------------------------------------------------------------- 1 | #import torch # torch 1.9.0+cu111 2 | import numpy as np 3 | from compare import * 4 | 5 | if 1: 6 | output_c = np.fromfile("c", dtype=np.float32) 7 | # for t in range(int(len(output_c)/6)): 8 | # tt = output_c[t * 6 + 4] 9 | # if tt > 0.6 : 10 | # print(tt) 11 | # print(output_c[t * 6 ]) 12 | # print(output_c[t * 6 + 1]) 13 | # print(output_c[t * 6 + 2]) 14 | # print(output_c[t * 6 + 3]) 15 | # print(output_c[t * 6 + 4]) 16 | # print(output_c[t * 6 + 5]) 17 | 18 | 19 | output_py = np.fromfile("p", dtype=np.float32) 20 | compare_two_tensor(output_py, output_c) 21 | else: 22 | output_c = np.fromfile("trt_1", dtype=np.int8) 23 | output_py = np.fromfile("py_0", dtype=np.int8) 24 | compare_two_tensor_uint8(output_py, output_c) 25 | #compare_two_tensor_uint8_2(output_py, output_c) -------------------------------------------------------------------------------- /Validation_py/valide_preproc.py: -------------------------------------------------------------------------------- 1 | #import torch # torch 1.9.0+cu111 2 | import numpy as np 3 | import cv2 4 | from compare import * 5 | 6 | img = cv2.imread('../TestDate/panda0.jpg') 7 | #img = cv2.resize(img, (224,224)) 8 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 9 | img = img.transpose(2, 0, 1) 10 | img = img.astype(np.float32) 11 | img = img/255 12 | output_py = img.flatten() 13 | 14 | output_c = np.fromfile("C_Preproc_Result", dtype=np.float32) 15 | 16 | compare_two_tensor2(output_py, output_c) 17 | -------------------------------------------------------------------------------- /Validation_py/valideint.py: -------------------------------------------------------------------------------- 1 | #import torch # torch 1.9.0+cu111 2 | import numpy as np 3 | from compare import * 4 | 5 | 6 | output_c = np.fromfile("c", dtype=np.int8) 7 | output_py = np.fromfile("p", dtype=np.int8) 8 | compare_two_tensor_uint8(output_py, output_c) 9 | #compare_two_tensor_uint8_2(output_py, output_c) -------------------------------------------------------------------------------- /yolov5s_py/data/zidane.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/data/zidane.jpg -------------------------------------------------------------------------------- /yolov5s_py/gen_wts.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import os 4 | import struct 5 | import torch 6 | from utils.torch_utils import select_device 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser(description='Convert .pt file to .wts') 10 | parser.add_argument('-w', '--weights', default='yolov5s.pt') 11 | parser.add_argument('-o', '--output', help='Output (.wts) file path (optional)') 12 | args = parser.parse_args() 13 | if not os.path.isfile(args.weights): 14 | raise SystemExit('Invalid input file') 15 | if not args.output: 16 | args.output = os.path.splitext(args.weights)[0] + '.wts' 17 | elif os.path.isdir(args.output): 18 | args.output = os.path.join( 19 | args.output, 20 | os.path.splitext(os.path.basename(args.weights))[0] + '.wts') 21 | return args.weights, args.output 22 | 23 | 24 | pt_file, wts_file = parse_args() 25 | 26 | # Initialize 27 | device = select_device('cpu') 28 | # Load model 29 | model = torch.load(pt_file, map_location=device)['model'].float() # load to FP32 30 | 31 | # # update anchor_grid info 32 | # anchor_grid = model.model[-1].anchors * model.model[-1].stride[...,None,None] 33 | # model.model[-1].anchor_grid = anchor_grid 34 | # delattr(model.model[-1], 'anchor_grid') # model.model[-1] is detect layer 35 | # model.model[-1].register_buffer("anchor_grid",anchor_grid) # The parameters are saved in the OrderedDict through the "register_buffer" method, and then saved to the weight file. 36 | 37 | anchor_grid = model.model[-1].anchors 38 | model.model[-1].register_buffer('anchor_grid0', anchor_grid[0, :, :]) 39 | model.model[-1].register_buffer('anchor_grid1', anchor_grid[1, :, :]) 40 | model.model[-1].register_buffer('anchor_grid2', anchor_grid[2, :, :]) 41 | model.to(device).eval() 42 | 43 | if 1: 44 | with open(wts_file, 'w') as f: 45 | f.write('{}\n'.format(len(model.state_dict().keys()))) 46 | for k, v in model.state_dict().items(): 47 | vr = v.reshape(-1).cpu().numpy() 48 | f.write('{} {} '.format(k, len(vr))) 49 | for vv in vr: 50 | f.write(' ') 51 | f.write(struct.pack('>f', float(vv)).hex()) # big-endian float32 -> 8 hex chars 52 | f.write('\n') 53 |
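54 | # Usage (illustrative): python gen_wts.py -w yolov5s.pt -o yolov5s.wts 55 | # .wts layout: first line = tensor count; then one space-delimited line per tensor (name, element count, hex values), the format loadWeights() in TensorRT/utils.hpp parses. -------------------------------------------------------------------------------- /yolov5s_py/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/models/__init__.py -------------------------------------------------------------------------------- /yolov5s_py/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/models/__pycache__/common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/models/__pycache__/common.cpython-37.pyc --------------------------------------------------------------------------------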
/yolov5s_py/models/__pycache__/experimental.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/models/__pycache__/experimental.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/models/__pycache__/yolo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/models/__pycache__/yolo.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/models/experimental.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Experimental modules 4 | """ 5 | import math 6 | 7 | import numpy as np 8 | import torch 9 | import torch.nn as nn 10 | 11 | from models.common import Conv 12 | from utils.downloads import attempt_download 13 | 14 | 15 | class CrossConv(nn.Module): 16 | # Cross Convolution Downsample 17 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 18 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 19 | super().__init__() 20 | c_ = int(c2 * e) # hidden channels 21 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 22 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 23 | self.add = shortcut and c1 == c2 24 | 25 | def forward(self, x): 26 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 27 | 28 | 29 | class Sum(nn.Module): 30 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 31 | def __init__(self, n, weight=False): # n: number of inputs 32 | super().__init__() 33 | self.weight = weight # apply weights boolean 34 | self.iter = range(n - 1) # iter object 35 | if weight: 36 | self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights 37 | 38 | def forward(self, x): 39 | y = x[0] # no weight 40 | if self.weight: 41 | w = torch.sigmoid(self.w) * 2 42 | for i in self.iter: 43 | y = y + x[i + 1] * w[i] 44 | else: 45 | for i in self.iter: 46 | y = y + x[i + 1] 47 | return y 48 | 49 | 50 | class MixConv2d(nn.Module): 51 | # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 52 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy 53 | super().__init__() 54 | n = len(k) # number of convolutions 55 | if equal_ch: # equal c_ per group 56 | i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices 57 | c_ = [(i == g).sum() for g in range(n)] # intermediate channels 58 | else: # equal weight.numel() per group 59 | b = [c2] + [0] * n 60 | a = np.eye(n + 1, n, k=-1) 61 | a -= np.roll(a, 1, axis=1) 62 | a *= np.array(k) ** 2 63 | a[0] = 1 64 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 65 | 66 | self.m = nn.ModuleList( 67 | [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) 68 | self.bn = nn.BatchNorm2d(c2) 69 | self.act = nn.SiLU() 70 | 71 | def forward(self, x): 72 | return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 73 | 74 | 75 | class Ensemble(nn.ModuleList): 76 | # Ensemble of models 77 | def __init__(self): 78 | super().__init__() 79 | 80 | def forward(self, x, augment=False, profile=False, visualize=False): 81 | y = [] 82 | for module in self: 83 | 
y.append(module(x, augment, profile, visualize)[0]) 84 | # y = torch.stack(y).max(0)[0] # max ensemble 85 | # y = torch.stack(y).mean(0) # mean ensemble 86 | y = torch.cat(y, 1) # nms ensemble 87 | return y, None # inference, train output 88 | 89 | 90 | def attempt_load(weights, map_location=None, inplace=True, fuse=True): 91 | from models.yolo import Detect, Model 92 | 93 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 94 | model = Ensemble() 95 | for w in weights if isinstance(weights, list) else [weights]: 96 | ckpt = torch.load(attempt_download(w), map_location=map_location) # load 97 | if fuse: 98 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model 99 | else: 100 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse 101 | 102 | # Compatibility updates 103 | for m in model.modules(): 104 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: 105 | m.inplace = inplace # pytorch 1.7.0 compatibility 106 | if type(m) is Detect: 107 | if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility 108 | delattr(m, 'anchor_grid') 109 | setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) 110 | elif type(m) is Conv: 111 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 112 | 113 | if len(model) == 1: 114 | return model[-1] # return model 115 | else: 116 | print(f'Ensemble created with {weights}\n') 117 | for k in ['names']: 118 | setattr(model, k, getattr(model[-1], k)) 119 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride 120 | return model # return ensemble 121 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | # Default anchors for COCO data 3 | 4 | 5 | # P5 ------------------------------------------------------------------------------------------------------------------- 6 | # P5-640: 7 | anchors_p5_640: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | 13 | # P6 ------------------------------------------------------------------------------------------------------------------- 14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 15 | anchors_p6_640: 16 | - [9,11, 21,19, 17,41] # P3/8 17 | - [43,32, 39,70, 86,64] # P4/16 18 | - [65,131, 134,130, 120,265] # P5/32 19 | - [282,180, 247,354, 512,387] # P6/64 20 | 21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 22 | anchors_p6_1280: 23 | - [19,27, 44,40, 38,94] # P3/8 24 | - [96,68, 86,152, 180,137] # P4/16 25 | - [140,301, 303,264, 238,542] # P5/32 26 | - [436,615, 739,380, 925,792] # P6/64 27 | 28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 29 | anchors_p6_1920: 30 | - [28,41, 
67,59, 57,141] # P3/8 31 | - [144,103, 129,227, 270,205] # P4/16 32 | - [209,452, 455,396, 358,812] # P5/32 33 | - [653,922, 1109,570, 1387,1187] # P6/64 34 | 35 | 36 | # P7 ------------------------------------------------------------------------------------------------------------------- 37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 38 | anchors_p7_640: 39 | - [11,11, 13,30, 29,20] # P3/8 40 | - [30,46, 61,38, 39,92] # P4/16 41 | - [78,80, 146,66, 79,163] # P5/32 42 | - [149,150, 321,143, 157,303] # P6/64 43 | - [257,402, 359,290, 524,372] # P7/128 44 | 45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 46 | anchors_p7_1280: 47 | - [19,22, 54,36, 32,77] # P3/8 48 | - [70,83, 138,71, 75,173] # P4/16 49 | - [165,159, 148,334, 375,151] # P5/32 50 | - [334,317, 251,626, 499,474] # P6/64 51 | - [750,326, 534,814, 1079,818] # P7/128 52 | 53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 54 | anchors_p7_1920: 55 | - [29,34, 81,55, 47,115] # P3/8 56 | - [105,124, 207,107, 113,259] # P4/16 57 | - [247,238, 222,500, 563,227] # P5/32 58 | - [501,476, 376,939, 749,711] # P6/64 59 | - [1126,489, 801,1222, 1618,1227] # P7/128 60 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, 
[256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, [1, 1]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | 
-------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-bifpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 BiFPN head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 FPN head 28 | head: 29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # 
number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # auto-anchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [1024]], 21 | [-1, 1, SPPF, [1024, 5]], # 9 22 | ] 23 | 24 | # YOLOv5 v6.0 head 25 | head: 26 | [[-1, 1, Conv, [512, 1, 1]], 27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 29 | [-1, 3, C3, [512, False]], # 13 30 | 31 | [-1, 1, Conv, [256, 1, 1]], 32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 35 | 36 | [-1, 1, Conv, [128, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2 39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) 40 | 41 | [-1, 1, Conv, [128, 3, 2]], 42 | [[-1, 18], 1, Concat, [1]], # cat head P3 43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 14], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 10], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large) 52 | 53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) 54 | ] 55 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # auto-anchor 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [768]], 21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 22 | [-1, 3, C3, [1024]], 23 | [-1, 1, SPPF, [1024, 5]], # 11 24 | ] 25 | 26 | # YOLOv5 v6.0 head 27 | head: 28 | [[-1, 1, Conv, [768, 1, 1]], 29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 31 | [-1, 3, C3, [768, False]], # 15 32 | 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 36 | [-1, 3, C3, [512, False]], # 19 37 | 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 42 | 43 | [-1, 1, Conv, [256, 3, 2]], 44 | [[-1, 20], 1, Concat, [1]], # cat head P4 45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 46 | 47 | [-1, 1, Conv, [512, 3, 2]], 48 | [[-1, 16], 1, Concat, [1]], # cat head P5 49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 50 | 51 | [-1, 1, Conv, [768, 3, 2]], 
52 | [[-1, 12], 1, Concat, [1]], # cat head P6 53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 54 | 55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 56 | ] 57 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # auto-anchor 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [768]], 21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 22 | [-1, 3, C3, [1024]], 23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 24 | [-1, 3, C3, [1280]], 25 | [-1, 1, SPPF, [1280, 5]], # 13 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [1024, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6 33 | [-1, 3, C3, [1024, False]], # 17 34 | 35 | [-1, 1, Conv, [768, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 38 | [-1, 3, C3, [768, False]], # 21 39 | 40 | [-1, 1, Conv, [512, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 43 | [-1, 3, C3, [512, False]], # 25 44 | 45 | [-1, 1, Conv, [256, 1, 1]], 46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small) 49 | 50 | [-1, 1, Conv, [256, 3, 2]], 51 | [[-1, 26], 1, Concat, [1]], # cat head P4 52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium) 53 | 54 | [-1, 1, Conv, [512, 3, 2]], 55 | [[-1, 22], 1, Concat, [1]], # cat head P5 56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large) 57 | 58 | [-1, 1, Conv, [768, 3, 2]], 59 | [[-1, 18], 1, Concat, [1]], # cat head P6 60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) 61 | 62 | [-1, 1, Conv, [1024, 3, 2]], 63 | [[-1, 14], 1, Concat, [1]], # cat head P7 64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) 65 | 66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) 67 | ] 68 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 
| [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5l6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5m6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 
backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5n6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, 
[1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5s-ghost.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3Ghost, [128]], 18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3Ghost, [256]], 20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3Ghost, [512]], 22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3Ghost, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, GhostConv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3Ghost, [512, False]], # 13 33 | 34 | [-1, 1, GhostConv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, GhostConv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, GhostConv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5s-transformer.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 
3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5s6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /yolov5s_py/models/hub/yolov5x6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /yolov5s_py/models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 
7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/yolov5n.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 
| [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5s_py/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | utils/initialization 4 | """ 5 | 6 | 7 | def notebook_init(): 8 | # For YOLOv5 notebooks 9 | print('Checking setup...') 10 | from IPython import display # to display images and clear console output 11 | 12 | from utils.general import emojis 13 | from utils.torch_utils import select_device # YOLOv5 imports 14 | 15 | display.clear_output() 16 | select_device(newline=False) 17 | print(emojis('Setup complete ✅')) 18 | return display 19 | -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/augmentations.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/augmentations.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/autoanchor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/autoanchor.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/datasets.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/datasets.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/downloads.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/downloads.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/general.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/general.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/plots.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/plots.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/__pycache__/torch_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/__pycache__/torch_utils.cpython-37.pyc -------------------------------------------------------------------------------- /yolov5s_py/utils/activations.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Activation functions 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | 10 | 11 | # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- 12 | class SiLU(nn.Module): # export-friendly version of nn.SiLU() 13 | @staticmethod 14 | def 
forward(x): 15 | return x * torch.sigmoid(x) 16 | 17 | 18 | class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() 19 | @staticmethod 20 | def forward(x): 21 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML 22 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX 23 | 24 | 25 | # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- 26 | class Mish(nn.Module): 27 | @staticmethod 28 | def forward(x): 29 | return x * F.softplus(x).tanh() 30 | 31 | 32 | class MemoryEfficientMish(nn.Module): 33 | class F(torch.autograd.Function): 34 | @staticmethod 35 | def forward(ctx, x): 36 | ctx.save_for_backward(x) 37 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 38 | 39 | @staticmethod 40 | def backward(ctx, grad_output): 41 | x = ctx.saved_tensors[0] 42 | sx = torch.sigmoid(x) 43 | fx = F.softplus(x).tanh() 44 | return grad_output * (fx + x * sx * (1 - fx * fx)) 45 | 46 | def forward(self, x): 47 | return self.F.apply(x) 48 | 49 | 50 | # FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- 51 | class FReLU(nn.Module): 52 | def __init__(self, c1, k=3): # ch_in, kernel 53 | super().__init__() 54 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 55 | self.bn = nn.BatchNorm2d(c1) 56 | 57 | def forward(self, x): 58 | return torch.max(x, self.bn(self.conv(x))) 59 | 60 | 61 | # ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- 62 | class AconC(nn.Module): 63 | r""" ACON activation (activate or not). 64 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter 65 | according to "Activate or Not: Learning Customized Activation" . 66 | """ 67 | 68 | def __init__(self, c1): 69 | super().__init__() 70 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 71 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 72 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) 73 | 74 | def forward(self, x): 75 | dpx = (self.p1 - self.p2) * x 76 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x 77 | 78 | 79 | class MetaAconC(nn.Module): 80 | r""" ACON activation (activate or not). 81 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network 82 | according to "Activate or Not: Learning Customized Activation" . 
83 | """ 84 | 85 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r 86 | super().__init__() 87 | c2 = max(r, c1 // r) 88 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 89 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 90 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) 91 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) 92 | # self.bn1 = nn.BatchNorm2d(c2) 93 | # self.bn2 = nn.BatchNorm2d(c1) 94 | 95 | def forward(self, x): 96 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) 97 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 98 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable 99 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed 100 | dpx = (self.p1 - self.p2) * x 101 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x 102 | -------------------------------------------------------------------------------- /yolov5s_py/utils/autobatch.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Auto-batch utils 4 | """ 5 | 6 | from copy import deepcopy 7 | 8 | import numpy as np 9 | import torch 10 | from torch.cuda import amp 11 | 12 | from utils.general import LOGGER, colorstr 13 | from utils.torch_utils import profile 14 | 15 | 16 | def check_train_batch_size(model, imgsz=640): 17 | # Check YOLOv5 training batch size 18 | with amp.autocast(): 19 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size 20 | 21 | 22 | def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): 23 | # Automatically estimate best batch size to use `fraction` of available CUDA memory 24 | # Usage: 25 | # import torch 26 | # from utils.autobatch import autobatch 27 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) 28 | # print(autobatch(model)) 29 | 30 | prefix = colorstr('AutoBatch: ') 31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') 32 | device = next(model.parameters()).device # get model device 33 | if device.type == 'cpu': 34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') 35 | return batch_size 36 | 37 | d = str(device).upper() # 'CUDA:0' 38 | properties = torch.cuda.get_device_properties(device) # device properties 39 | t = properties.total_memory / 1024 ** 3 # (GiB) 40 | r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) 41 | a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) 42 | f = t - (r + a) # free inside reserved 43 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') 44 | 45 | batch_sizes = [1, 2, 4, 8, 16] 46 | try: 47 | img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] 48 | y = profile(img, model, n=3, device=device) 49 | except Exception as e: 50 | LOGGER.warning(f'{prefix}{e}') 51 | 52 | y = [x[2] for x in y if x] # memory [2] 53 | batch_sizes = batch_sizes[:len(y)] 54 | p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit 55 | b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) 56 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') 57 | return b 58 | -------------------------------------------------------------------------------- /yolov5s_py/utils/aws/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/aws/__init__.py -------------------------------------------------------------------------------- /yolov5s_py/utils/aws/mime.sh: -------------------------------------------------------------------------------- 1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ 2 | # This script will run on every instance restart, not only on first start 3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- 4 | 5 | Content-Type: multipart/mixed; boundary="//" 6 | MIME-Version: 1.0 7 | 8 | --// 9 | Content-Type: text/cloud-config; charset="us-ascii" 10 | MIME-Version: 1.0 11 | Content-Transfer-Encoding: 7bit 12 | Content-Disposition: attachment; filename="cloud-config.txt" 13 | 14 | #cloud-config 15 | cloud_final_modules: 16 | - [scripts-user, always] 17 | 18 | --// 19 | Content-Type: text/x-shellscript; charset="us-ascii" 20 | MIME-Version: 1.0 21 | Content-Transfer-Encoding: 7bit 22 | Content-Disposition: attachment; filename="userdata.txt" 23 | 24 | #!/bin/bash 25 | # --- paste contents of userdata.sh here --- 26 | --// 27 | -------------------------------------------------------------------------------- /yolov5s_py/utils/aws/resume.py: -------------------------------------------------------------------------------- 1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings 2 | # Usage: $ python utils/aws/resume.py 3 | 4 | import os 5 | import sys 6 | from pathlib import Path 7 | 8 | import torch 9 | import yaml 10 | 11 | FILE = Path(__file__).resolve() 12 | ROOT = FILE.parents[2] # YOLOv5 root directory 13 | if str(ROOT) not in sys.path: 14 | sys.path.append(str(ROOT)) # add ROOT to PATH 15 | 16 | port = 0 # --master_port 17 | path = Path('').resolve() 18 | for last in path.rglob('*/**/last.pt'): 19 | ckpt = torch.load(last) 20 | if ckpt['optimizer'] is None: 21 | continue 22 | 23 | # Load opt.yaml 24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: 25 | opt = yaml.safe_load(f) 26 | 27 | # Get device count 28 | d = opt['device'].split(',') # devices 29 | nd = len(d) # number of devices 30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel 31 | 32 | if ddp: # multi-GPU 33 | port += 1 34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' 35 | else: # single-GPU 36 | cmd = f'python train.py --resume {last}' 37 | 38 | cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run as a background process 39 | print(cmd) 40 | os.system(cmd) 41 | -------------------------------------------------------------------------------- /yolov5s_py/utils/aws/userdata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html 3 | # This script will run only once on first instance start (for a re-start script see mime.sh) 4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir 5 | # Use >300 GB SSD 6 | 7 | cd /home/ubuntu 8 | if [ ! -d yolov5 ]; then 9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker 10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 11 | cd yolov5 12 | bash data/scripts/get_coco.sh && echo "COCO done."
& 13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & 14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & 15 | wait && echo "All tasks done." # finish background tasks 16 | else 17 | echo "Running re-start script." # resume interrupted runs 18 | i=0 19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' 20 | while IFS= read -r id; do 21 | ((i++)) 22 | echo "restarting container $i: $id" 23 | sudo docker start $id 24 | # sudo docker exec -it $id python train.py --resume # single-GPU 25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario 26 | done <<<"$list" 27 | fi 28 | -------------------------------------------------------------------------------- /yolov5s_py/utils/callbacks.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Callback utils 4 | """ 5 | 6 | 7 | class Callbacks: 8 | """" 9 | Handles all registered callbacks for YOLOv5 Hooks 10 | """ 11 | 12 | # Define the available callbacks 13 | _callbacks = { 14 | 'on_pretrain_routine_start': [], 15 | 'on_pretrain_routine_end': [], 16 | 17 | 'on_train_start': [], 18 | 'on_train_epoch_start': [], 19 | 'on_train_batch_start': [], 20 | 'optimizer_step': [], 21 | 'on_before_zero_grad': [], 22 | 'on_train_batch_end': [], 23 | 'on_train_epoch_end': [], 24 | 25 | 'on_val_start': [], 26 | 'on_val_batch_start': [], 27 | 'on_val_image_end': [], 28 | 'on_val_batch_end': [], 29 | 'on_val_end': [], 30 | 31 | 'on_fit_epoch_end': [], # fit = train + val 32 | 'on_model_save': [], 33 | 'on_train_end': [], 34 | 35 | 'teardown': [], 36 | } 37 | 38 | def register_action(self, hook, name='', callback=None): 39 | """ 40 | Register a new action to a callback hook 41 | 42 | Args: 43 | hook The callback hook name to register the action to 44 | name The name of the action for later reference 45 | callback The callback to fire 46 | """ 47 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 48 | assert callable(callback), f"callback '{callback}' is not callable" 49 | self._callbacks[hook].append({'name': name, 'callback': callback}) 50 | 51 | def get_registered_actions(self, hook=None): 52 | """" 53 | Returns all the registered actions by callback hook 54 | 55 | Args: 56 | hook The name of the hook to check, defaults to all 57 | """ 58 | if hook: 59 | return self._callbacks[hook] 60 | else: 61 | return self._callbacks 62 | 63 | def run(self, hook, *args, **kwargs): 64 | """ 65 | Loop through the registered actions and fire all callbacks 66 | 67 | Args: 68 | hook The name of the hook to check, defaults to all 69 | args Arguments to receive from YOLOv5 70 | kwargs Keyword Arguments to receive from YOLOv5 71 | """ 72 | 73 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 74 | 75 | for logger in self._callbacks[hook]: 76 | logger['callback'](*args, **kwargs) 77 | -------------------------------------------------------------------------------- /yolov5s_py/utils/flask_rest_api/README.md: -------------------------------------------------------------------------------- 1 | # Flask REST API 2 | 3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are 4 | commonly used to expose Machine Learning (ML) models to other services. 
This folder contains an example REST API 5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). 6 | 7 | ## Requirements 8 | 9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with: 10 | 11 | ```shell 12 | $ pip install Flask 13 | ``` 14 | 15 | ## Run 16 | 17 | After Flask installation run: 18 | 19 | ```shell 20 | $ python3 restapi.py --port 5000 21 | ``` 22 | 23 | Then use [curl](https://curl.se/) to perform a request: 24 | 25 | ```shell 26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' 27 | ``` 28 | 29 | The model inference results are returned as a JSON response: 30 | 31 | ```json 32 | [ 33 | { 34 | "class": 0, 35 | "confidence": 0.8900438547, 36 | "height": 0.9318675399, 37 | "name": "person", 38 | "width": 0.3264600933, 39 | "xcenter": 0.7438579798, 40 | "ycenter": 0.5207948685 41 | }, 42 | { 43 | "class": 0, 44 | "confidence": 0.8440024257, 45 | "height": 0.7155083418, 46 | "name": "person", 47 | "width": 0.6546785235, 48 | "xcenter": 0.427829951, 49 | "ycenter": 0.6334488392 50 | }, 51 | { 52 | "class": 27, 53 | "confidence": 0.3771208823, 54 | "height": 0.3902671337, 55 | "name": "tie", 56 | "width": 0.0696444362, 57 | "xcenter": 0.3675483763, 58 | "ycenter": 0.7991207838 59 | }, 60 | { 61 | "class": 27, 62 | "confidence": 0.3527112305, 63 | "height": 0.1540903747, 64 | "name": "tie", 65 | "width": 0.0336618312, 66 | "xcenter": 0.7814827561, 67 | "ycenter": 0.5065554976 68 | } 69 | ] 70 | ``` 71 | 72 | An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given 73 | in `example_request.py` 74 | -------------------------------------------------------------------------------- /yolov5s_py/utils/flask_rest_api/example_request.py: -------------------------------------------------------------------------------- 1 | """Perform test request""" 2 | import pprint 3 | 4 | import requests 5 | 6 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" 7 | TEST_IMAGE = "zidane.jpg" 8 | 9 | image_data = open(TEST_IMAGE, "rb").read() 10 | 11 | response = requests.post(DETECTION_URL, files={"image": image_data}).json() 12 | 13 | pprint.pprint(response) 14 | -------------------------------------------------------------------------------- /yolov5s_py/utils/flask_rest_api/restapi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Run a rest API exposing the yolov5s object detection model 3 | """ 4 | import argparse 5 | import io 6 | 7 | import torch 8 | from flask import Flask, request 9 | from PIL import Image 10 | 11 | app = Flask(__name__) 12 | 13 | DETECTION_URL = "/v1/object-detection/yolov5s" 14 | 15 | 16 | @app.route(DETECTION_URL, methods=["POST"]) 17 | def predict(): 18 | if not request.method == "POST": 19 | return 20 | 21 | if request.files.get("image"): 22 | image_file = request.files["image"] 23 | image_bytes = image_file.read() 24 | 25 | img = Image.open(io.BytesIO(image_bytes)) 26 | 27 | results = model(img, size=640) # reduce size=320 for faster inference 28 | return results.pandas().xyxy[0].to_json(orient="records") 29 | 30 | 31 | if __name__ == "__main__": 32 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") 33 | parser.add_argument("--port", default=5000, type=int, help="port number") 34 | args = parser.parse_args() 35 | 36 | model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload 
to recache 37 | app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat 38 | -------------------------------------------------------------------------------- /yolov5s_py/utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables is the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /yolov5s_py/utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements to your app on top of the existing ones 2 | pip==21.1 3 | Flask==1.0.2 4 | gunicorn==19.9.0 5 | -------------------------------------------------------------------------------- /yolov5s_py/utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 15 | -------------------------------------------------------------------------------- /yolov5s_py/utils/loggers/wandb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/utils/loggers/wandb/__init__.py -------------------------------------------------------------------------------- /yolov5s_py/utils/loggers/wandb/log_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from wandb_utils import WandbLogger 4 | 5 | from utils.general import LOGGER 6 | 7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' 8 | 9 | 10 | def create_dataset_artifact(opt): 11 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused 12 | if not logger.wandb: 13 | LOGGER.info("install wandb using `pip install wandb` to log the dataset") 14 | 15 | 16 | if __name__ == '__main__': 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') 19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') 20 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') 21 | parser.add_argument('--entity', default=None, help='W&B entity') 22 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') 23 | 24 | opt = 
parser.parse_args() 25 | opt.resume = False # Explicitly disallow resume check for dataset upload job 26 | 27 | create_dataset_artifact(opt) 28 | -------------------------------------------------------------------------------- /yolov5s_py/utils/loggers/wandb/sweep.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import wandb 5 | 6 | FILE = Path(__file__).resolve() 7 | ROOT = FILE.parents[3] # YOLOv5 root directory 8 | if str(ROOT) not in sys.path: 9 | sys.path.append(str(ROOT)) # add ROOT to PATH 10 | 11 | from train import parse_opt, train 12 | from utils.callbacks import Callbacks 13 | from utils.general import increment_path 14 | from utils.torch_utils import select_device 15 | 16 | 17 | def sweep(): 18 | wandb.init() 19 | # Get hyp dict from sweep agent 20 | hyp_dict = vars(wandb.config).get("_items") 21 | 22 | # Workaround: get necessary opt args 23 | opt = parse_opt(known=True) 24 | opt.batch_size = hyp_dict.get("batch_size") 25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) 26 | opt.epochs = hyp_dict.get("epochs") 27 | opt.nosave = True 28 | opt.data = hyp_dict.get("data") 29 | opt.weights = str(opt.weights) 30 | opt.cfg = str(opt.cfg) 31 | opt.data = str(opt.data) 32 | opt.hyp = str(opt.hyp) 33 | opt.project = str(opt.project) 34 | device = select_device(opt.device, batch_size=opt.batch_size) 35 | 36 | # train 37 | train(hyp_dict, opt, device, callbacks=Callbacks()) 38 | 39 | 40 | if __name__ == "__main__": 41 | sweep() 42 | -------------------------------------------------------------------------------- /yolov5s_py/utils/loggers/wandb/sweep.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for training 2 | # To set range- 3 | # Provide min and max values as: 4 | # parameter: 5 | # 6 | # min: scalar 7 | # max: scalar 8 | # OR 9 | # 10 | # Set a specific list of search space- 11 | # parameter: 12 | # values: [scalar1, scalar2, scalar3...] 
13 | # 14 | # You can use grid, bayesian and hyperopt search strategy 15 | # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration 16 | 17 | program: utils/loggers/wandb/sweep.py 18 | method: random 19 | metric: 20 | name: metrics/mAP_0.5 21 | goal: maximize 22 | 23 | parameters: 24 | # hyperparameters: set either min, max range or values list 25 | data: 26 | value: "data/coco128.yaml" 27 | batch_size: 28 | values: [64] 29 | epochs: 30 | values: [10] 31 | 32 | lr0: 33 | distribution: uniform 34 | min: 1e-5 35 | max: 1e-1 36 | lrf: 37 | distribution: uniform 38 | min: 0.01 39 | max: 1.0 40 | momentum: 41 | distribution: uniform 42 | min: 0.6 43 | max: 0.98 44 | weight_decay: 45 | distribution: uniform 46 | min: 0.0 47 | max: 0.001 48 | warmup_epochs: 49 | distribution: uniform 50 | min: 0.0 51 | max: 5.0 52 | warmup_momentum: 53 | distribution: uniform 54 | min: 0.0 55 | max: 0.95 56 | warmup_bias_lr: 57 | distribution: uniform 58 | min: 0.0 59 | max: 0.2 60 | box: 61 | distribution: uniform 62 | min: 0.02 63 | max: 0.2 64 | cls: 65 | distribution: uniform 66 | min: 0.2 67 | max: 4.0 68 | cls_pw: 69 | distribution: uniform 70 | min: 0.5 71 | max: 2.0 72 | obj: 73 | distribution: uniform 74 | min: 0.2 75 | max: 4.0 76 | obj_pw: 77 | distribution: uniform 78 | min: 0.5 79 | max: 2.0 80 | iou_t: 81 | distribution: uniform 82 | min: 0.1 83 | max: 0.7 84 | anchor_t: 85 | distribution: uniform 86 | min: 2.0 87 | max: 8.0 88 | fl_gamma: 89 | distribution: uniform 90 | min: 0.0 91 | max: 0.1 92 | hsv_h: 93 | distribution: uniform 94 | min: 0.0 95 | max: 0.1 96 | hsv_s: 97 | distribution: uniform 98 | min: 0.0 99 | max: 0.9 100 | hsv_v: 101 | distribution: uniform 102 | min: 0.0 103 | max: 0.9 104 | degrees: 105 | distribution: uniform 106 | min: 0.0 107 | max: 45.0 108 | translate: 109 | distribution: uniform 110 | min: 0.0 111 | max: 0.9 112 | scale: 113 | distribution: uniform 114 | min: 0.0 115 | max: 0.9 116 | shear: 117 | distribution: uniform 118 | min: 0.0 119 | max: 10.0 120 | perspective: 121 | distribution: uniform 122 | min: 0.0 123 | max: 0.001 124 | flipud: 125 | distribution: uniform 126 | min: 0.0 127 | max: 1.0 128 | fliplr: 129 | distribution: uniform 130 | min: 0.0 131 | max: 1.0 132 | mosaic: 133 | distribution: uniform 134 | min: 0.0 135 | max: 1.0 136 | mixup: 137 | distribution: uniform 138 | min: 0.0 139 | max: 1.0 140 | copy_paste: 141 | distribution: uniform 142 | min: 0.0 143 | max: 1.0 144 | -------------------------------------------------------------------------------- /yolov5s_py/yolov5s.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov5s_py/yolov5s.pt -------------------------------------------------------------------------------- /yolov6s_py/data/image1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/data/image1.jpg -------------------------------------------------------------------------------- /yolov6s_py/gen_wts.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import os 4 | import struct 5 | import torch 6 | 7 | from yolov6.layers.common import DetectBackend 8 | 9 | wts_file = 'yolov6s.wts' 10 | 11 | # Initialize 12 | weights = 'yolov6s.pt' 13 | device = 'cpu' 14 | cuda = 
device != 'cpu' and torch.cuda.is_available() 15 | device = torch.device('cuda:0' if cuda else 'cpu') 16 | 17 | # Load model 18 | model = DetectBackend(weights, device=device, fuse=True).model 19 | 20 | if 1: # list out the weights (keys and shapes) for inspection 21 | weights = model.state_dict() 22 | weight_list = [(key, value) for (key, value) in weights.items()] 23 | for idx in range(len(weight_list)): 24 | key, value = weight_list[idx] 25 | if "num_batches_tracked" in key: 26 | print(idx, "--------------------") 27 | continue 28 | print(idx, key, value.shape) 29 | 30 | if 0: # disabled alternative writer (same .wts format, no existence check) 31 | with open(wts_file, 'w') as f: 32 | f.write('{}\n'.format(len(model.state_dict().keys()))) 33 | for k, v in model.state_dict().items(): 34 | vr = v.reshape(-1).cpu().numpy() 35 | f.write('{} {} '.format(k, len(vr))) 36 | for vv in vr: 37 | f.write(' ') 38 | f.write(struct.pack('>f', float(vv)).hex()) 39 | f.write('\n') 40 | else: 41 | if os.path.isfile('yolov6s.wts'): 42 | print('yolov6s.wts file already exists.') 43 | else: 44 | print('making yolov6s.wts file ...') # create yolov6s.wts if it does not exist yet 45 | f = open("yolov6s.wts", 'w') 46 | f.write("{}\n".format(len(model.state_dict().keys()))) 47 | for k, v in model.state_dict().items(): 48 | print('key: ', k) 49 | print('value: ', v.shape) 50 | vr = v.reshape(-1).cpu().numpy() 51 | f.write("{} {}".format(k, len(vr))) 52 | for vv in vr: 53 | f.write(" ") 54 | f.write(struct.pack(">f", float(vv)).hex()) 55 | f.write("\n") 56 | print('Completed yolov6s.wts file!') -------------------------------------------------------------------------------- /yolov6s_py/yolov6/core/__pycache__/inferer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/core/__pycache__/inferer.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/data/__pycache__/data_augment.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/data/__pycache__/data_augment.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/data/__pycache__/datasets.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/data/__pycache__/datasets.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/data/data_load.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # This code is based on 4 | # https://github.com/ultralytics/yolov5/blob/master/utils/dataloaders.py 5 | 6 | import os 7 | from torch.utils.data import dataloader, distributed 8 | 9 | from .datasets import TrainValDataset 10 | from yolov6.utils.events import LOGGER 11 | from yolov6.utils.torch_utils import torch_distributed_zero_first 12 | 13 | 14 | def create_dataloader(path, img_size, batch_size, stride, hyp=None, augment=False, check_images=False, check_labels=False, pad=0.0, rect=False, rank=-1, workers=8, shuffle=False, class_names=None, task='Train'): 15 | '''Create general dataloader.
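Args (summary added for clarity; descriptions follow from the usage below):
path: image directory or list file handed to TrainValDataset.
img_size: target image size used by the dataset.
batch_size: per-process batch size, capped at len(dataset).
stride: maximum model stride, passed to the dataset for shape padding.
rect: use rectangular batches; incompatible with shuffle.
rank: DDP process rank (-1 for single-process runs).
workers: upper bound on the number of dataloader workers.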
16 | 17 | Returns dataloader and dataset 18 | ''' 19 | if rect and shuffle: 20 | LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') 21 | shuffle = False 22 | with torch_distributed_zero_first(rank): 23 | dataset = TrainValDataset(path, img_size, batch_size, 24 | augment=augment, 25 | hyp=hyp, 26 | rect=rect, 27 | check_images=check_images, 28 | stride=int(stride), 29 | pad=pad, 30 | rank=rank, 31 | class_names=class_names, 32 | task=task) 33 | 34 | batch_size = min(batch_size, len(dataset)) 35 | workers = min([os.cpu_count() // int(os.getenv('WORLD_SIZE', 1)), batch_size if batch_size > 1 else 0, workers]) # number of workers 36 | sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) 37 | return TrainValDataLoader(dataset, 38 | batch_size=batch_size, 39 | shuffle=shuffle and sampler is None, 40 | num_workers=workers, 41 | sampler=sampler, 42 | pin_memory=True, 43 | collate_fn=TrainValDataset.collate_fn), dataset 44 | 45 | 46 | class TrainValDataLoader(dataloader.DataLoader): 47 | """ Dataloader that reuses workers 48 | 49 | Uses same syntax as vanilla DataLoader 50 | """ 51 | 52 | def __init__(self, *args, **kwargs): 53 | super().__init__(*args, **kwargs) 54 | object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) 55 | self.iterator = super().__iter__() 56 | 57 | def __len__(self): 58 | return len(self.batch_sampler.sampler) 59 | 60 | def __iter__(self): 61 | for i in range(len(self)): 62 | yield next(self.iterator) 63 | 64 | 65 | class _RepeatSampler: 66 | """ Sampler that repeats forever 67 | 68 | Args: 69 | sampler (Sampler) 70 | """ 71 | 72 | def __init__(self, sampler): 73 | self.sampler = sampler 74 | 75 | def __iter__(self): 76 | while True: 77 | yield from iter(self.sampler) 78 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/layers/__pycache__/common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/layers/__pycache__/common.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/layers/__pycache__/common.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/layers/__pycache__/common.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/__pycache__/efficientrep.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/models/__pycache__/efficientrep.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/__pycache__/effidehead.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/models/__pycache__/effidehead.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/__pycache__/reppan.cpython-39.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/models/__pycache__/reppan.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/__pycache__/yolo.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/models/__pycache__/yolo.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/efficientrep.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from yolov6.layers.common import RepVGGBlock, RepBlock, SimSPPF 3 | 4 | 5 | class EfficientRep(nn.Module): 6 | '''EfficientRep Backbone 7 | EfficientRep is handcrafted by hardware-aware neural network design. 8 | With rep-style struct, EfficientRep is friendly to high-computation hardware(e.g. GPU). 9 | ''' 10 | 11 | def __init__( 12 | self, 13 | in_channels=3, 14 | channels_list=None, 15 | num_repeats=None, 16 | ): 17 | super().__init__() 18 | 19 | assert channels_list is not None 20 | assert num_repeats is not None 21 | 22 | self.stem = RepVGGBlock( 23 | in_channels=in_channels, 24 | out_channels=channels_list[0], 25 | kernel_size=3, 26 | stride=2 27 | ) 28 | 29 | self.ERBlock_2 = nn.Sequential( 30 | RepVGGBlock( 31 | in_channels=channels_list[0], 32 | out_channels=channels_list[1], 33 | kernel_size=3, 34 | stride=2 35 | ), 36 | RepBlock( 37 | in_channels=channels_list[1], 38 | out_channels=channels_list[1], 39 | n=num_repeats[1] 40 | ) 41 | ) 42 | 43 | self.ERBlock_3 = nn.Sequential( 44 | RepVGGBlock( 45 | in_channels=channels_list[1], 46 | out_channels=channels_list[2], 47 | kernel_size=3, 48 | stride=2 49 | ), 50 | RepBlock( 51 | in_channels=channels_list[2], 52 | out_channels=channels_list[2], 53 | n=num_repeats[2] 54 | ) 55 | ) 56 | 57 | self.ERBlock_4 = nn.Sequential( 58 | RepVGGBlock( 59 | in_channels=channels_list[2], 60 | out_channels=channels_list[3], 61 | kernel_size=3, 62 | stride=2 63 | ), 64 | RepBlock( 65 | in_channels=channels_list[3], 66 | out_channels=channels_list[3], 67 | n=num_repeats[3] 68 | ) 69 | ) 70 | 71 | self.ERBlock_5 = nn.Sequential( 72 | RepVGGBlock( 73 | in_channels=channels_list[3], 74 | out_channels=channels_list[4], 75 | kernel_size=3, 76 | stride=2, 77 | ), 78 | RepBlock( 79 | in_channels=channels_list[4], 80 | out_channels=channels_list[4], 81 | n=num_repeats[4] 82 | ), 83 | SimSPPF( 84 | in_channels=channels_list[4], 85 | out_channels=channels_list[4], 86 | kernel_size=5 87 | ) 88 | ) 89 | 90 | def forward(self, x): 91 | 92 | outputs = [] 93 | x = self.stem(x) 94 | x = self.ERBlock_2(x) 95 | x = self.ERBlock_3(x) 96 | outputs.append(x) 97 | x = self.ERBlock_4(x) 98 | outputs.append(x) 99 | x = self.ERBlock_5(x) 100 | outputs.append(x) 101 | 102 | return tuple(outputs) 103 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/reppan.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from yolov6.layers.common import RepBlock, SimConv, Transpose 4 | 5 | 6 | class RepPANNeck(nn.Module): 7 | """RepPANNeck Module 8 | EfficientRep is the default backbone of this model. 
9 | RepPANNeck has the balance of feature fusion ability and hardware efficiency. 10 | """ 11 | 12 | def __init__( 13 | self, 14 | channels_list=None, 15 | num_repeats=None 16 | ): 17 | super().__init__() 18 | 19 | assert channels_list is not None 20 | assert num_repeats is not None 21 | 22 | self.Rep_p4 = RepBlock( 23 | in_channels=channels_list[3] + channels_list[5], 24 | out_channels=channels_list[5], 25 | n=num_repeats[5], 26 | ) 27 | 28 | self.Rep_p3 = RepBlock( 29 | in_channels=channels_list[2] + channels_list[6], 30 | out_channels=channels_list[6], 31 | n=num_repeats[6] 32 | ) 33 | 34 | self.Rep_n3 = RepBlock( 35 | in_channels=channels_list[6] + channels_list[7], 36 | out_channels=channels_list[8], 37 | n=num_repeats[7], 38 | ) 39 | 40 | self.Rep_n4 = RepBlock( 41 | in_channels=channels_list[5] + channels_list[9], 42 | out_channels=channels_list[10], 43 | n=num_repeats[8] 44 | ) 45 | 46 | self.reduce_layer0 = SimConv( 47 | in_channels=channels_list[4], 48 | out_channels=channels_list[5], 49 | kernel_size=1, 50 | stride=1 51 | ) 52 | 53 | self.upsample0 = Transpose( 54 | in_channels=channels_list[5], 55 | out_channels=channels_list[5], 56 | ) 57 | 58 | self.reduce_layer1 = SimConv( 59 | in_channels=channels_list[5], 60 | out_channels=channels_list[6], 61 | kernel_size=1, 62 | stride=1 63 | ) 64 | 65 | self.upsample1 = Transpose( 66 | in_channels=channels_list[6], 67 | out_channels=channels_list[6] 68 | ) 69 | 70 | self.downsample2 = SimConv( 71 | in_channels=channels_list[6], 72 | out_channels=channels_list[7], 73 | kernel_size=3, 74 | stride=2 75 | ) 76 | 77 | self.downsample1 = SimConv( 78 | in_channels=channels_list[8], 79 | out_channels=channels_list[9], 80 | kernel_size=3, 81 | stride=2 82 | ) 83 | 84 | def forward(self, input): 85 | 86 | (x2, x1, x0) = input 87 | 88 | fpn_out0 = self.reduce_layer0(x0) 89 | upsample_feat0 = self.upsample0(fpn_out0) 90 | f_concat_layer0 = torch.cat([upsample_feat0, x1], 1) 91 | f_out0 = self.Rep_p4(f_concat_layer0) 92 | 93 | fpn_out1 = self.reduce_layer1(f_out0) 94 | upsample_feat1 = self.upsample1(fpn_out1) 95 | f_concat_layer1 = torch.cat([upsample_feat1, x2], 1) 96 | pan_out2 = self.Rep_p3(f_concat_layer1) 97 | 98 | down_feat1 = self.downsample2(pan_out2) 99 | p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1) 100 | pan_out1 = self.Rep_n3(p_concat_layer1) 101 | 102 | down_feat0 = self.downsample1(pan_out1) 103 | p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1) 104 | pan_out0 = self.Rep_n4(p_concat_layer2) 105 | 106 | outputs = [pan_out2, pan_out1, pan_out0] 107 | 108 | return outputs 109 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/models/yolo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | import math 4 | import torch.nn as nn 5 | from yolov6.layers.common import * 6 | from yolov6.utils.torch_utils import initialize_weights 7 | from yolov6.models.efficientrep import EfficientRep 8 | from yolov6.models.reppan import RepPANNeck 9 | from yolov6.models.effidehead import Detect, build_effidehead_layer 10 | 11 | 12 | class Model(nn.Module): 13 | '''YOLOv6 model with backbone, neck and head. 14 | The default parts are EfficientRep Backbone, Rep-PAN and 15 | Efficient Decoupled Head. 
16 | ''' 17 | def __init__(self, config, channels=3, num_classes=None, anchors=None): # model, input channels, number of classes 18 | super().__init__() 19 | # Build network 20 | num_layers = config.model.head.num_layers 21 | self.backbone, self.neck, self.detect = build_network(config, channels, num_classes, anchors, num_layers) 22 | 23 | # Init Detect head 24 | begin_indices = config.model.head.begin_indices 25 | out_indices_head = config.model.head.out_indices 26 | self.stride = self.detect.stride 27 | self.detect.i = begin_indices 28 | self.detect.f = out_indices_head 29 | self.detect.initialize_biases() 30 | 31 | # Init weights 32 | initialize_weights(self) 33 | 34 | def forward(self, x): 35 | x = self.backbone(x) 36 | x = self.neck(x) 37 | x = self.detect(x) 38 | return x 39 | 40 | def _apply(self, fn): 41 | self = super()._apply(fn) 42 | self.detect.stride = fn(self.detect.stride) 43 | self.detect.grid = list(map(fn, self.detect.grid)) 44 | return self 45 | 46 | 47 | def make_divisible(x, divisor): 48 | # Round x up to the nearest value that is evenly divisible by the divisor. 49 | return math.ceil(x / divisor) * divisor 50 | 51 | 52 | def build_network(config, channels, num_classes, anchors, num_layers): 53 | depth_mul = config.model.depth_multiple 54 | width_mul = config.model.width_multiple 55 | num_repeat_backbone = config.model.backbone.num_repeats 56 | channels_list_backbone = config.model.backbone.out_channels 57 | num_repeat_neck = config.model.neck.num_repeats 58 | channels_list_neck = config.model.neck.out_channels 59 | num_anchors = config.model.head.anchors 60 | num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in (num_repeat_backbone + num_repeat_neck)] 61 | channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)] 62 | 63 | backbone = EfficientRep( 64 | in_channels=channels, 65 | channels_list=channels_list, 66 | num_repeats=num_repeat 67 | ) 68 | 69 | neck = RepPANNeck( 70 | channels_list=channels_list, 71 | num_repeats=num_repeat 72 | ) 73 | 74 | head_layers = build_effidehead_layer(channels_list, num_anchors, num_classes) 75 | 76 | head = Detect(num_classes, anchors, num_layers, head_layers=head_layers) 77 | 78 | return backbone, neck, head 79 | 80 | 81 | def build_model(cfg, num_classes, device): 82 | model = Model(cfg, channels=3, num_classes=num_classes, anchors=cfg.model.head.anchors).to(device) 83 | return model 84 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/solver/build.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | import os 4 | import math 5 | 6 | import torch 7 | import torch.nn as nn 8 | from yolov6.utils.events import LOGGER # used by build_lr_scheduler below 9 | 10 | def build_optimizer(cfg, model): 11 | """ Build optimizer from cfg file.""" 12 | g_bnw, g_w, g_b = [], [], [] 13 | for v in model.modules(): 14 | if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): 15 | g_b.append(v.bias) 16 | if isinstance(v, nn.BatchNorm2d): 17 | g_bnw.append(v.weight) 18 | elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): 19 | g_w.append(v.weight) 20 | 21 | assert cfg.solver.optim in ('SGD', 'Adam'), 'ERROR: unknown optimizer, only SGD and Adam are supported' 22 | if cfg.solver.optim == 'SGD': 23 | optimizer = torch.optim.SGD(g_bnw, lr=cfg.solver.lr0, momentum=cfg.solver.momentum, nesterov=True) 24 | elif cfg.solver.optim == 'Adam': 25 | optimizer = torch.optim.Adam(g_bnw, lr=cfg.solver.lr0, betas=(cfg.solver.momentum, 
0.999)) 26 | 27 | optimizer.add_param_group({'params': g_w, 'weight_decay': cfg.solver.weight_decay}) 28 | optimizer.add_param_group({'params': g_b}) 29 | 30 | del g_bnw, g_w, g_b 31 | return optimizer 32 | 33 | 34 | def build_lr_scheduler(cfg, optimizer, epochs): 35 | """Build learning rate scheduler from cfg file.""" 36 | if cfg.solver.lr_scheduler == 'Cosine': 37 | lf = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (cfg.solver.lrf - 1) + 1 38 | else: 39 | LOGGER.error('unknown lr scheduler, use Cosine defaulted') 40 | lf = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (cfg.solver.lrf - 1) + 1 # fall back to Cosine so lf is defined 41 | scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) 42 | return scheduler, lf 43 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/Arial.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/Arial.ttf -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/checkpoint.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/checkpoint.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/checkpoint.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/checkpoint.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/events.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/events.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/events.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/events.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/nms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/nms.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/torch_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/torch_utils.cpython-37.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/__pycache__/torch_utils.cpython-39.pyc: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/yester31/TensorRT_API/b32eb47bacedd7be472cbe608b6cc330000889be/yolov6s_py/yolov6/utils/__pycache__/torch_utils.cpython-39.pyc -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/checkpoint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | import os 4 | import shutil 5 | import torch 6 | import os.path as osp 7 | from yolov6.utils.events import LOGGER 8 | from yolov6.utils.torch_utils import fuse_model 9 | 10 | 11 | def load_state_dict(weights, model, map_location=None): 12 | """Load weights from a checkpoint file, assigning only those weights whose layer name and shape match the model.""" 13 | ckpt = torch.load(weights, map_location=map_location) 14 | state_dict = ckpt['model'].float().state_dict() 15 | model_state_dict = model.state_dict() 16 | state_dict = {k: v for k, v in state_dict.items() if k in model_state_dict and v.shape == model_state_dict[k].shape} 17 | model.load_state_dict(state_dict, strict=False) 18 | del ckpt, state_dict, model_state_dict 19 | return model 20 | 21 | 22 | def load_checkpoint(weights, map_location=None, inplace=True, fuse=True): 23 | """Load model from checkpoint file.""" 24 | LOGGER.info("Loading checkpoint from {}".format(weights)) 25 | ckpt = torch.load(weights, map_location=map_location) # load 26 | model = ckpt['ema' if ckpt.get('ema') else 'model'].float() # prefer EMA weights when present 27 | if fuse: 28 | LOGGER.info("\nFusing model...") 29 | model = fuse_model(model).eval() 30 | else: 31 | model = model.eval() 32 | return model 33 | 34 | 35 | def save_checkpoint(ckpt, is_best, save_dir, model_name=""): 36 | """Save checkpoint to disk.""" 37 | if not osp.exists(save_dir): 38 | os.makedirs(save_dir) 39 | filename = osp.join(save_dir, model_name + '.pt') 40 | torch.save(ckpt, filename) 41 | if is_best: 42 | best_filename = osp.join(save_dir, 'best_ckpt.pt') 43 | shutil.copyfile(filename, best_filename) 44 | 45 | 46 | def strip_optimizer(ckpt_dir): 47 | for s in ['best', 'last']: 48 | ckpt_path = osp.join(ckpt_dir, '{}_ckpt.pt'.format(s)) 49 | if not osp.exists(ckpt_path): 50 | continue 51 | ckpt = torch.load(ckpt_path, map_location=torch.device('cpu')) 52 | if ckpt.get('ema'): 53 | ckpt['model'] = ckpt['ema'] # replace model with ema 54 | for k in ['optimizer', 'ema', 'updates']: # keys 55 | ckpt[k] = None 56 | ckpt['epoch'] = -1 57 | ckpt['model'].half() # to FP16 58 | for p in ckpt['model'].parameters(): 59 | p.requires_grad = False 60 | torch.save(ckpt, ckpt_path) 61 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # The code is based on 4 | # https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py 5 | # Copyright (c) OpenMMLab.
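# Usage sketch (illustrative, not part of the original file; the config path
# below is an assumption -- any YOLOv6 .py config file would do):
#
#   from yolov6.utils.config import Config
#   cfg = Config.fromfile('configs/yolov6s.py')  # the .py config is imported as a temporary module
#   print(cfg.model.head.num_layers)             # ConfigDict allows attribute-style access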
6 | 7 | import os.path as osp 8 | import shutil 9 | import sys 10 | import tempfile 11 | from importlib import import_module 12 | from addict import Dict 13 | 14 | 15 | class ConfigDict(Dict): 16 | 17 | def __missing__(self, name): 18 | raise KeyError(name) 19 | 20 | def __getattr__(self, name): 21 | try: 22 | value = super(ConfigDict, self).__getattr__(name) 23 | except KeyError: 24 | ex = AttributeError("'{}' object has no attribute '{}'".format( 25 | self.__class__.__name__, name)) 26 | except Exception as e: 27 | ex = e 28 | else: 29 | return value 30 | raise ex 31 | 32 | 33 | class Config(object): 34 | 35 | @staticmethod 36 | def _file2dict(filename): 37 | filename = str(filename) 38 | if filename.endswith('.py'): 39 | with tempfile.TemporaryDirectory() as temp_config_dir: 40 | shutil.copyfile(filename, 41 | osp.join(temp_config_dir, '_tempconfig.py')) 42 | sys.path.insert(0, temp_config_dir) 43 | mod = import_module('_tempconfig') 44 | sys.path.pop(0) 45 | cfg_dict = { 46 | name: value 47 | for name, value in mod.__dict__.items() 48 | if not name.startswith('__') 49 | } 50 | # delete imported module 51 | del sys.modules['_tempconfig'] 52 | else: 53 | raise IOError('Only .py type are supported now!') 54 | cfg_text = filename + '\n' 55 | with open(filename, 'r') as f: 56 | cfg_text += f.read() 57 | 58 | return cfg_dict, cfg_text 59 | 60 | @staticmethod 61 | def fromfile(filename): 62 | cfg_dict, cfg_text = Config._file2dict(filename) 63 | return Config(cfg_dict, cfg_text=cfg_text, filename=filename) 64 | 65 | def __init__(self, cfg_dict=None, cfg_text=None, filename=None): 66 | if cfg_dict is None: 67 | cfg_dict = dict() 68 | elif not isinstance(cfg_dict, dict): 69 | raise TypeError('cfg_dict must be a dict, but got {}'.format( 70 | type(cfg_dict))) 71 | 72 | super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict)) 73 | super(Config, self).__setattr__('_filename', filename) 74 | if cfg_text: 75 | text = cfg_text 76 | elif filename: 77 | with open(filename, 'r') as f: 78 | text = f.read() 79 | else: 80 | text = '' 81 | super(Config, self).__setattr__('_text', text) 82 | 83 | @property 84 | def filename(self): 85 | return self._filename 86 | 87 | @property 88 | def text(self): 89 | return self._text 90 | 91 | def __repr__(self): 92 | return 'Config (path: {}): {}'.format(self.filename, 93 | self._cfg_dict.__repr__()) 94 | 95 | def __getattr__(self, name): 96 | return getattr(self._cfg_dict, name) 97 | 98 | def __setattr__(self, name, value): 99 | if isinstance(value, dict): 100 | value = ConfigDict(value) 101 | self._cfg_dict.__setattr__(name, value) 102 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/ema.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # The code is based on 4 | # https://github.com/ultralytics/yolov5/blob/master/utils/torch_utils.py 5 | import math 6 | from copy import deepcopy 7 | import torch 8 | import torch.nn as nn 9 | 10 | 11 | class ModelEMA: 12 | """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models 13 | Keep a moving average of everything in the model state_dict (parameters and buffers). 14 | This is intended to allow functionality like 15 | https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage 16 | A smoothed version of the weights is necessary for some training schemes to perform well. 
17 | This class is sensitive to where it is initialized in the sequence of model init, 18 | GPU assignment and distributed training wrappers. The decay itself ramps up as decay * (1 - exp(-updates / 2000)), so the average tracks the raw weights closely early in training. 19 | """ 20 | 21 | def __init__(self, model, decay=0.9999, updates=0): 22 | self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA 23 | self.updates = updates 24 | self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) 25 | for param in self.ema.parameters(): 26 | param.requires_grad_(False) 27 | 28 | def update(self, model): 29 | with torch.no_grad(): 30 | self.updates += 1 31 | decay = self.decay(self.updates) 32 | 33 | state_dict = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict 34 | for k, item in self.ema.state_dict().items(): 35 | if item.dtype.is_floating_point: 36 | item *= decay 37 | item += (1 - decay) * state_dict[k].detach() 38 | 39 | def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): 40 | copy_attr(self.ema, model, include, exclude) 41 | 42 | 43 | def copy_attr(a, b, include=(), exclude=()): 44 | """Copy attributes from one instance and set them to another instance.""" 45 | for k, item in b.__dict__.items(): 46 | if (len(include) and k not in include) or k.startswith('_') or k in exclude: 47 | continue 48 | else: 49 | setattr(a, k, item) 50 | 51 | 52 | def is_parallel(model): 53 | # Return True if model's type is DP or DDP, else False. 54 | return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) 55 | 56 | 57 | def de_parallel(model): 58 | # De-parallelize a model. Return single-GPU model if model's type is DP or DDP. 59 | return model.module if is_parallel(model) else model 60 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/envs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import os 4 | import random 5 | import numpy as np 6 | 7 | import torch 8 | import torch.backends.cudnn as cudnn 9 | from yolov6.utils.events import LOGGER 10 | 11 | 12 | def get_envs(): 13 | """Get the distributed-training environment variables PyTorch needs (LOCAL_RANK, RANK, WORLD_SIZE).""" 14 | local_rank = int(os.getenv('LOCAL_RANK', -1)) 15 | rank = int(os.getenv('RANK', -1)) 16 | world_size = int(os.getenv('WORLD_SIZE', 1)) 17 | return local_rank, rank, world_size 18 | 19 | 20 | def select_device(device): 21 | """Select the compute device for the program. 22 | Args: 23 | device: a string, like 'cpu' or '1,2,3,4' 24 | Returns: 25 | torch.device 26 | """ 27 | if device == 'cpu': 28 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' 29 | LOGGER.info('Using CPU for training... ') 30 | elif device: 31 | os.environ['CUDA_VISIBLE_DEVICES'] = device 32 | assert torch.cuda.is_available() 33 | nd = len(device.strip().split(',')) 34 | LOGGER.info(f'Using {nd} GPU(s) for training... ') 35 | cuda = device != 'cpu' and torch.cuda.is_available() 36 | device = torch.device('cuda:0' if cuda else 'cpu') 37 | return device 38 | 39 | 40 | def set_random_seed(seed, deterministic=False): 41 | """Seed the random, numpy and torch RNGs and configure cudnn. 42 | Args: 43 | seed: int value. 44 | deterministic: bool value; if True, cudnn runs deterministically (reproducible but possibly slower), otherwise benchmark mode picks the fastest kernels.
45 | """ 46 | random.seed(seed) 47 | np.random.seed(seed) 48 | torch.manual_seed(seed) 49 | if deterministic: 50 | cudnn.deterministic = True 51 | cudnn.benchmark = False 52 | else: 53 | cudnn.deterministic = False 54 | cudnn.benchmark = True 55 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/events.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import os 4 | import yaml 5 | import logging 6 | import shutil 7 | 8 | 9 | def set_logging(name=None): 10 | rank = int(os.getenv('RANK', -1)) 11 | logging.basicConfig(format="%(message)s", level=logging.INFO if (rank in (-1, 0)) else logging.WARNING) 12 | return logging.getLogger(name) 13 | 14 | 15 | LOGGER = set_logging(__name__) 16 | NCOLS = shutil.get_terminal_size().columns 17 | 18 | 19 | def load_yaml(file_path): 20 | """Load data from yaml file.""" 21 | if isinstance(file_path, str): 22 | with open(file_path, errors='ignore') as f: 23 | data_dict = yaml.safe_load(f) 24 | return data_dict 25 | 26 | 27 | def save_yaml(data_dict, save_path): 28 | """Save data to yaml file""" 29 | with open(save_path, 'w') as f: 30 | yaml.safe_dump(data_dict, f, sort_keys=False) 31 | 32 | 33 | def write_tblog(tblogger, epoch, results, losses): 34 | """Display mAP and loss information to log.""" 35 | tblogger.add_scalar("val/mAP@0.5", results[0], epoch + 1) 36 | tblogger.add_scalar("val/mAP@0.50:0.95", results[1], epoch + 1) 37 | 38 | tblogger.add_scalar("train/iou_loss", losses[0], epoch + 1) 39 | tblogger.add_scalar("train/l1_loss", losses[1], epoch + 1) 40 | tblogger.add_scalar("train/obj_loss", losses[2], epoch + 1) 41 | tblogger.add_scalar("train/cls_loss", losses[3], epoch + 1) 42 | -------------------------------------------------------------------------------- /yolov6s_py/yolov6/utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | 4 | import time 5 | from contextlib import contextmanager 6 | from copy import deepcopy 7 | import torch 8 | import torch.distributed as dist 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | from yolov6.utils.events import LOGGER 12 | 13 | try: 14 | import thop # for FLOPs computation 15 | except ImportError: 16 | thop = None 17 | 18 | 19 | @contextmanager 20 | def torch_distributed_zero_first(local_rank: int): 21 | """ 22 | Decorator to make all processes in distributed training wait for each local_master to do something. 23 | """ 24 | if local_rank not in [-1, 0]: 25 | dist.barrier(device_ids=[local_rank]) 26 | yield 27 | if local_rank == 0: 28 | dist.barrier(device_ids=[0]) 29 | 30 | 31 | def time_sync(): 32 | # Waits for all kernels in all streams on a CUDA device to complete if cuda is available. 
33 | if torch.cuda.is_available(): 34 | torch.cuda.synchronize() 35 | return time.time() 36 | 37 | 38 | def initialize_weights(model): 39 | for m in model.modules(): 40 | t = type(m) 41 | if t is nn.Conv2d: 42 | pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 43 | elif t is nn.BatchNorm2d: 44 | m.eps = 1e-3 45 | m.momentum = 0.03 46 | elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: 47 | m.inplace = True 48 | 49 | 50 | def fuse_conv_and_bn(conv, bn): 51 | # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ 52 | fusedconv = ( 53 | nn.Conv2d( 54 | conv.in_channels, 55 | conv.out_channels, 56 | kernel_size=conv.kernel_size, 57 | stride=conv.stride, 58 | padding=conv.padding, 59 | groups=conv.groups, 60 | bias=True, 61 | ) 62 | .requires_grad_(False) 63 | .to(conv.weight.device) 64 | ) 65 | 66 | # prepare filters 67 | w_conv = conv.weight.clone().view(conv.out_channels, -1) 68 | w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) 69 | fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) 70 | 71 | # prepare spatial bias 72 | b_conv = ( 73 | torch.zeros(conv.weight.size(0), device=conv.weight.device) 74 | if conv.bias is None 75 | else conv.bias 76 | ) 77 | b_bn = bn.bias - bn.weight.mul(bn.running_mean).div( 78 | torch.sqrt(bn.running_var + bn.eps) 79 | ) 80 | fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) 81 | 82 | return fusedconv 83 | 84 | 85 | def fuse_model(model): 86 | from yolov6.layers.common import Conv 87 | 88 | for m in model.modules(): 89 | if type(m) is Conv and hasattr(m, "bn"): 90 | m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv 91 | delattr(m, "bn") # remove batchnorm 92 | m.forward = m.forward_fuse # update forward 93 | return model 94 | 95 | 96 | def get_model_info(model, img_size=640): 97 | """Get model Params and GFlops. 98 | Code base on https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/utils/model_utils.py 99 | """ 100 | from thop import profile 101 | stride = 32 102 | img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device) 103 | flops, params = profile(deepcopy(model), inputs=(img,), verbose=False) 104 | params /= 1e6 105 | flops /= 1e9 106 | img_size = img_size if isinstance(img_size, list) else [img_size, img_size] 107 | flops *= img_size[0] * img_size[1] / stride / stride * 2 # Gflops 108 | info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops) 109 | return info 110 | --------------------------------------------------------------------------------
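A quick way to sanity-check the fuse_conv_and_bn() helper above is to compare the fused convolution against the original conv -> bn pair. The sketch below is an illustrative addition (it assumes the yolov6 package above is importable, and that batchnorm is in eval mode, which is the only regime where fusion is valid):

import torch
import torch.nn as nn
from yolov6.utils.torch_utils import fuse_conv_and_bn  # defined in the file above

conv = nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(16).eval()  # fusion folds the *running* statistics, so eval mode is required

x = torch.randn(1, 8, 32, 32)
with torch.no_grad():
    reference = bn(conv(x))             # original two-op path
    fused = fuse_conv_and_bn(conv, bn)  # single conv with folded weights and bias
    output = fused(x)

print('max abs difference:', (reference - output).abs().max().item())  # ~1e-6, float32 rounding only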