├── README.md ├── data ├── argoverse_hd.yaml ├── coco128.yaml ├── hyp.finetune.yaml ├── hyp.scratch.yaml ├── hyp.tiny.person.yaml ├── hyp.uva.yaml ├── notes.txt ├── test.yaml ├── tiny_person.yaml ├── uva.yaml ├── visdrone10.yaml └── voc.yaml ├── datasets ├── aware_datasets.py ├── multi_modal_uva_datasets.py ├── tiny_datasets.py ├── uav_datasets.py └── visdrone_datasets.py ├── detect.py ├── detect_multi.py ├── functions.py ├── img ├── 1-1.png ├── 1-2.png ├── 1-3.png ├── 1-4.png ├── 1-5.png ├── 1-6.png ├── 1-7.png └── 1-8.png ├── models ├── SE.py ├── SWTR.py ├── __init__.py ├── aware_model.py ├── common.py ├── experimental.py ├── export.py ├── export_multi.py ├── export_multi_aware.py ├── funcs.py ├── hub │ ├── anchors.yaml │ ├── yolov3-spp.yaml │ ├── yolov3-tiny.yaml │ ├── yolov3.yaml │ ├── yolov5-fpn.yaml │ ├── yolov5-p2.yaml │ ├── yolov5-p6.yaml │ ├── yolov5-p7.yaml │ ├── yolov5-panet.yaml │ ├── yolov5l6.yaml │ ├── yolov5m6.yaml │ ├── yolov5s6.yaml │ └── yolov5x6.yaml ├── modal_ensemble_model_uva.py ├── modal_ensemble_model_uva_aware.py ├── modal_ensemble_model_uva_aware_nms.py ├── pruned_yolov5l.yaml ├── yolo.py ├── yolo_prune.py ├── yolo_prune_multi.py ├── yolov5l-transformer.yaml ├── yolov5l.yaml ├── yolov5l_trs.yaml ├── yolov5m.yaml ├── yolov5s-ASFF.yaml ├── yolov5s-c3-cbl.yaml ├── yolov5s-tiny.yaml ├── yolov5s-transformer.yaml ├── yolov5s.yaml ├── yolov5s_medium_fusion.yaml ├── yolov5s_trs.yaml └── yolov5x.yaml ├── mutil_modal_test_back_uva.py ├── mutil_modal_test_back_uva_aware.py ├── prune_channel_yolov5l.py ├── prune_channel_yolov5s.py ├── prune_layer_yolov5l.py ├── prune_layer_yolov5s.py ├── sparsity.py ├── test.py ├── test_multi.py ├── test_visdrone.py ├── train.py ├── train_aware.py ├── train_multi.py ├── train_sr.py ├── train_visdrone.py ├── tsne ├── draw_w1_w2.py ├── evaluate.py ├── resnet.py └── tsne_torch.py └── utils ├── __init__.py ├── activations.py ├── autoanchor.py ├── aws ├── __init__.py ├── mime.sh ├── resume.py └── userdata.sh ├── datasets.py ├── general.py ├── google_app_engine ├── Dockerfile ├── additional_requirements.txt └── app.yaml ├── google_utils.py ├── loss.py ├── loss_multi.py ├── metrics.py ├── plots.py ├── torch_utils.py └── wandb_logging ├── __init__.py ├── log_dataset.py └── wandb_utils.py /README.md: -------------------------------------------------------------------------------- 1 | # yolov5-prune-multi 2 | 无人机视角、多模态、模型剪枝、国产AI芯片部署 3 | 将训练好的视觉模型,进行剪枝压缩再训练,最终部署到终端设备上。 4 | 5 | 6 | ## 相关实验数据 7 | 8 | #### 数据集展示 9 | 10 | ###### 对齐的双模态图像 11 | ![相关图片](./img/1-8.png) 12 | ###### 双模态图像类别分布 13 | ![相关图片](./img/1-7.png) 14 | 15 | 16 | #### 数据集基础实验(多模态数据集为团队收集,暂未公开) 17 | 18 | ![数据集基础实验](./img/1-6.png) 19 | 20 | #### 多模态模型剪枝效果实验 21 | 22 | ![剪枝实验](./img/1-1.png) 23 | 24 | #### 国产芯片与英伟达芯片对比实验 25 | 26 | ###### 硬件参数 27 | ![芯片相关参数](./img/1-5.png) 28 | ###### 对比实验(硬件性能开到最大) 29 | ![硬件性能对比](./img/1-2.png) 30 | 31 | #### 模型算法效果截图 32 | 33 | ![模型优化效果](./img/1-3.png) 34 | 35 | #### 模型部署到边缘设备效果截图 36 | 37 | ![边缘设备部署效果](./img/1-4.png) 38 | 39 | ## 参考 40 | [AGX板卡模型推理部署工具链](https://github.com/sbbug/yolov5-multi-tensorrt) 41 | [地平线板卡模型推理部署工具链](https://github.com/sbbug/x3_chain) 42 | -------------------------------------------------------------------------------- /data/argoverse_hd.yaml: -------------------------------------------------------------------------------- 1 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ 2 | # Train command: python train.py --data argoverse_hd.yaml 3 | # Default dataset location is next to /yolov5: 4 | # 
/parent_folder 5 | # /argoverse 6 | # /yolov5 7 | 8 | 9 | # download command/URL (optional) 10 | download: bash data/scripts/get_argoverse_hd.sh 11 | 12 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 13 | train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images 14 | val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges 15 | test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview 16 | 17 | # number of classes 18 | nc: 8 19 | 20 | # class names 21 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] 22 | -------------------------------------------------------------------------------- /data/coco128.yaml: -------------------------------------------------------------------------------- 1 | # COCO 2017 dataset http://cocodataset.org - first 128 training images 2 | # Train command: python train.py --data coco128.yaml 3 | # Default dataset location is next to /yolov5: 4 | # /parent_folder 5 | # /coco128 6 | # /yolov5 7 | 8 | 9 | # download command/URL (optional) 10 | download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip 11 | 12 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 13 | train: ../coco128/images/train2017/ # 128 images 14 | val: ../coco128/images/train2017/ # 128 images 15 | 16 | # number of classes 17 | nc: 80 18 | 19 | # class names 20 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 21 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 22 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 23 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 24 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 25 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 26 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 27 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 28 | 'hair drier', 'toothbrush' ] 29 | -------------------------------------------------------------------------------- /data/hyp.finetune.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for VOC finetuning 2 | # python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | # Hyperparameter Evolution Results 7 | # Generations: 306 8 | # P R mAP.5 mAP.5:.95 box obj cls 9 | # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 10 | 11 | lr0: 0.0032 12 | lrf: 0.12 13 | momentum: 0.843 14 | weight_decay: 0.00036 15 | warmup_epochs: 2.0 16 | warmup_momentum: 0.5 17 | warmup_bias_lr: 0.05 18 | box: 0.0296 19 | cls: 0.243 20 | cls_pw: 0.631 21 | obj: 0.301 22 | obj_pw: 0.911 23 | iou_t: 0.2 24 | anchor_t: 2.91 25 | # anchors: 3.63 26 | fl_gamma: 0.0 27 | hsv_h: 0.0138 28 | hsv_s: 0.664 29 | hsv_v: 0.464 30 | degrees: 0.373 31 | translate: 0.245 32 | scale: 0.898 33 | shear: 0.602 34 | perspective: 0.0 35 | flipud: 
0.00856 36 | fliplr: 0.5 37 | mosaic: 1.0 38 | mixup: 0.243 39 | -------------------------------------------------------------------------------- /data/hyp.scratch.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.5 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 1.0 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.5 # image scale (+/- gain) 28 | shear: 0.0 # image shear (+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | -------------------------------------------------------------------------------- /data/hyp.tiny.person.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.5 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 1.0 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.5 # image scale (+/- gain) 28 | shear: 0.0 # image shear 
(+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | -------------------------------------------------------------------------------- /data/hyp.uva.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.5 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 1.0 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.5 # image scale (+/- gain) 28 | shear: 0.0 # image shear (+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | giou: 1.582 35 | -------------------------------------------------------------------------------- /data/notes.txt: -------------------------------------------------------------------------------- 1 | 2 | prune_channel: 3 | sparsity-train: 4 | model exp epoch 5 | yolov5l 60 300=100+200 6 | yolov5s 76 200=100+100 7 | 8 | fine-tune: 9 | model raw_exp exp epoch ratio 10 | yolov5l 60 68 100 0.5-0.1 11 | yolov5l 60 69 150 0.8-0.1 last_150.pt 12 | 13 | yolov5s 76 78 100 0.5-0.1 14 | yolov5s 76 79 100 0.2-0.1 15 | prune_layer: 16 | 17 | fine-tune: 18 | model raw_exp exp epoch overall_layers 19 | yolov5l 60 150 6 20 | yolov5s 76 84 100 3 21 | yolov5s 76 85 100 6 22 | yolov5s 76 100 9 23 | 24 | 新的计划,yolov5s双模态模型稀疏化训练。 25 | sparsity-train: 26 | model exp epoch 27 | yolov5s-visible 87 200=100+100 28 | yolov5s-lwir 86 200=100+100 29 | 30 | yolov5l-visible 88 300=100+200 31 | yolov5l-lwir 89 300=100+200 32 | 33 | fine-tune: 34 | model raw_exp exp epoch overall_layers 35 | yolov5s-visible 87 91 100 6 36 | yolov5s-lwir 86 92 100 6 37 | 38 | yolov5s-visible 87 371 100 2 39 | yolov5s-lwir 86 372 100 2 40 | 41 | yolov5s-visible 87 384 100 4 42 | yolov5s-lwir 86 385 100 4 43 | 44 | 45 | 46 | yolov5l-visible 88 117 100 32 47 | yolov5l-visible 88 374 100 33 48 | yolov5l-visible 88 118 100 36 49 | 50 | yolov5l-lwir 89 120 100 32 51 | yolov5l-lwir 89 375 100 33 52 | yolov5l-lwir 89 119 100 36 
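
(Hedged sketch, not taken from this repo: the "sparsity-train" and channel-prune "ratio" entries above follow the usual network-slimming recipe — an L1 push on BatchNorm gamma during sparsity training, then a global prune threshold over the sorted |gamma| values, in the spirit of functions.gather_bn_weights(). The names sr and ratio below are illustrative hyperparameters; sparsity.py / prune_channel_yolov5*.py may implement the details differently.)

import torch

def add_bn_l1_grad(model, sr=1e-3):
    # call after loss.backward(): L1 sub-gradient pushes BN scale factors toward zero
    for m in model.modules():
        if isinstance(m, torch.nn.BatchNorm2d):
            m.weight.grad.data.add_(sr * torch.sign(m.weight.data))

def channel_prune_threshold(model, ratio=0.5):
    # global threshold: channels whose |gamma| falls in the smallest `ratio` fraction get pruned
    gammas = torch.cat([m.weight.data.abs().flatten()
                        for m in model.modules()
                        if isinstance(m, torch.nn.BatchNorm2d)])
    sorted_gammas, _ = torch.sort(gammas)
    return sorted_gammas[int(len(sorted_gammas) * ratio)].item()
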
53 | 54 | C3-Res-S新模块引入: 55 | exp modal datasets 56 | 142 visible 包含CB结构 mm-uva 57 | 58 | Neck层尺度平移实验: 59 | exp modal datasets 60 | 138 visible visdrone 平移前 61 | 143 visible visdrone 平移后 62 | 63 | 64 | transformer实验 65 | exp modal datasets 引入transformer 66 | 87 visible mm-uva 引入前 67 | 146 visible mm-uva 引入后 68 | 69 | 194 visible mm-uva no-layer-normal 70 | 71 | 195 visible mm-uva TransformerLayer-new 72 | 73 | 特征检测尺度平移: 74 | exp dataset 75 | 96 uva 使用exp=86 76 | XX tiny-person 失败 77 | 113 visdrone 平移前 78 | 79 | 80 | yolov5s下验证Transformer与C3-Res-S 81 | epoch module exp 82 | 100 C3-Res-S 224 83 | 84 | 50 C3-Res-S 85 | 50 Transformer 86 | 87 | 88 | 端到端训练融合: 89 | model exp epoch 90 | 91 | vis+lw 368 40 92 | vis+lw 369 40 -------------------------------------------------------------------------------- /data/test.yaml: -------------------------------------------------------------------------------- 1 | nc: 80 2 | depth_multiple: 0.33 3 | width_multiple: 0.5 4 | anchors: 5 | [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]] 6 | backbone: 7 | [[-1, 1, Focus , [64, 3]], 8 | [-1, 1, Conv , [128, 3, 2]], 9 | [-1, 3, C3_Res_S , [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 10 | [-1, 1, Conv , [256, 3, 2]], 11 | [-1, 2, C3_Res_S , [256, True, 1, [0.5, 0.5], [1.0, 1.0]]], 12 | [-1, 1, Conv , [512, 3, 2]], 13 | [-1, 2, C3_Res_S , [512, True, 1, [0.5, 0.5], [1.0, 1.0]]], 14 | [-1, 1, Conv , [1024, 3, 2]], 15 | [-1, 1, SPP , [1024, [5, 9, 13], 0.5]], 16 | [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]]] 17 | head: [[-1, 1, Conv , [512, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 6], 1, Concat , [1]], [-1, 3, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 4], 1, Concat , [1]], [-1, 3, C3_Res_S , [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 3, 2]], [[-1, 14], 1, Concat , [1]], [-1, 3, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [512, 3, 2]], [[-1, 10], 1, Concat , [1]], [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [[17, 20, 23], 1, Detect , [ nc , anchors ]]] 18 | 19 | 20 | 21 | 22 | 23 | nc: 80 24 | depth_multiple: 0.33 25 | width_multiple: 0.5 26 | anchors: [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]] 27 | backbone: 28 | [[-1, 1, Focus , [64, 3]], 29 | [-1, 1, Conv , [128, 3, 2]], 30 | [-1, 3, C3_Res_S , [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 31 | [-1, 1, Conv , [256, 3, 2]], 32 | [-1, 1, C3_Res_S , [256, True, 1, [0.5, 0.5], [1.0]]], 33 | [-1, 1, Conv , [512, 3, 2]], 34 | [-1, 1, C3_Res_S , [512, True, 1, [0.5, 0.5], [1.0]]], 35 | [-1, 1, Conv , [1024, 3, 2]], 36 | [-1, 1, SPP , [1024, [5, 9, 13], 0.5]], 37 | [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]]] 38 | head: [[-1, 1, Conv , [512, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 6], 1, Concat , [1]], [-1, 3, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 4], 1, Concat , [1]], [-1, 3, C3_Res_S , [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 3, 2]], [[-1, 14], 1, Concat , [1]], [-1, 3, 
C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [512, 3, 2]], [[-1, 10], 1, Concat , [1]], [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [[17, 20, 23], 1, Detect , [ nc , anchors ]]] 39 | 40 | 41 | nc: 80 42 | depth_multiple: 0.33 43 | width_multiple: 0.5 44 | anchors: [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]] 45 | backbone: 46 | [[-1, 1, Focus , [64, 3]], 47 | [-1, 1, Conv , [128, 3, 2]], 48 | [-1, 3, C3_Res_S , [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 49 | [-1, 1, Conv , [256, 3, 2]], 50 | [-1, 1, C3_Res_S , [256, True, 1, [0.5, 0.5], [1.0]]], 51 | [-1, 1, Conv , [512, 3, 2]], 52 | [-1, 2, C3_Res_S , [512, True, 1, [0.5, 0.5], [1.0, 1.0]]], 53 | [-1, 1, Conv , [1024, 3, 2]], 54 | [-1, 1, SPP , [1024, [5, 9, 13], 0.5]], 55 | [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]]] 56 | head: [[-1, 1, Conv , [512, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 6], 1, Concat , [1]], [-1, 3, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 1, 1]], [-1, 1, nn.Upsample , [ None , 2, 'nearest']], [[-1, 4], 1, Concat , [1]], [-1, 3, C3_Res_S , [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [256, 3, 2]], [[-1, 14], 1, Concat , [1]], [-1, 3, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [-1, 1, Conv , [512, 3, 2]], [[-1, 10], 1, Concat , [1]], [-1, 3, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], [[17, 20, 23], 1, Detect , [ nc , anchors ]]] 57 | 58 | 59 | -------------------------------------------------------------------------------- /data/tiny_person.yaml: -------------------------------------------------------------------------------- 1 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 2 | train: /home/shw/data/tiny-person/train/images 3 | val: /home/shw/data/tiny-person/test/images 4 | 5 | # number of classes 6 | nc: 2 7 | 8 | # class names 9 | names: [ 10 | "sea_person", 11 | "earth_person" 12 | ] 13 | 14 | -------------------------------------------------------------------------------- /data/uva.yaml: -------------------------------------------------------------------------------- 1 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 2 | train: /home/shw/data/UVA/train/images 3 | val: /home/shw/data/UVA/val/images 4 | test: /home/shw/data/UVA/test/images 5 | # number of classes 6 | nc: 8 7 | 8 | # class names 9 | names: [ 10 | "car", 11 | "person", 12 | "minibus", 13 | "tricycle", 14 | "bus", 15 | "big-truck", 16 | "van", 17 | "cyclist" 18 | ] 19 | 20 | -------------------------------------------------------------------------------- /data/visdrone10.yaml: -------------------------------------------------------------------------------- 1 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 2 | train: /data2/data_backup/data/shw/VisDrone2019-DET-train/images 3 | val: /data2/data_backup/data/shw/VisDrone2019-DET-val/images 4 | 5 | # number of classes 6 | nc: 10 7 | 8 | # class names 9 | names: [ 10 | 'pedestrian', 11 | 'people', 12 | 'bicycle', 13 | 'car', 14 | 'van', 15 | 'truck', 16 | 
'tricycle', 17 | 'awning-tricycle', 18 | 'bus', 19 | 'motor' 20 | ] 21 | -------------------------------------------------------------------------------- /data/voc.yaml: -------------------------------------------------------------------------------- 1 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ 2 | # Train command: python train.py --data voc.yaml 3 | # Default dataset location is next to /yolov5: 4 | # /parent_folder 5 | # /VOC 6 | # /yolov5 7 | 8 | 9 | # download command/URL (optional) 10 | download: bash data/scripts/get_voc.sh 11 | 12 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] 13 | train: ../VOC/images/train/ # 16551 images 14 | val: ../VOC/images/val/ # 4952 images 15 | 16 | # number of classes 17 | nc: 20 18 | 19 | # class names 20 | names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 21 | 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] 22 | -------------------------------------------------------------------------------- /datasets/aware_datasets.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch as t 3 | from torch.utils import data 4 | from PIL import Image 5 | import numpy as np 6 | from torchvision import transforms as T 7 | import cv2 8 | from torch.utils.data import DataLoader 9 | import glob 10 | from pathlib import Path 11 | import torch 12 | from PIL import Image, ImageStat 13 | import random 14 | 15 | 16 | class AwareData(data.Dataset): 17 | 18 | def __init__(self, root="/home/shw/data/uveAwareData", transforms=None, mode="train"): 19 | ''' 20 | 获取所有图片的地址,并根据训练,测试,验证三类对数据进行划分 21 | ''' 22 | self.root = os.path.join(root, mode) 23 | self.imgs = glob.glob(str(Path(self.root) / '**' / '*.*'), recursive=True) # 获取root路径下所有图片的地址 24 | self.smooth = False 25 | print(self.imgs) 26 | # print imgs 27 | 28 | def __getitem__(self, index): 29 | """ 30 | 返回图像数据和标签,0代表白天1代表晚上 31 | """ 32 | img_path = self.imgs[index] 33 | 34 | if self.smooth: 35 | label = brightness(img_path) / 255.0 36 | # print(label) 37 | else: 38 | if str(img_path).__contains__("day"): 39 | label = 0 40 | else: 41 | label = 1 42 | 43 | data = cv2.imread(img_path) # BGR 44 | data = data[..., ::-1] # BGR->RGB 45 | data = cv2.resize(data, (128, 128)).transpose(2, 0, 1) 46 | 47 | return torch.from_numpy(data), label 48 | 49 | def __len__(self): 50 | """ 51 | 返回数据集中所有图片的个数 52 | """ 53 | return len(self.imgs) 54 | 55 | 56 | def brightness(im_file): 57 | im = Image.open(im_file).convert('L') 58 | stat = ImageStat.Stat(im) 59 | return stat.rms[0] 60 | 61 | 62 | if __name__ == '__main__': 63 | 64 | train_dataset = AwareData("/home/shw/data/uveAwareData", mode="train") # 训练集 65 | # val_data = AwareData("/home/shw/data/uveAwareData", mode="val") # 验证集 66 | 67 | train_dataloader = DataLoader(train_dataset, 1, shuffle=True, num_workers=1) 68 | 69 | for ii, (data, label) in enumerate(train_dataloader): 70 | # 训练模型参数 71 | print(data.shape, label) 72 | -------------------------------------------------------------------------------- /detect.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | from pathlib import Path 4 | 5 | import cv2 6 | import torch 7 | import torch.backends.cudnn as cudnn 8 | from numpy import random 9 | 10 | from models.experimental import attempt_load 11 | from utils.datasets 
import LoadStreams, LoadImages 12 | from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ 13 | scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path 14 | from utils.plots import plot_one_box 15 | from utils.torch_utils import select_device, load_classifier, time_synchronized 16 | 17 | 18 | def detect(save_img=False): 19 | source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size 20 | webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( 21 | ('rtsp://', 'rtmp://', 'http://')) 22 | 23 | # Directories 24 | save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run 25 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir 26 | 27 | # Initialize 28 | set_logging() 29 | device = select_device(opt.device) 30 | half = device.type != 'cpu' # half precision only supported on CUDA 31 | 32 | # Load model 33 | model = attempt_load(weights, map_location=device) # load FP32 model 34 | stride = int(model.stride.max()) # model stride 35 | imgsz = check_img_size(imgsz, s=stride) # check img_size 36 | if half: 37 | model.half() # to FP16 38 | 39 | # Set Dataloader 40 | vid_path, vid_writer = None, None 41 | if webcam: 42 | view_img = check_imshow() 43 | cudnn.benchmark = True # set True to speed up constant image size inference 44 | dataset = LoadStreams(source, img_size=imgsz, stride=stride) 45 | else: 46 | save_img = True 47 | dataset = LoadImages(source, img_size=imgsz, stride=stride) 48 | 49 | # Get names and colors 50 | names = model.module.names if hasattr(model, 'module') else model.names 51 | colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] 52 | 53 | # Run inference 54 | if device.type != 'cpu': 55 | model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once 56 | t0 = time.time() 57 | for path, img, im0s, vid_cap in dataset: 58 | img = torch.from_numpy(img).to(device) 59 | img = img.half() if half else img.float() # uint8 to fp16/32 60 | img /= 255.0 # 0 - 255 to 0.0 - 1.0 61 | if img.ndimension() == 3: 62 | img = img.unsqueeze(0) 63 | 64 | # Inference 65 | t1 = time_synchronized() 66 | pred = model(img, augment=opt.augment)[0] 67 | 68 | # Apply NMS 69 | pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) 70 | t2 = time_synchronized() 71 | 72 | # Process detections 73 | for i, det in enumerate(pred): # detections per image 74 | if webcam: # batch_size >= 1 75 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count 76 | else: 77 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) 78 | 79 | p = Path(p) # to Path 80 | save_path = str(save_dir / p.name) # img.jpg 81 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt 82 | s += '%gx%g ' % img.shape[2:] # print string 83 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh 84 | if len(det): 85 | # Rescale boxes from img_size to im0 size 86 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() 87 | 88 | # Print results 89 | for c in det[:, -1].unique(): 90 | n = (det[:, -1] == c).sum() # detections per class 91 | s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string 92 | 93 | # Write results 94 | for *xyxy, conf, cls in reversed(det): 95 | if save_txt: # Write to file 96 | 
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh 97 | line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format 98 | with open(txt_path + '.txt', 'a') as f: 99 | f.write(('%g ' * len(line)).rstrip() % line + '\n') 100 | 101 | if save_img or view_img: # Add bbox to image 102 | label = f'{names[int(cls)]} {conf:.2f}' 103 | plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1) 104 | 105 | # Print time (inference + NMS) 106 | print(f'{s}Done. ({t2 - t1:.3f}s)') 107 | 108 | # Stream results 109 | if view_img: 110 | cv2.imshow(str(p), im0) 111 | cv2.waitKey(1) # 1 millisecond 112 | 113 | # Save results (image with detections) 114 | if save_img: 115 | if dataset.mode == 'image': 116 | cv2.imwrite(save_path, im0) 117 | else: # 'video' 118 | if vid_path != save_path: # new video 119 | vid_path = save_path 120 | if isinstance(vid_writer, cv2.VideoWriter): 121 | vid_writer.release() # release previous video writer 122 | 123 | fourcc = 'mp4v' # output video codec 124 | fps = vid_cap.get(cv2.CAP_PROP_FPS) 125 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 126 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 127 | vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) 128 | vid_writer.write(im0) 129 | 130 | if save_txt or save_img: 131 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' 132 | print(f"Results saved to {save_dir}{s}") 133 | 134 | print(f'Done. ({time.time() - t0:.3f}s)') 135 | 136 | 137 | if __name__ == '__main__': 138 | parser = argparse.ArgumentParser() 139 | parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp88/weights/best.pt', help='model.pt path(s)') 140 | parser.add_argument('--source', type=str, default='data/images/visible', help='source') # file/folder, 0 for webcam 141 | parser.add_argument('--img-size', type=int, default=672, help='inference size (pixels)') 142 | parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') 143 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') 144 | parser.add_argument('--device', default='0', help='cuda device, i.e. 
0 or cpu') 145 | parser.add_argument('--view-img', action='store_true',default=False, help='display results') 146 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') 147 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') 148 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') 149 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') 150 | parser.add_argument('--augment', action='store_true', help='augmented inference') 151 | parser.add_argument('--update', action='store_true', help='update all models') 152 | parser.add_argument('--project', default='runs/detect', help='save results to project/name') 153 | parser.add_argument('--name', default='exp', help='save results to project/name') 154 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') 155 | opt = parser.parse_args() 156 | print(opt) 157 | 158 | with torch.no_grad(): 159 | detect() 160 | -------------------------------------------------------------------------------- /detect_multi.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | from pathlib import Path 4 | 5 | import cv2 6 | import torch 7 | import torch.backends.cudnn as cudnn 8 | from numpy import random 9 | 10 | from models.experimental import attempt_load 11 | from datasets.multi_modal_uva_datasets import LoadStreams, LoadImages 12 | from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ 13 | scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path 14 | from utils.plots import plot_one_box 15 | from utils.torch_utils import select_device, load_classifier, time_synchronized 16 | from models.modal_ensemble_model_uva_aware import ModalEnseModel 17 | 18 | 19 | def detect(save_img=False): 20 | source, view_img, save_txt, imgsz, weights_visible, weights_lwir, aware = opt.source, opt.view_img, opt.save_txt, opt.img_size, opt.weights_visible, opt.weights_lwir, opt.aware 21 | webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( 22 | ('rtsp://', 'rtmp://', 'http://')) 23 | 24 | # Directories 25 | save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run 26 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir 27 | 28 | # Initialize 29 | set_logging() 30 | device = select_device(opt.device) 31 | half = device.type != 'cpu' # half precision only supported on CUDA 32 | 33 | # Load model 34 | model = ModalEnseModel(opt.aware) 35 | model.load_weights(weights_visible, weights_lwir, device) 36 | stride = 32 # model stride 37 | imgsz = check_img_size(imgsz, s=stride) # check img_size 38 | # if half: 39 | # model.half() # to FP16 40 | 41 | # Set Dataloader 42 | vid_path, vid_writer = None, None 43 | if webcam: 44 | view_img = check_imshow() 45 | cudnn.benchmark = True # set True to speed up constant image size inference 46 | dataset = LoadStreams(source, img_size=imgsz, stride=stride) 47 | else: 48 | save_img = True 49 | dataset = LoadImages(source, img_size=imgsz, stride=stride) 50 | 51 | # Get names and colors 52 | names = [ 53 | "car", 54 | "person", 55 | "minibus", 56 | "tricycle", 57 | "bus", 58 | "big-truck", 59 | "van", 60 | "cyclist" 61 | ] 62 | colors = [[random.randint(0, 255) for _ in range(3)] 
for _ in names] 63 | 64 | # Run inference 65 | if device.type != 'cpu': 66 | model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())), 67 | torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())), 68 | torch.zeros(1, 3, 128, 128).to(device).type_as(next(model.parameters())) 69 | ) # run once 70 | t0 = time.time() 71 | for path, im0s, img_v, img_l in dataset: 72 | img_v = torch.from_numpy(img_v).to(device) 73 | img_v = img_v.float() # uint8 to fp16/32 74 | 75 | img_l = torch.from_numpy(img_l).to(device) 76 | img_l = img_l.float() # uint8 to fp16/32 77 | 78 | img_v /= 255.0 # 0 - 255 to 0.0 - 1.0 79 | img_l /= 255.0 80 | if img_v.ndimension() == 3: 81 | img_v = img_v.unsqueeze(0) 82 | if img_l.ndimension() == 3: 83 | img_l = img_l.unsqueeze(0) 84 | 85 | # Inference 86 | t1 = time_synchronized() 87 | pred = model(img_v, img_l, aware, augment=opt.augment) 88 | 89 | # Apply NMS 90 | pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) 91 | t2 = time_synchronized() 92 | 93 | # Process detections 94 | for i, det in enumerate(pred): # detections per image 95 | if webcam: # batch_size >= 1 96 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count 97 | else: 98 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) 99 | 100 | p = Path(p) # to Path 101 | save_path = str(save_dir / p.name) # img.jpg 102 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt 103 | s += '%gx%g ' % img_v.shape[2:] # print string 104 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh 105 | if len(det): 106 | # Rescale boxes from img_size to im0 size 107 | det[:, :4] = scale_coords(img_v.shape[2:], det[:, :4], im0.shape).round() 108 | 109 | # Print results 110 | for c in det[:, -1].unique(): 111 | n = (det[:, -1] == c).sum() # detections per class 112 | s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string 113 | 114 | # Write results 115 | for *xyxy, conf, cls in reversed(det): 116 | if save_txt: # Write to file 117 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh 118 | line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format 119 | with open(txt_path + '.txt', 'a') as f: 120 | f.write(('%g ' * len(line)).rstrip() % line + '\n') 121 | 122 | if save_img or view_img: # Add bbox to image 123 | label = f'{names[int(cls)]} {conf:.2f}' 124 | plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1) 125 | 126 | # Print time (inference + NMS) 127 | print(f'{s}Done. 
({t2 - t1:.3f}s)') 128 | 129 | # Stream results 130 | if view_img: 131 | cv2.imshow(str(p), im0) 132 | cv2.waitKey(1) # 1 millisecond 133 | 134 | # Save results (image with detections) 135 | if save_img: 136 | if dataset.mode == 'images': 137 | cv2.imwrite(save_path, im0) 138 | # else: # 'video' 139 | # if vid_path != save_path: # new video 140 | # vid_path = save_path 141 | # if isinstance(vid_writer, cv2.VideoWriter): 142 | # vid_writer.release() # release previous video writer 143 | # 144 | # fourcc = 'mp4v' # output video codec 145 | # fps = vid_cap.get(cv2.CAP_PROP_FPS) 146 | # w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 147 | # h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 148 | # vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) 149 | # vid_writer.write(im0) 150 | 151 | if save_txt or save_img: 152 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' 153 | print(f"Results saved to {save_dir}{s}") 154 | 155 | print(f'Done. ({time.time() - t0:.3f}s)') 156 | 157 | 158 | if __name__ == '__main__': 159 | parser = argparse.ArgumentParser() 160 | parser.add_argument('--source', type=str, default='data/images/visible', help='source') # file/folder, 0 for webcam 161 | parser.add_argument('--img-size', type=int, default=672, help='inference size (pixels)') 162 | parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') 163 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') 164 | parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or cpu') 165 | parser.add_argument('--view-img', action='store_true', default=False, help='display results') 166 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') 167 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') 168 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') 169 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') 170 | parser.add_argument('--augment', action='store_true', help='augmented inference') 171 | parser.add_argument('--update', action='store_true', help='update all models') 172 | parser.add_argument('--project', default='runs/detect', help='save results to project/name') 173 | parser.add_argument('--name', default='exp', help='save results to project/name') 174 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') 175 | parser.add_argument('--weights_visible', nargs='+', type=str, 176 | default='runs/train/exp88/weights/best.pt', 177 | help='model.pt path(s)') 178 | parser.add_argument('--weights_lwir', nargs='+', type=str, 179 | default='runs/train/exp89/weights/best.pt', 180 | help='model.pt path(s)') 181 | parser.add_argument('--aware', action='store_true', default=True, help='save results to *.txt') 182 | opt = parser.parse_args() 183 | print(opt) 184 | 185 | with torch.no_grad(): 186 | detect() 187 | -------------------------------------------------------------------------------- /functions.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def gather_bn_weights(model): 5 | size = [] 6 | for m in model.modules(): 7 | if isinstance(m, torch.nn.BatchNorm2d): 8 | size.append(m.weight.data.shape[0]) 9 | 10 | bn_weights = torch.zeros(sum(size)) 11 | 
index = 0 12 | idx = 0 13 | for m in model.modules(): 14 | if isinstance(m, torch.nn.BatchNorm2d): 15 | bn_weights[index:(index + size[idx])] = m.weight.data.abs().clone() 16 | index += size[idx] 17 | idx += 1 18 | return bn_weights 19 | 20 | 21 | def gather_bn_weights_(module_list, prune_idx): 22 | size_list = [module_list[idx][1].weight.data.shape[0] for idx in prune_idx] 23 | 24 | bn_weights = torch.zeros(sum(size_list)) 25 | index = 0 26 | for idx, size in zip(prune_idx, size_list): 27 | bn_weights[index:(index + size)] = module_list[idx][1].weight.data.abs().clone() 28 | index += size 29 | 30 | return bn_weights 31 | -------------------------------------------------------------------------------- /img/1-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-1.png -------------------------------------------------------------------------------- /img/1-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-2.png -------------------------------------------------------------------------------- /img/1-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-3.png -------------------------------------------------------------------------------- /img/1-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-4.png -------------------------------------------------------------------------------- /img/1-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-5.png -------------------------------------------------------------------------------- /img/1-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-6.png -------------------------------------------------------------------------------- /img/1-7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-7.png -------------------------------------------------------------------------------- /img/1-8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/img/1-8.png -------------------------------------------------------------------------------- /models/SE.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | import torch 4 | 5 | from torch.nn import Conv2d, Parameter, Softmax 6 | 7 | class SE(nn.Module): 8 | 9 | def __init__(self, in_chnls, ratio): 10 | super(SE, self).__init__() 11 | self.squeeze = nn.AdaptiveAvgPool2d((1, 1)) 12 | self.compress = nn.Conv2d(in_chnls, in_chnls//ratio, 1) 13 | self.excitation = nn.Conv2d(in_chnls//ratio, in_chnls, 1) 14 | 15 | def forward(self, 
x): 16 | out = self.squeeze(x) 17 | out = self.compress(out) 18 | out = F.relu(out) 19 | out = self.excitation(out) 20 | return torch.sigmoid(out) 21 | 22 | class Fus(nn.Module): 23 | 24 | def __init__(self, in_chnls, out_chnls): 25 | super(Fus, self).__init__() 26 | 27 | self.conv = nn.Conv2d(in_chnls, out_chnls, 1) 28 | self.bn = nn.BatchNorm2d(out_chnls) 29 | # self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) 30 | self.act = nn.LeakyReLU(0.1, inplace=True) 31 | 32 | def forward(self, x): 33 | 34 | return self.act(self.bn(self.conv(x))) 35 | 36 | class PAM_Module(nn.Module): 37 | """ Position attention module""" 38 | #Ref from SAGAN 39 | def __init__(self, in_dim): 40 | super(PAM_Module, self).__init__() 41 | self.chanel_in = in_dim 42 | 43 | self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1) 44 | self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1) 45 | self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) 46 | self.gamma = Parameter(torch.zeros(1)) 47 | 48 | self.softmax = Softmax(dim=-1) 49 | def forward(self, x): 50 | """ 51 | inputs : 52 | x : input feature maps( B X C X H X W) 53 | returns : 54 | out : attention value + input feature 55 | attention: B X (HxW) X (HxW) 56 | """ 57 | m_batchsize, C, height, width = x.size() 58 | proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1) 59 | proj_key = self.key_conv(x).view(m_batchsize, -1, width*height) 60 | energy = torch.bmm(proj_query, proj_key) 61 | attention = self.softmax(energy) 62 | proj_value = self.value_conv(x).view(m_batchsize, -1, width*height) 63 | 64 | out = torch.bmm(proj_value, attention.permute(0, 2, 1)) 65 | out = out.view(m_batchsize, C, height, width) 66 | 67 | out = self.gamma*out + x 68 | return out -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/models/__init__.py -------------------------------------------------------------------------------- /models/aware_model.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 本部分是光照感知模块,用来计算输入的可见光图像的光照强度 3 | ''' 4 | from torch.autograd import Variable # 这一步还没有显式用到variable,但是现在写在这里也没问题,后面会用到 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from models.common import Focus, Conv 8 | 9 | 10 | # define 11 | class AwareModel(nn.Module): 12 | def __init__(self): 13 | super(AwareModel, self).__init__() 14 | self.focus = Focus(3, 16, 5) # /2 15 | 16 | # convolutional layer 17 | # self.conv1 = nn.Conv2d(3, 16, 5) 18 | # # max pooling layer 19 | self.pool = nn.MaxPool2d(2, 2) # /4 20 | self.conv2 = Conv(16, 32, 5) 21 | self.dropout = nn.Dropout(0.2) 22 | self.fc1 = nn.Linear(32 * 16 * 16, 128) 23 | self.fc2 = nn.Linear(128, 32) # 线性层: 256长度 -> 84长度 24 | self.fc3 = nn.Linear(32, 2) # 线性层:84长度 -> 2长度 25 | self.softmax = nn.Softmax(dim=1) # Softmax 26 | 27 | def forward(self, x): 28 | # print("x.shape",x.shape) 29 | # add sequence of convolutional and max pooling layers 30 | x = self.pool(F.relu(self.focus(x))) 31 | x = self.pool(F.relu(self.conv2(x))) 32 | x = self.dropout(x) 33 | # print("x.shape", x.shape) 34 | x = x.view(-1, 32 * 16 * 16) 35 | x = F.relu(self.fc1(x)) 36 | x = self.dropout(F.relu(self.fc2(x))) 37 | x = 
self.softmax(self.fc3(x)) 38 | # print(x) 39 | return x 40 | 41 | 42 | # create a complete CNN 43 | model = AwareModel() 44 | -------------------------------------------------------------------------------- /models/experimental.py: -------------------------------------------------------------------------------- 1 | # This file contains experimental modules 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | 7 | from models.common import Conv, DWConv 8 | from utils.google_utils import attempt_download 9 | 10 | 11 | class CrossConv(nn.Module): 12 | # Cross Convolution Downsample 13 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 14 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 15 | super(CrossConv, self).__init__() 16 | c_ = int(c2 * e) # hidden channels 17 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 18 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 19 | self.add = shortcut and c1 == c2 20 | 21 | def forward(self, x): 22 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 23 | 24 | 25 | class Sum(nn.Module): 26 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 27 | def __init__(self, n, weight=False): # n: number of inputs 28 | super(Sum, self).__init__() 29 | self.weight = weight # apply weights boolean 30 | self.iter = range(n - 1) # iter object 31 | if weight: 32 | self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights 33 | 34 | def forward(self, x): 35 | y = x[0] # no weight 36 | if self.weight: 37 | w = torch.sigmoid(self.w) * 2 38 | for i in self.iter: 39 | y = y + x[i + 1] * w[i] 40 | else: 41 | for i in self.iter: 42 | y = y + x[i + 1] 43 | return y 44 | 45 | 46 | class GhostConv(nn.Module): 47 | # Ghost Convolution https://github.com/huawei-noah/ghostnet 48 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups 49 | super(GhostConv, self).__init__() 50 | c_ = c2 // 2 # hidden channels 51 | self.cv1 = Conv(c1, c_, k, s, None, g, act) 52 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) 53 | 54 | def forward(self, x): 55 | y = self.cv1(x) 56 | return torch.cat([y, self.cv2(y)], 1) 57 | 58 | 59 | class GhostBottleneck(nn.Module): 60 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet 61 | def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride 62 | super(GhostBottleneck, self).__init__() 63 | c_ = c2 // 2 64 | self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw 65 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw 66 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear 67 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), 68 | Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() 69 | 70 | def forward(self, x): 71 | return self.conv(x) + self.shortcut(x) 72 | 73 | 74 | class MixConv2d(nn.Module): 75 | # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 76 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): 77 | super(MixConv2d, self).__init__() 78 | groups = len(k) 79 | if equal_ch: # equal c_ per group 80 | i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices 81 | c_ = [(i == g).sum() for g in range(groups)] # intermediate channels 82 | else: # equal weight.numel() per group 83 | b = [c2] + [0] * groups 84 | a = np.eye(groups + 1, groups, k=-1) 85 | a -= np.roll(a, 1, axis=1) 86 | a *= np.array(k) ** 2 87 | a[0] = 1 88 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 89 | 90 | self.m = 
nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) 91 | self.bn = nn.BatchNorm2d(c2) 92 | self.act = nn.LeakyReLU(0.1, inplace=True) 93 | 94 | def forward(self, x): 95 | return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 96 | 97 | 98 | class Ensemble(nn.ModuleList): 99 | # Ensemble of models 100 | def __init__(self): 101 | super(Ensemble, self).__init__() 102 | 103 | def forward(self, x, augment=False): 104 | y = [] 105 | for module in self: 106 | y.append(module(x, augment)[0]) 107 | # y = torch.stack(y).max(0)[0] # max ensemble 108 | # y = torch.stack(y).mean(0) # mean ensemble 109 | y = torch.cat(y, 1) # nms ensemble 110 | return y, None # inference, train output 111 | 112 | 113 | def attempt_load(weights, map_location=None): 114 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 115 | model = Ensemble() 116 | for w in weights if isinstance(weights, list) else [weights]: 117 | attempt_download(w) 118 | ckpt = torch.load(w, map_location=map_location) # load 119 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model 120 | 121 | # Compatibility updates 122 | for m in model.modules(): 123 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: 124 | m.inplace = True # pytorch 1.7.0 compatibility 125 | elif type(m) is Conv: 126 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 127 | 128 | if len(model) == 1: 129 | return model[-1] # return model 130 | else: 131 | print('Ensemble created with %s\n' % weights) 132 | for k in ['names', 'stride']: 133 | setattr(model, k, getattr(model[-1], k)) 134 | return model # return ensemble 135 | -------------------------------------------------------------------------------- /models/export.py: -------------------------------------------------------------------------------- 1 | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats 2 | 3 | Usage: 4 | $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 5 | """ 6 | 7 | import argparse 8 | import sys 9 | import time 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | 13 | import torch 14 | import torch.nn as nn 15 | 16 | import models 17 | from models.experimental import attempt_load 18 | from utils.activations import Hardswish, SiLU 19 | from utils.general import set_logging, check_img_size 20 | from utils.torch_utils import select_device 21 | 22 | if __name__ == '__main__': 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument('--weights', type=str, default='/home/shw/code/yolov5-new/yolov5/runs/train/exp117/weights/best.pt', help='weights path') # from yolov5/models/ 25 | parser.add_argument('--img-size', nargs='+', type=int, default=[672, 672], help='image size') # height, width 26 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 27 | parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') 28 | parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') 29 | parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') 30 | opt = parser.parse_args() 31 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 32 | print(opt) 33 | set_logging() 34 | t = time.time() 35 | 36 | # Load PyTorch model 37 | device = select_device(opt.device) 38 | model = attempt_load(opt.weights, map_location=device) # load FP32 model 39 | labels = model.names 40 | 41 | # Checks 42 | gs = int(max(model.stride)) # grid size (max stride) 43 | opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 44 | 45 | # Input 46 | img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection 47 | 48 | # Update model 49 | # for k, m in model.named_modules(): 50 | # m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 51 | # if isinstance(m, models.common.Conv): # assign export-friendly activations 52 | # if isinstance(m.act, nn.Hardswish): 53 | # m.act = nn.LeakyReLU(0.1, inplace=True) 54 | # elif isinstance(m.act, nn.SiLU): 55 | # m.act = nn.LeakyReLU(0.1, inplace=True) 56 | # elif isinstance(m, models.yolo.Detect): 57 | # m.forward = m.forward_export # assign forward (optional) 58 | model.model[-1].export = not opt.grid # set Detect() layer grid export 59 | y = model(img) # dry run 60 | 61 | # ONNX export 62 | try: 63 | import onnx 64 | 65 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__) 66 | f = opt.weights.replace('.pt', '.onnx') # filename 67 | torch.onnx.export(model, img, f, verbose=True, opset_version=10, input_names=['data'], 68 | output_names=['classes', 'boxes'] if y is None else ['output']) 69 | 70 | # Checks 71 | onnx_model = onnx.load(f) # load onnx model 72 | onnx.checker.check_model(onnx_model) # check onnx model 73 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 74 | print('ONNX export success, saved as %s' % f) 75 | except Exception as e: 76 | print('ONNX export failure: %s' % e) 77 | 78 | # Finish 79 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' 
% (time.time() - t)) 80 | -------------------------------------------------------------------------------- /models/export_multi.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | import time 4 | 5 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 6 | import torch 7 | import torch.nn as nn 8 | import models 9 | from models.experimental import attempt_load 10 | from utils.activations import Hardswish 11 | from utils.general import set_logging, check_img_size 12 | from models.modal_ensemble_model_uva import ModalEnseModel 13 | from utils.torch_utils import select_device 14 | from torch.autograd import Variable 15 | 16 | if __name__ == '__main__': 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument('--weights_visible', nargs='+', type=str, 19 | default='/home/shw/code/yolov5-new/yolov5/runs/train/exp371/weights/best.pt', 20 | help='model.pt path(s)') 21 | parser.add_argument('--weights_lwir', nargs='+', type=str, 22 | default='/home/shw/code/yolov5-new/yolov5/runs/train/exp372/weights/best.pt', 23 | help='model.pt path(s)') 24 | parser.add_argument('--img-size', nargs='+', type=int, default=[672, 672], help='image size') # height, width 25 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 26 | opt = parser.parse_args() 27 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 28 | print(opt) 29 | set_logging() 30 | t = time.time() 31 | 32 | device = select_device('0') 33 | 34 | # Load PyTorch model 35 | model = ModalEnseModel() 36 | model.load_weights(opt.weights_visible, opt.weights_lwir, device) 37 | 38 | # model = attempt_load(opt.weights_visible, map_location=device) # load FP32 model 39 | # labels = model.names 40 | 41 | # Checks 42 | # gs = int(max(model.model_visible.stride)) # grid size (max stride) 43 | # opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 44 | 45 | # Input 46 | img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection 47 | lwir = torch.zeros(opt.batch_size, 3, *opt.img_size) 48 | # Update model 49 | # for k, m in model.named_modules(): 50 | # m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 51 | # if isinstance(m, models.common.Conv) and isinstance(m.act, nn.Hardswish): 52 | # # m.act = Hardswish() # assign activation 53 | # m.act = nn.LeakyReLU(0.01) 54 | # # if isinstance(m, models.yolo.Detect): 55 | # # m.forward = m.forward_export # assign forward (optional) 56 | # model.model_visible.model[-1].export = True # set Detect() layer export=True 57 | # model.model_lwir.model[-1].export = True 58 | # model.model[-1].export = True 59 | # img = Variable(img.to(device)) 60 | img = img.to(device, non_blocking=True) 61 | img = img.float() 62 | img /= 255.0 # 0 - 255 to 0.0 - 1.0 63 | 64 | lwir = lwir.to(device, non_blocking=True) 65 | lwir = lwir.float() 66 | lwir /= 255.0 # 0 - 255 to 0.0 - 1.0 67 | 68 | # lwir = Variable(lwir.to(device)) 69 | # y = model(img,augment=False) # dry run 70 | 71 | model.export() 72 | y = model(img, lwir, augment=False) # dry run 73 | 74 | # ONNX export 75 | try: 76 | import onnx 77 | 78 | print('\nStarting ONNX export with onnx %s...' 
% onnx.__version__) 79 | f = opt.weights_visible.replace('.pt', '.onnx') # filename 80 | torch.onnx.export(model, (img, lwir), f, verbose=True, opset_version=10, input_names=['img', 'lwir'], 81 | output_names=['classes', 'boxes'] if y is None else ['img_scale_1', 'img_scale_2', 82 | 'img_scale_3', 'lwir_scale_1', 83 | 'lwir_scale_2', 'lwir_scale_3']) 84 | 85 | # Checks 86 | onnx_model = onnx.load(f) # load onnx model 87 | onnx.checker.check_model(onnx_model) # check onnx model 88 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 89 | print('ONNX export success, saved as %s' % f) 90 | except Exception as e: 91 | print('ONNX export failure: %s' % e) 92 | 93 | # Finish 94 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) 95 | -------------------------------------------------------------------------------- /models/export_multi_aware.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | import time 4 | 5 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 6 | import torch 7 | import torch.nn as nn 8 | import models 9 | from models.experimental import attempt_load 10 | from utils.activations import Hardswish 11 | from utils.general import set_logging, check_img_size 12 | from models.modal_ensemble_model_uva_aware import ModalEnseModel 13 | from utils.torch_utils import select_device 14 | from torch.autograd import Variable 15 | import cv2 16 | from torchvision.transforms import Resize 17 | 18 | if __name__ == '__main__': 19 | parser = argparse.ArgumentParser() 20 | parser.add_argument('--weights_visible', nargs='+', type=str, 21 | default='/home/shw/code/yolov5-new/yolov5/runs/train/exp371/weights/best.pt', 22 | help='model.pt path(s)') 23 | parser.add_argument('--weights_lwir', nargs='+', type=str, 24 | default='/home/shw/code/yolov5-new/yolov5/runs/train/exp372/weights/best.pt', 25 | help='model.pt path(s)') 26 | parser.add_argument('--img-size', nargs='+', type=int, default=[672, 672], help='image size') # height, width 27 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 28 | opt = parser.parse_args() 29 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 30 | print(opt) 31 | set_logging() 32 | t = time.time() 33 | 34 | device = select_device('0') 35 | 36 | # Load PyTorch model 37 | model = ModalEnseModel(True) 38 | model.load_weights(opt.weights_visible, opt.weights_lwir, device) 39 | 40 | # model = attempt_load(opt.weights_visible, map_location=device) # load FP32 model 41 | # labels = model.names 42 | 43 | # Checks 44 | # gs = int(max(model.model_visible.stride)) # grid size (max stride) 45 | # opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 46 | 47 | # Input 48 | img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection 49 | lwir = torch.zeros(opt.batch_size, 3, *opt.img_size) 50 | img_rs = torch.zeros(opt.batch_size, 3, 128, 128) 51 | # Update model 52 | # for k, m in model.named_modules(): 53 | # m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 54 | # if isinstance(m, models.common.Conv) and isinstance(m.act, nn.Hardswish): 55 | # # m.act = Hardswish() # assign activation 56 | # m.act = nn.LeakyReLU(0.01) 57 | # # if isinstance(m, models.yolo.Detect): 58 | # # m.forward = m.forward_export # assign forward (optional) 59 | # model.model_visible.model[-1].export = True 
# set Detect() layer export=True 60 | # model.model_lwir.model[-1].export = True 61 | # model.model[-1].export = True 62 | # img = Variable(img.to(device)) 63 | img = img.to(device, non_blocking=True) 64 | img = img.float() 65 | img /= 255.0 # 0 - 255 to 0.0 - 1.0 66 | 67 | lwir = lwir.to(device, non_blocking=True) 68 | lwir = lwir.float() 69 | lwir /= 255.0 # 0 - 255 to 0.0 - 1.0 70 | 71 | img_aware = img_rs.to(device, non_blocking=True) 72 | img_aware = img_aware.float() 73 | img_aware /= 255.0 74 | 75 | model.export() 76 | y = model(img, lwir, img_aware, augment=False) # dry run 77 | 78 | # ONNX export 79 | try: 80 | import onnx 81 | 82 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__) 83 | f = opt.weights_visible.replace('.pt', '.onnx') # filename 84 | torch.onnx.export(model, (img, lwir, img_aware), f, verbose=True, opset_version=10, 85 | input_names=['img', 'lwir', 'img_aware'], 86 | output_names=['classes', 'boxes'] if y is None else ['img_scale_1', 'img_scale_2', 87 | 'img_scale_3', 'lwir_scale_1', 88 | 'lwir_scale_2', 'lwir_scale_3', 89 | 'aware_score']) 90 | 91 | # Checks 92 | onnx_model = onnx.load(f) # load onnx model 93 | onnx.checker.check_model(onnx_model) # check onnx model 94 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 95 | print('ONNX export success, saved as %s' % f) 96 | except Exception as e: 97 | print('ONNX export failure: %s' % e) 98 | 99 | # Finish 100 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) 101 | -------------------------------------------------------------------------------- /models/funcs.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # Original author: Francisco Massa: 4 | # https://github.com/fmassa/object-detection.torch 5 | # Ported to PyTorch by Max deGroot (02/01/2017) 6 | def nms(boxes, scores, overlap=0.5, top_k=200): 7 | """Apply non-maximum suppression at test time to avoid detecting too many 8 | overlapping bounding boxes for a given object. 9 | Args: 10 | boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. 11 | scores: (tensor) The class predscores for the img, Shape:[num_priors]. 12 | overlap: (float) The overlap thresh for suppressing unnecessary boxes. 13 | top_k: (int) The Maximum number of box preds to consider. 14 | Return: 15 | The indices of the kept boxes with respect to num_priors. 
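        Example (illustrative usage added for clarity; not part of the original
        docstring, and the numbers are made up):
            boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 9., 9.], [50., 50., 60., 60.]])
            scores = torch.tensor([0.9, 0.8, 0.7])
            keep, count = nms(boxes, scores, overlap=0.5)
            keep[:count]  # -> tensor([0, 2]); box 1 is suppressed by the higher-scoring box 0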
16 | """ 17 | 18 | keep = scores.new(scores.size(0)).zero_().long() 19 | if boxes.numel() == 0: 20 | return keep 21 | x1 = boxes[:, 0] 22 | y1 = boxes[:, 1] 23 | x2 = boxes[:, 2] 24 | y2 = boxes[:, 3] 25 | area = torch.mul(x2 - x1, y2 - y1) 26 | v, idx = scores.sort(0) # sort in ascending order 27 | # I = I[v >= 0.01] 28 | idx = idx[-top_k:] # indices of the top-k largest vals 29 | xx1 = boxes.new() 30 | yy1 = boxes.new() 31 | xx2 = boxes.new() 32 | yy2 = boxes.new() 33 | w = boxes.new() 34 | h = boxes.new() 35 | 36 | # keep = torch.Tensor() 37 | count = 0 38 | while idx.numel() > 0: 39 | i = idx[-1] # index of current largest val 40 | # keep.append(i) 41 | keep[count] = i 42 | count += 1 43 | if idx.size(0) == 1: 44 | break 45 | idx = idx[:-1] # remove kept element from view 46 | # load bboxes of next highest vals 47 | torch.index_select(x1, 0, idx, out=xx1) 48 | torch.index_select(y1, 0, idx, out=yy1) 49 | torch.index_select(x2, 0, idx, out=xx2) 50 | torch.index_select(y2, 0, idx, out=yy2) 51 | # store element-wise max with next highest score 52 | xx1 = torch.clamp(xx1, min=x1[i]) 53 | yy1 = torch.clamp(yy1, min=y1[i]) 54 | xx2 = torch.clamp(xx2, max=x2[i]) 55 | yy2 = torch.clamp(yy2, max=y2[i]) 56 | w.resize_as_(xx2) 57 | h.resize_as_(yy2) 58 | w = xx2 - xx1 59 | h = yy2 - yy1 60 | # check sizes of xx1 and xx2.. after each iteration 61 | w = torch.clamp(w, min=0.0) 62 | h = torch.clamp(h, min=0.0) 63 | inter = w*h 64 | # IoU = i / (area(a) + area(b) - i) 65 | rem_areas = torch.index_select(area, 0, idx) # load remaining areas) 66 | union = (rem_areas - inter) + area[i] 67 | IoU = inter.float() / union.float() # store result in iou 68 | # keep only elements with an IoU <= overlap 69 | idx = idx[IoU.le(overlap)] 70 | return keep, count -------------------------------------------------------------------------------- /models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # Default YOLOv5 anchors for COCO data 2 | 3 | 4 | # P5 ------------------------------------------------------------------------------------------------------------------- 5 | # P5-640: 6 | anchors_p5_640: 7 | - [ 10,13, 16,30, 33,23 ] # P3/8 8 | - [ 30,61, 62,45, 59,119 ] # P4/16 9 | - [ 116,90, 156,198, 373,326 ] # P5/32 10 | 11 | 12 | # P6 ------------------------------------------------------------------------------------------------------------------- 13 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 14 | anchors_p6_640: 15 | - [ 9,11, 21,19, 17,41 ] # P3/8 16 | - [ 43,32, 39,70, 86,64 ] # P4/16 17 | - [ 65,131, 134,130, 120,265 ] # P5/32 18 | - [ 282,180, 247,354, 512,387 ] # P6/64 19 | 20 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 21 | anchors_p6_1280: 22 | - [ 19,27, 44,40, 38,94 ] # P3/8 23 | - [ 96,68, 86,152, 180,137 ] # P4/16 24 | - [ 140,301, 303,264, 238,542 ] # P5/32 25 | - [ 436,615, 739,380, 925,792 ] # P6/64 26 | 27 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 28 | anchors_p6_1920: 29 | - [ 28,41, 
67,59, 57,141 ] # P3/8 30 | - [ 144,103, 129,227, 270,205 ] # P4/16 31 | - [ 209,452, 455,396, 358,812 ] # P5/32 32 | - [ 653,922, 1109,570, 1387,1187 ] # P6/64 33 | 34 | 35 | # P7 ------------------------------------------------------------------------------------------------------------------- 36 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 37 | anchors_p7_640: 38 | - [ 11,11, 13,30, 29,20 ] # P3/8 39 | - [ 30,46, 61,38, 39,92 ] # P4/16 40 | - [ 78,80, 146,66, 79,163 ] # P5/32 41 | - [ 149,150, 321,143, 157,303 ] # P6/64 42 | - [ 257,402, 359,290, 524,372 ] # P7/128 43 | 44 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 45 | anchors_p7_1280: 46 | - [ 19,22, 54,36, 32,77 ] # P3/8 47 | - [ 70,83, 138,71, 75,173 ] # P4/16 48 | - [ 165,159, 148,334, 375,151 ] # P5/32 49 | - [ 334,317, 251,626, 499,474 ] # P6/64 50 | - [ 750,326, 534,814, 1079,818 ] # P7/128 51 | 52 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 53 | anchors_p7_1920: 54 | - [ 29,34, 81,55, 47,115 ] # P3/8 55 | - [ 105,124, 207,107, 113,259 ] # P4/16 56 | - [ 247,238, 222,500, 563,227 ] # P5/32 57 | - [ 501,476, 376,939, 749,711 ] # P6/64 58 | - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 59 | -------------------------------------------------------------------------------- /models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, 
False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- /models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, [1, 1]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- 
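The hub configs above and below are plain YAML descriptions of a backbone and head as
[from, number, module, args] rows. A minimal sketch of how such a config is typically turned
into a network (assuming this fork keeps the upstream YOLOv5 Model(cfg, ch, nc) constructor
in models/yolo.py) looks like this:

    import torch
    from models.yolo import Model

    model = Model('models/hub/yolov3.yaml', ch=3, nc=80)  # parse the YAML rows into an nn.Module
    model.eval()
    img = torch.zeros(1, 3, 640, 640)  # dummy input; spatial size must be a multiple of the max stride
    with torch.no_grad():
        out = model(img)  # in upstream YOLOv5, eval mode returns (decoded predictions, per-scale maps)
    print(model.stride)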
/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 6, BottleneckCSP, [1024]], # 9 25 | ] 26 | 27 | # YOLOv5 FPN head 28 | head: 29 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 20 | [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], 21 | [ -1, 3, C3, [ 1024, False ] ], # 9 22 | ] 23 | 24 | # YOLOv5 head 25 | head: 26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ], 27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 29 | [ -1, 3, C3, [ 512, False ] ], # 13 30 | 31 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) 35 | 36 | [ -1, 1, Conv, [ 128, 1, 1 ] ], 37 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 38 | [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 39 | [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) 40 | 41 | [ -1, 1, Conv, [ 128, 3, 2 ] ], 42 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 43 | [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) 44 | 45 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 46 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 47 | [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) 48 | 49 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 50 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 51 | [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) 52 | 53 | [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) 54 | ] 55 | 
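Note that yolov5-p2.yaml above (like the P6/P7 variants that follow) declares anchors: 3,
i.e. three anchors per detection level are evolved from the target dataset rather than listed
explicitly; the BPR and "anchors past thr" figures commented in hub/anchors.yaml are the metrics
that evolution reports. A minimal sketch, assuming this fork keeps the upstream
utils.autoanchor.kmean_anchors helper and using an illustrative dataset YAML path:

    from utils.autoanchor import kmean_anchors

    # 4 detection levels (P2-P5 here) x 3 anchors per level = 12 anchors, evolved with
    # k-means plus genetic mutation over the dataset's label width/height statistics.
    anchors = kmean_anchors('data/visdrone10.yaml', n=12, img_size=640, thr=4.0, gen=1000)
    print(anchors.round())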
-------------------------------------------------------------------------------- /models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 23 | [ -1, 3, C3, [ 1024, False ] ], # 11 24 | ] 25 | 26 | # YOLOv5 head 27 | head: 28 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 29 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 30 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 31 | [ -1, 3, C3, [ 768, False ] ], # 15 32 | 33 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 34 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 35 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 36 | [ -1, 3, C3, [ 512, False ] ], # 19 37 | 38 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 39 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 40 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 41 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 42 | 43 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 44 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 45 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 46 | 47 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 48 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 49 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 50 | 51 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 52 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 53 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) 54 | 55 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 56 | ] 57 | -------------------------------------------------------------------------------- /models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 3, C3, [ 1024 ] ], 23 | [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 24 | [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], 25 | [ -1, 3, C3, [ 1280, False ] ], # 13 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], 31 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 32 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 33 | [ -1, 3, C3, [ 1024, False ] ], # 17 34 | 35 | [ -1, 1, Conv, [ 768, 1, 1 ] ], 36 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 37 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 38 | [ -1, 3, C3, [ 768, False ] ], # 21 39 | 40 | [ -1, 1, 
Conv, [ 512, 1, 1 ] ], 41 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 42 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 43 | [ -1, 3, C3, [ 512, False ] ], # 25 44 | 45 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 46 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 47 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 48 | [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) 49 | 50 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 51 | [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 52 | [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) 53 | 54 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 55 | [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 56 | [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) 57 | 58 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 59 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 60 | [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) 61 | 62 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], 63 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 64 | [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) 65 | 66 | [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) 67 | ] 68 | -------------------------------------------------------------------------------- /models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, BottleneckCSP, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, BottleneckCSP, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, BottleneckCSP, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/hub/yolov5l6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [ 19,27, 44,40, 38,94 ] # P3/8 9 | - [ 96,68, 86,152, 180,137 ] # P4/16 10 | - [ 140,301, 303,264, 238,542 ] # P5/32 11 | - [ 436,615, 739,380, 925,792 ] # P6/64 12 | 13 | # YOLOv5 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 17 | [ -1, 1, Conv, [ 128, 3, 
2 ] ], # 1-P2/4 18 | [ -1, 3, C3, [ 128 ] ], 19 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 20 | [ -1, 9, C3, [ 256 ] ], 21 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 22 | [ -1, 9, C3, [ 512 ] ], 23 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 24 | [ -1, 3, C3, [ 768 ] ], 25 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 26 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 27 | [ -1, 3, C3, [ 1024, False ] ], # 11 28 | ] 29 | 30 | # YOLOv5 head 31 | head: 32 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 33 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 34 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 35 | [ -1, 3, C3, [ 768, False ] ], # 15 36 | 37 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 38 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 39 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 40 | [ -1, 3, C3, [ 512, False ] ], # 19 41 | 42 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 43 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 44 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 45 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 46 | 47 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 48 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 49 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 50 | 51 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 52 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 53 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 54 | 55 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 56 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 57 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) 58 | 59 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /models/hub/yolov5m6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [ 19,27, 44,40, 38,94 ] # P3/8 9 | - [ 96,68, 86,152, 180,137 ] # P4/16 10 | - [ 140,301, 303,264, 238,542 ] # P5/32 11 | - [ 436,615, 739,380, 925,792 ] # P6/64 12 | 13 | # YOLOv5 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 17 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 18 | [ -1, 3, C3, [ 128 ] ], 19 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 20 | [ -1, 9, C3, [ 256 ] ], 21 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 22 | [ -1, 9, C3, [ 512 ] ], 23 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 24 | [ -1, 3, C3, [ 768 ] ], 25 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 26 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 27 | [ -1, 3, C3, [ 1024, False ] ], # 11 28 | ] 29 | 30 | # YOLOv5 head 31 | head: 32 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 33 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 34 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 35 | [ -1, 3, C3, [ 768, False ] ], # 15 36 | 37 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 38 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 39 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 40 | [ -1, 3, C3, [ 512, False ] ], # 19 41 | 42 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 43 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 44 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 45 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 46 | 47 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 48 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 49 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 50 | 51 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 52 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 
53 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 54 | 55 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 56 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 57 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) 58 | 59 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /models/hub/yolov5s6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [ 19,27, 44,40, 38,94 ] # P3/8 9 | - [ 96,68, 86,152, 180,137 ] # P4/16 10 | - [ 140,301, 303,264, 238,542 ] # P5/32 11 | - [ 436,615, 739,380, 925,792 ] # P6/64 12 | 13 | # YOLOv5 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 17 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 18 | [ -1, 3, C3, [ 128 ] ], 19 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 20 | [ -1, 9, C3, [ 256 ] ], 21 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 22 | [ -1, 9, C3, [ 512 ] ], 23 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 24 | [ -1, 3, C3, [ 768 ] ], 25 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 26 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 27 | [ -1, 3, C3, [ 1024, False ] ], # 11 28 | ] 29 | 30 | # YOLOv5 head 31 | head: 32 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 33 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 34 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 35 | [ -1, 3, C3, [ 768, False ] ], # 15 36 | 37 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 38 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 39 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 40 | [ -1, 3, C3, [ 512, False ] ], # 19 41 | 42 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 43 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 44 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 45 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 46 | 47 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 48 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 49 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 50 | 51 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 52 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 53 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 54 | 55 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 56 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 57 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) 58 | 59 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /models/hub/yolov5x6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [ 19,27, 44,40, 38,94 ] # P3/8 9 | - [ 96,68, 86,152, 180,137 ] # P4/16 10 | - [ 140,301, 303,264, 238,542 ] # P5/32 11 | - [ 436,615, 739,380, 925,792 ] # P6/64 12 | 13 | # YOLOv5 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 17 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 18 | [ -1, 3, C3, [ 128 ] ], 19 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 20 | [ -1, 9, C3, [ 256 ] ], 21 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 22 | [ -1, 9, C3, [ 512 ] ], 23 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 24 | [ -1, 3, C3, [ 768 ] ], 25 | [ -1, 1, 
Conv, [ 1024, 3, 2 ] ], # 9-P6/64 26 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 27 | [ -1, 3, C3, [ 1024, False ] ], # 11 28 | ] 29 | 30 | # YOLOv5 head 31 | head: 32 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 33 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 34 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 35 | [ -1, 3, C3, [ 768, False ] ], # 15 36 | 37 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 38 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 39 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 40 | [ -1, 3, C3, [ 512, False ] ], # 19 41 | 42 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 43 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 44 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 45 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 46 | 47 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 48 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 49 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 50 | 51 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 52 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 53 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 54 | 55 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 56 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 57 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) 58 | 59 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /models/modal_ensemble_model_uva.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 面向后处理的模态集成模型定义 3 | 模态主要是可将光模型与红外模型的集成 4 | ''' 5 | import torch 6 | import torch.nn as nn 7 | from models.experimental import attempt_load 8 | from utils.general import check_img_size 9 | from models.experimental import Ensemble 10 | import collections 11 | 12 | 13 | class ModalEnseModel(nn.Module): 14 | 15 | def __init__(self): 16 | super(ModalEnseModel, self).__init__() 17 | 18 | self.model_visible = None 19 | self.model_lwir = None 20 | self.export_flag = False 21 | # self.eval() 22 | 23 | def forward(self, x_visible, x_lwir, augment=False): 24 | 25 | if self.export_flag is False: 26 | inf_out_visible, train_out_visible = self.model_visible(x_visible, 27 | augment=augment) # inference and training outputs 28 | inf_out_lwir, train_out_lwir = self.model_lwir(x_lwir, augment=augment) 29 | return torch.cat((inf_out_visible, inf_out_lwir), 1) 30 | else: 31 | fea_out_visible = self.model_visible(x_visible, augment=augment) # inference and training outputs 32 | fea_out_lwir = self.model_lwir(x_lwir, augment=augment) 33 | return fea_out_visible, fea_out_lwir 34 | 35 | def load_weights(self, visible_model_path, lwir_model_path, device): 36 | if self.model_visible is None: 37 | self.model_visible = attempt_load(visible_model_path, map_location=device) # load FP32 model 38 | if self.model_lwir is None: 39 | self.model_lwir = attempt_load(lwir_model_path, map_location=device) 40 | 41 | def half(self): 42 | if self.model_visible is not None: 43 | self.model_visible.half() 44 | if self.model_lwir is not None: 45 | self.model_lwir.half() 46 | 47 | def eval_model(self): 48 | self.eval() 49 | self.model_visible.eval() 50 | self.model_lwir.eval() 51 | 52 | def check_img_shape(self, imgsz): 53 | 54 | imgsz = check_img_size(imgsz, s=self.model_visible.stride.max()) # check img_size 55 | imgsz = check_img_size(imgsz, s=self.model_lwir.stride.max()) 56 | 57 | return imgsz 58 | 59 | def float(self): 60 | self.model_visible.float() 61 | self.model_lwir.float() 62 | 63 | def export(self): 64 | self.export_flag = True 65 | 
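        # Putting the last module of each branch (the Detect head) into export mode makes it
        # return raw per-scale feature maps instead of decoded boxes; models/export_multi.py
        # then names these six outputs img_scale_1..3 / lwir_scale_1..3 in the ONNX graph,
        # leaving box decoding and NMS to downstream post-processing.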
self.model_visible.model[-1].export = True 66 | self.model_lwir.model[-1].export = True 67 | -------------------------------------------------------------------------------- /models/modal_ensemble_model_uva_aware.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 面向后处理的模态集成模型定义 3 | 模态主要是可将光模型与红外模型的集成 4 | ''' 5 | import torch 6 | import torch.nn as nn 7 | from models.experimental import attempt_load 8 | from utils.general import check_img_size 9 | from models.experimental import Ensemble 10 | import collections 11 | from models.aware_model import AwareModel 12 | from torchvision.transforms import Resize 13 | from utils.general import non_max_suppression 14 | import numpy as np 15 | 16 | 17 | class ModalEnseModel(nn.Module): 18 | 19 | def __init__(self, aware=False): 20 | super(ModalEnseModel, self).__init__() 21 | 22 | self.two_step = True 23 | self.model_visible = None 24 | self.model_lwir = None 25 | self.model_aware = None 26 | self.export_flag = False 27 | self.model_aware_path = "/home/shw/code/yolov5-master/runs/aware_model/aware.pth" 28 | self.torch_resize = Resize((128, 128)) 29 | self.aware = aware 30 | 31 | self.conf_thres = 0.25 32 | self.iou_thres = 0.45 33 | # self.classes = 34 | # self.agnostic_nms = 35 | # self.eval() 36 | 37 | def forward(self, x_visible, x_lwir, img_aware, augment=False): 38 | 39 | if self.export_flag is False: 40 | inf_out_visible, train_out_visible = self.model_visible(x_visible, 41 | augment=augment) # inference and training outputs 42 | inf_out_lwir, train_out_lwir = self.model_lwir(x_lwir, augment=augment) 43 | 44 | # 开启光照感知 45 | if self.aware and inf_out_visible is not None: 46 | aware_score = self.model_aware(img_aware) 47 | aware_score = self.label_smoothing(aware_score) 48 | 49 | for b_ix, s in enumerate(aware_score[..., 0]): 50 | inf_out_visible[b_ix][:, 5:] = inf_out_visible[b_ix][:, 5:] * s 51 | # print(aware_score.shape) 52 | # print(inf_out_visible.shape) 53 | # print(inf_out_visible[0]) 54 | # self.writew1w2(np.abs((aware_score[..., 0] - aware_score[..., 1]).cpu().numpy())) 55 | return torch.cat((inf_out_visible, inf_out_lwir), 1) 56 | else: 57 | fea_out_visible = self.model_visible(x_visible, augment=augment) # inference and training outputs 58 | fea_out_lwir = self.model_lwir(x_lwir, augment=augment) 59 | aware_score = self.model_aware(img_aware) 60 | aware_score = self.label_smoothing(aware_score) 61 | return fea_out_visible, fea_out_lwir, aware_score 62 | 63 | def load_weights(self, visible_model_path, lwir_model_path, device): 64 | if self.model_visible is None: 65 | self.model_visible = attempt_load(visible_model_path, map_location=device) # load FP32 model 66 | if self.model_lwir is None: 67 | self.model_lwir = attempt_load(lwir_model_path, map_location=device) 68 | if self.model_aware is None and self.aware: 69 | self.model_aware = AwareModel() 70 | # print(torch.load(self.model_aware_path)) 71 | self.model_aware.load_state_dict(torch.load(self.model_aware_path)) 72 | self.model_aware.to(device).eval() 73 | 74 | def half(self): 75 | if self.model_visible is not None: 76 | self.model_visible.half() 77 | if self.model_lwir is not None: 78 | self.model_lwir.half() 79 | 80 | def eval_model(self): 81 | self.eval() 82 | self.model_visible.eval() 83 | self.model_lwir.eval() 84 | 85 | def check_img_shape(self, imgsz): 86 | 87 | imgsz = check_img_size(imgsz, s=self.model_visible.stride.max()) # check img_size 88 | imgsz = check_img_size(imgsz, s=self.model_lwir.stride.max()) 89 | 90 | return imgsz 
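    # Worked example of the illumination-aware weighting in forward() above (numbers are
    # illustrative): with epsilon=0.1 and a two-element illumination score, label_smoothing
    # maps a raw score of [1.0, 0.0] to 0.9 * [1.0, 0.0] + 0.1 / 2 = [0.95, 0.05], and every
    # visible-branch class score (columns 5:) is then scaled by 0.95 before the visible and
    # LWIR predictions are concatenated.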
91 | 92 | def float(self): 93 | self.model_visible.float() 94 | self.model_lwir.float() 95 | 96 | def export(self): 97 | self.export_flag = True 98 | self.model_visible.model[-1].export = True 99 | self.model_lwir.model[-1].export = True 100 | 101 | def cal(self, aware_score): 102 | print(aware_score) 103 | aware_score[:, 0] = ((aware_score[:, 0] - aware_score[:, 1]) / 2) * torch.norm(aware_score, p=1) + 0.4 104 | print(aware_score) 105 | return aware_score 106 | 107 | def label_smoothing(self, inputs, epsilon=0.1): 108 | k = inputs.shape[-1] 109 | return (1 - epsilon) * inputs + (epsilon / k) 110 | 111 | def writew1w2(self, res): 112 | 113 | with open("w1w2ls.txt", "ab") as f: 114 | np.savetxt(f, res, delimiter=" ") 115 | 116 | 117 | def cal(aware_score): 118 | print(aware_score[:, 0]) 119 | print(aware_score[:, 1]) 120 | aware_score[:, 0] = ((aware_score[:, 0] - aware_score[:, 1]) / 2) * torch.norm(a, p=1) + 0.5 121 | return aware_score 122 | 123 | 124 | def label_smoothing(inputs, epsilon=0.1): 125 | k = inputs.shape[-1] 126 | return (1 - epsilon) * inputs + (epsilon / k) 127 | 128 | 129 | if __name__ == "__main__": 130 | a = torch.tensor([ 131 | [0.5, 0.5], 132 | [0.3, 0.7], 133 | [0.1, 0.9]]) 134 | 135 | print(label_smoothing(a)) 136 | -------------------------------------------------------------------------------- /models/modal_ensemble_model_uva_aware_nms.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 面向后处理的模态集成模型定义 3 | 模态主要是可将光模型与红外模型的集成 4 | ''' 5 | import torch 6 | import torch.nn as nn 7 | from models.experimental import attempt_load 8 | from utils.general import check_img_size 9 | from models.experimental import Ensemble 10 | import collections 11 | from models.aware_model import AwareModel 12 | from torchvision.transforms import Resize 13 | from utils.general import non_max_suppression 14 | from models.funcs import nms 15 | 16 | 17 | class ModalEnseModel(nn.Module): 18 | 19 | def __init__(self, aware=False): 20 | super(ModalEnseModel, self).__init__() 21 | 22 | self.two_step = False 23 | self.model_visible = None 24 | self.model_lwir = None 25 | self.model_aware = None 26 | self.export_flag = False 27 | self.model_aware_path = "/home/shw/code/yolov5-master/runs/aware_model/aware.pth" 28 | self.torch_resize = Resize((128, 128)) 29 | self.aware = aware 30 | 31 | self.conf_thres = 0.25 32 | self.iou_thres = 0.45 33 | # self.classes = 34 | # self.agnostic_nms = 35 | # self.eval() 36 | 37 | def forward(self, x_visible, x_lwir, img_aware, augment=False): 38 | fea_out_visible = None 39 | fea_out_lwir = None 40 | if self.export_flag is False: 41 | inf_out_visible, train_out_visible = self.model_visible(x_visible, 42 | augment=augment) # inference and training outputs 43 | inf_out_lwir, train_out_lwir = self.model_lwir(x_lwir, augment=augment) 44 | # print(inf_out_visible.shape) 45 | # 开启光照感知 46 | if self.aware: 47 | aware_score = self.model_aware(img_aware) 48 | for b_ix, s in enumerate(aware_score[..., 0]): 49 | inf_out_visible[b_ix][:, 5:] = inf_out_visible[b_ix][:, 5:] * s 50 | 51 | # 开二阶段 52 | if self.two_step: 53 | inf_out_visible = non_max_suppression(inf_out_visible, self.conf_thres, self.iou_thres) 54 | inf_out_lwir = non_max_suppression(inf_out_lwir, self.conf_thres, self.iou_thres) 55 | batch = [] 56 | 57 | for idx, (vis, lw) in enumerate(zip(inf_out_visible, inf_out_lwir)): 58 | inp = torch.cat((vis, lw), 0) 59 | res, count = nms(inp[:, :4], inp[:, 4:5].squeeze(1), 0.6) 60 | res = res[:count] 61 | rs = torch.index_select(inp, 
0, res) 62 | batch.append(rs) 63 | return batch 64 | else: 65 | return torch.cat((inf_out_visible, inf_out_lwir), 1) 66 | else: 67 | fea_out_visible = self.model_visible(x_visible, augment=augment) # inference and training outputs 68 | fea_out_lwir = self.model_lwir(x_lwir, augment=augment) 69 | return fea_out_visible, fea_out_lwir 70 | 71 | def load_weights(self, visible_model_path, lwir_model_path, device): 72 | if self.model_visible is None: 73 | self.model_visible = attempt_load(visible_model_path, map_location=device) # load FP32 model 74 | if self.model_lwir is None: 75 | self.model_lwir = attempt_load(lwir_model_path, map_location=device) 76 | if self.model_aware is None and self.aware: 77 | self.model_aware = AwareModel() 78 | # print(torch.load(self.model_aware_path)) 79 | self.model_aware.load_state_dict(torch.load(self.model_aware_path)) 80 | self.model_aware.to(device).eval() 81 | 82 | def half(self): 83 | if self.model_visible is not None: 84 | self.model_visible.half() 85 | if self.model_lwir is not None: 86 | self.model_lwir.half() 87 | 88 | def eval_model(self): 89 | self.eval() 90 | self.model_visible.eval() 91 | self.model_lwir.eval() 92 | 93 | def check_img_shape(self, imgsz): 94 | 95 | imgsz = check_img_size(imgsz, s=self.model_visible.stride.max()) # check img_size 96 | imgsz = check_img_size(imgsz, s=self.model_lwir.stride.max()) 97 | 98 | return imgsz 99 | 100 | def float(self): 101 | self.model_visible.float() 102 | self.model_lwir.float() 103 | 104 | def export(self): 105 | self.export_flag = True 106 | self.model_visible.model[-1].export = True 107 | self.model_lwir.model[-1].export = True 108 | -------------------------------------------------------------------------------- /models/pruned_yolov5l.yaml: -------------------------------------------------------------------------------- 1 | nc: 80 2 | depth_multiple: 1.0 3 | width_multiple: 1.0 4 | anchors: 5 | [[10, 13, 16, 30, 33, 23], 6 | [30, 61, 62, 45, 59, 119], 7 | [116, 90, 156, 198, 373, 326]] 8 | backbone: [ 9 | [-1, 1, Focus , [64, 3]], 10 | [-1, 1, Conv , [128, 3, 2]], 11 | [-1, 3, C3_Res_S , [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 12 | [-1, 1, Conv , [256, 3, 2]], 13 | [-1, 0, C3_Res_S , [256, True, 1, [0.5, 0.5], []]], 14 | [-1, 1, Conv , [512, 3, 2]], 15 | [-1, 0, C3_Res_S , [512, True, 1, [0.5, 0.5], []]], 16 | [-1, 1, Conv , [1024, 3, 2]], 17 | [-1, 1, SPP , [1024, [5, 9, 13], 0.5]], 18 | [-1, 0, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5]]]] # 9 19 | head: [ 20 | [-1, 1, Conv , [512, 1, 1]], 21 | [-1, 1, nn.Upsample , [ None , 2, 'nearest']], 22 | [[-1, 6], 1, Concat , [1]], 23 | [-1, 0, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5]]], 24 | [-1, 1, Conv , [256, 1, 1]], 25 | [-1, 1, nn.Upsample , [ None , 2, 'nearest']], 26 | [[-1, 4], 1, Concat , [1]], 27 | [-1, 1, C3_Res_S , [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5]]],# 17 28 | [-1, 1, Conv , [256, 3, 2]], 29 | [[-1, 14], 1, Concat , [1]], 30 | [-1, 0, C3_Res_S , [512, False, 1, [0.5, 0.5], [0.5]]], 31 | [-1, 1, Conv , [512, 3, 2]], 32 | [[-1, 10], 1, Concat , [1]], 33 | [-1, 0, C3_Res_S , [1024, False, 1, [0.5, 0.5], [0.5]]], 34 | [[17, 20, 23], 1, Detect , [ nc , anchors ]]] 35 | -------------------------------------------------------------------------------- /models/yolov5l-transformer.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1 # model depth multiple 4 | width_multiple: 1 # layer channel multiple 5 | 6 | # anchors 7 | 
anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | # [-1, 3, TFB, [1024, False]], # 9 transformer 25 | [-1, 3, C3, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]] 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] -------------------------------------------------------------------------------- /models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 
(P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] -------------------------------------------------------------------------------- /models/yolov5l_trs.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [[13, 17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 44 | ] -------------------------------------------------------------------------------- /models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | 
[[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/yolov5s-ASFF.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3TR, [1024, False]], # 9 transformer 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | [[17,20,23], 1, ASFFV5, [0, 512, 0.5 ]], #24 48 | [[17,20,23], 1, ASFFV5, [1, 256, 0.5 ]], #25 49 | [[17,20,23], 1, ASFFV5, [2, 128 ,0.5]], #26 50 | #[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | [[26, 25, 24], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 52 | ] -------------------------------------------------------------------------------- /models/yolov5s-c3-cbl.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, 
module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_BL, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]],#2 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_BL, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]],#4 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_BL, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]],#6 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]],#8 24 | [-1, 3, C3_BL, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_BL, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_BL, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_BL, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3_BL, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] -------------------------------------------------------------------------------- /models/yolov5s-tiny.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, 
Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] -------------------------------------------------------------------------------- /models/yolov5s-transformer.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1 # model depth multiple 4 | width_multiple: 1 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, TFB, [1024, False]], # 9 transformer 25 | # [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]] 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] -------------------------------------------------------------------------------- /models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 
| [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] -------------------------------------------------------------------------------- /models/yolov5s_medium_fusion.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3_Res_S, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3_Res_S, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3_Res_S, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]], 24 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3_Res_S, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3_Res_S, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3_Res_S, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 23 (P5/32-large) 46 | 47 | # [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] -------------------------------------------------------------------------------- /models/yolov5s_trs.yaml: 
-------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0]]],#2 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]],#4 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512, True, 1, [0.5, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]],#6 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13], 0.5]],#8 24 | [-1, 3, C3, [1024, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]],# 10 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],# 11 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 12 32 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]],# 14 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],# 15 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3# 16 37 | [-1, 3, C3, [256, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]],# 18 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 19 41 | [-1, 3, C3, [512, False, 1, [0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]], # 20 (P4/16-medium) 42 | 43 | [[13, 17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 44 | ] -------------------------------------------------------------------------------- /models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, 
P5) 48 | ] 49 |
-------------------------------------------------------------------------------- /sparsity.py: --------------------------------------------------------------------------------
1 | import torch
2 |
3 | def updateBN(scale, model):
4 |     for m in model.modules():
5 |         if isinstance(m, torch.nn.BatchNorm2d):
6 |             m.weight.grad.data.add_(scale*torch.sign(m.weight.data))  # L1 sub-gradient on BN gamma
7 |
8 | def maskBN(model, soft=False, mask_thresh_alpha=0.2):
9 |     total = 0
10 |     for m in model.modules():
11 |         if isinstance(m, torch.nn.BatchNorm2d):
12 |             total += m.weight.data.shape[0]  # number of channels
13 |     bn = torch.zeros(total)
14 |     index = 0
15 |     for m in model.modules():
16 |         if isinstance(m, torch.nn.BatchNorm2d):
17 |             size = m.weight.data.shape[0]
18 |             bn[index:(index+size)] = m.weight.data.abs().clone()
19 |             index += size
20 |     bn_mean = torch.mean(bn)
21 |     mask_thresh = mask_thresh_alpha * bn_mean
22 |     print('The number of masked channels in this update is ', torch.sum(bn.abs().clone().le(mask_thresh)))
23 |     for m in model.modules():
24 |         if isinstance(m, torch.nn.BatchNorm2d):
25 |             weight_copy = m.weight.data.abs().clone()
26 |             mask = weight_copy.le(mask_thresh).float()
27 |
28 |             scale_alpha = -1
29 |             if soft:
30 |                 scale_alpha = -0.9
31 |             m.weight.data.add_(scale_alpha*mask*m.weight.data.clone())
32 |             m.bias.data.add_(scale_alpha*mask*m.bias.data.clone())
33 |             m.running_mean.data.add_(scale_alpha*mask*m.running_mean.data.clone())
34 |             m.running_var.data.add_(scale_alpha*mask*m.running_var.data.clone())
35 |
-------------------------------------------------------------------------------- /train_aware.py: --------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """
3 | Main program: provides four functions
4 | (1) train: define the network, loss function and optimizer, run training, and save the model
5 | (2) validate: evaluate model accuracy on the validation set
6 | (3) test: evaluate model accuracy on the test set
7 | (4) help: print log information
8 | """
9 |
10 | import os
11 | # import models
12 | import torch as t
13 | from datasets.aware_datasets import AwareData
14 | from torch.utils.data import DataLoader
15 | from models.aware_model import AwareModel
16 | from torch.autograd import Variable
17 | from torchvision import models
18 | from torch import nn
19 | from torchnet import meter
20 | import time
21 | import csv
22 |
23 | """Model training: define the network and data, define the loss function and optimizer, train while tracking metrics, and compute accuracy on the validation set."""
24 |
25 | data_root = "/home/shw/data/uveAwareData"
26 | batch_size = 16
27 | max_epoch = 30
28 |
29 | save_model = "/home/shw/code/yolov5-master/runs/aware_model"
30 |
31 | def train():
32 |     """Update the configuration from command-line arguments."""
33 |
34 |     """(1) step 1: build the network; load pretrained weights if available."""
35 |     # model = getattr(models,opt.model)()
36 |     model = AwareModel()
37 |     model.cuda()
38 |
39 |     """(2) step 2: prepare the data."""
40 |     train_data = AwareData("/home/shw/data/uveAwareData", mode="train")  # training set
41 |     val_data = AwareData("/home/shw/data/uveAwareData", mode="val")  # validation set
42 |
43 |     train_dataloader = DataLoader(train_data, batch_size, shuffle=True, num_workers=batch_size)
44 |     val_dataloader = DataLoader(val_data, batch_size, shuffle=False, num_workers=batch_size)
45 |
46 |     """(3) step 3: define the loss function and optimizer."""
47 |     criterion = t.nn.CrossEntropyLoss()  # cross-entropy loss
48 |     optimizer = t.optim.SGD(model.parameters(), lr=0.01)
49 |
50 |     """(4) step 4: metric trackers - the smoothed loss and the confusion matrix."""
51 |     loss_meter = meter.AverageValueMeter()
52 |     confusion_matrix = meter.ConfusionMeter(2)
53 |     previous_loss = 1e10
54 |
55 |     """(5) start training."""
56 |     for epoch in range(max_epoch):
57 |
58 |         loss_meter.reset()
59 |         confusion_matrix.reset()
60 |
61 |         for ii, (data, label) in enumerate(train_dataloader):
62 |             # train model parameters
63 |             input = Variable(data)
64 |             target = Variable(label)
65 |
66 |             input = input.cuda()/255.0
67 |             target = target.cuda()
68 |
69 |             # zero the gradients
70 |             optimizer.zero_grad()
71 |             score = model(input)
72 |             # print(target)
73 |             loss = criterion(score, target.long())
74 |             # print("loss---",loss.item())
75 |             loss.backward()  # backpropagation
76 |
77 |             # update parameters
78 |             optimizer.step()
79 |
80 |             # update running metrics and visualization
81 |             loss_meter.add(loss.item())
82 |             # print score.shape,target.shape
83 |             confusion_matrix.add(score.detach(), target.detach())
84 |
85 |         # model.save()
86 |         name = time.strftime('model' + '%m%d_%H:%M:%S.pth')
87 |         t.save(model.state_dict(), os.path.join(save_model,name))
88 |
89 |         """compute metrics on the validation set and visualize them"""
90 |         val_cm, val_accuracy = val(model, val_dataloader)
91 |         print("val_accuracy",val_accuracy)
92 |
93 |
94 |         # """if the loss stops decreasing, lower the learning rate"""
95 |         # if loss_meter.value()[0] > previous_loss:
96 |         #     lr = lr * opt.lr_decay
97 |         #     for param_group in optimizer.param_groups:
98 |         #         param_group["lr"] = lr
99 |         #
100 |         # previous_loss = loss_meter.value()[0]
101 |
102 |
103 | """Compute the model's accuracy and related metrics on the validation set."""
104 |
105 |
106 | @t.no_grad()
107 | def val(model, dataloader):
108 |     model.eval()  # switch the model to evaluation mode
109 |
110 |     confusion_matrix = meter.ConfusionMeter(2)
111 |     for ii, data in enumerate(dataloader):
112 |         input, label = data
113 |         val_input = Variable(input/255.0)
114 |         val_label = Variable(label.long())
115 |
116 |         val_input = val_input.cuda()
117 |         val_label = val_label.cuda()
118 |
119 |         score = model(val_input)
120 |         confusion_matrix.add(score.detach().squeeze(), label.long())
121 |
122 |     model.train()  # restore the model to training mode
123 |     cm_value = confusion_matrix.value()
124 |     accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / (cm_value.sum())
125 |
126 |     return confusion_matrix, accuracy
127 |
128 |
129 |
130 |
131 | def write_csv(results, file_name):
132 |     with open(file_name, "w") as f:
133 |         writer = csv.writer(f)
134 |         writer.writerow(['id', 'label'])
135 |         writer.writerows(results)
136 |
137 |
138 | if __name__ == '__main__':
139 |     os.environ['CUDA_VISIBLE_DEVICES'] = "3"
140 |     train()
141 |
-------------------------------------------------------------------------------- /tsne/draw_w1_w2.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import matplotlib.font_manager as fm  # font manager
4 | plt.rcParams['font.sans-serif'] = ['SimHei']  # step 1: use the SimHei sans-serif font so CJK labels render
5 | plt.rcParams['axes.unicode_minus'] = False  # step 2: fix the minus-sign display on the axes
6 | if __name__ == "__main__":
7 |     pro = np.loadtxt("../w1w2pro.txt")
8 |     ls = np.loadtxt("../w1w2ls.txt")
9 |
10 |     pro_s = []
11 |     ls_s = []
12 |     nor_s = []
13 |     # sample the data
14 |     for i in range(len(pro)):
15 |         if i % 10 == 0:
16 |             pro_s.append(pro[i])
17 |             ls_s.append(ls[i])
18 |
19 |     x_data = [i for i in range(len(pro_s))]
20 |     nor_s = [1 for i in range(len(pro_s))]
21 |     # print(pro)
22 |     # print(ls)
23 |
24 |     ln1, = plt.plot(x_data, pro_s, linestyle='dotted')
25 |     ln2, = plt.plot(x_data, ls_s, linestyle='dashed')
26 |     ln3, = plt.plot(x_data, nor_s, color="blue")
27 |
28 |     # my_font = fm.FontProperties(fname="/usr/share/fonts/wqy-microhei/wqy-microhei.ttc")
29 |
30 |     # plt.title("Comparison of Pro and Ls")  # set the title and font
31 |
32 |     plt.legend(handles=[ln1, ln2, ln3], labels=['pro', 'ls', 'nor'])
33 |
34 |     ax = plt.gca()
35 |     ax.spines['right'].set_color('none')  # hide the right spine
36 |     ax.spines['top'].set_color('none')  # hide the top spine
37 |     # ax.set_xlabel("sample index")
38 |     # ax.set_ylabel("|w1-w2|")
39 |     plt.show()
40 |
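A note on sparsity.py above: neither updateBN nor maskBN is called anywhere in this part of the dump, so the following is only a minimal, self-contained sketch of how they are typically wired into a BN-gamma (network-slimming style) sparsity training step. The toy model, loss, learning rate and sr_scale value are illustrative assumptions, not the repository's actual settings; the real hook points presumably live in the training scripts.

import torch
import torch.nn as nn
from sparsity import updateBN, maskBN

# toy stand-ins for the real detector, data and optimizer
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = nn.MSELoss()
sr_scale = 1e-4  # assumed strength of the L1 penalty on BN gamma

for epoch in range(3):
    x, y = torch.randn(4, 3, 32, 32), torch.randn(4, 8, 32, 32)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    updateBN(sr_scale, model)  # must run after backward() (it edits BN weight grads) and before step()
    optimizer.step()
    maskBN(model, soft=True)   # periodically suppress channels whose |gamma| falls below the threshold

After such sparsity training, channels whose gamma has been driven toward zero can be removed structurally and the compact model fine-tuned.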
-------------------------------------------------------------------------------- /tsne/evaluate.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | from scipy import spatial 4 | import numpy as np 5 | 6 | 7 | class Evaluation(object): 8 | 9 | def make_samples(self): 10 | raise NotImplementedError("Needs to implemented this method") 11 | 12 | 13 | def distance(v1, v2, d_type='d1'): 14 | assert v1.shape == v2.shape, "shape of two vectors need to be same!" 15 | 16 | if d_type == 'd1': 17 | return np.sum(np.absolute(v1 - v2)) 18 | elif d_type == 'd2': 19 | return np.sum((v1 - v2) ** 2) 20 | elif d_type == 'd2-norm': 21 | return 2 - 2 * np.dot(v1, v2) 22 | elif d_type == 'd3': 23 | pass 24 | elif d_type == 'd4': 25 | pass 26 | elif d_type == 'd5': 27 | pass 28 | elif d_type == 'd6': 29 | pass 30 | elif d_type == 'd7': 31 | return 2 - 2 * np.dot(v1, v2) 32 | elif d_type == 'd8': 33 | return 2 - 2 * np.dot(v1, v2) 34 | elif d_type == 'cosine': 35 | return spatial.distance.cosine(v1, v2) 36 | elif d_type == 'square': 37 | return np.sum((v1 - v2) ** 2) 38 | 39 | 40 | def AP(label, results, sort=True): 41 | ''' infer a query, return it's ap 42 | arguments 43 | label : query's class 44 | results: a dict with two keys, see the example below 45 | { 46 | 'dis': , 47 | 'cls': 48 | } 49 | sort : sort the results by distance 50 | ''' 51 | if sort: 52 | results = sorted(results, key=lambda x: x['dis']) 53 | precision = [] 54 | hit = 0 55 | for i, result in enumerate(results): 56 | if result['cls'] == label: 57 | hit += 1 58 | precision.append(hit / (i + 1.)) 59 | if hit == 0: 60 | return 0. 61 | return np.mean(precision) 62 | 63 | 64 | def infer(query, samples=None, db=None, sample_db_fn=None, depth=None, d_type='d1'): 65 | ''' infer a query, return it's ap 66 | arguments 67 | query : a dict with three keys, see the template 68 | { 69 | 'img': , 70 | 'cls': , 71 | 'hist' 72 | } 73 | samples : a list of { 74 | 'img': , 75 | 'cls': , 76 | 'hist' 77 | } 78 | db : an instance of class Database 79 | sample_db_fn: a function making samples, should be given if Database != None 80 | depth : retrieved depth during inference, the default depth is equal to database size 81 | d_type : distance type 82 | ''' 83 | assert samples != None or ( 84 | db != None and sample_db_fn != None), "need to give either samples or db plus sample_db_fn" 85 | if db: 86 | samples = sample_db_fn(db) 87 | 88 | q_img, q_cls, q_hist = query['img'], query['cls'], query['hist'] 89 | results = [] 90 | for idx, sample in enumerate(samples): 91 | s_img, s_cls, s_hist = sample['img'], sample['cls'], sample['hist'] 92 | if q_img == s_img: 93 | continue 94 | results.append({ 95 | 'dis': distance(q_hist, s_hist, d_type=d_type), 96 | 'cls': s_cls 97 | }) 98 | results = sorted(results, key=lambda x: x['dis']) 99 | if depth and depth <= len(results): 100 | results = results[:depth] 101 | ap = AP(q_cls, results, sort=False) 102 | 103 | return ap, results 104 | 105 | 106 | def evaluate(db, sample_db_fn, depth=None, d_type='d1'): 107 | ''' infer the whole database 108 | arguments 109 | db : an instance of class Database 110 | sample_db_fn: a function making samples, should be given if Database != None 111 | depth : retrieved depth during inference, the default depth is equal to database size 112 | d_type : distance type 113 | ''' 114 | classes = db.get_class() 115 | ret = {c: [] for c in classes} 116 | 117 | samples = sample_db_fn(db) 118 | for query in samples: 119 | ap, _ = 
infer(query, samples=samples, depth=depth, d_type=d_type) 120 | ret[query['cls']].append(ap) 121 | 122 | return ret 123 | 124 | 125 | def evaluate_class(db, f_class=None, f_instance=None, depth=None, d_type='d1'): 126 | ''' infer the whole database 127 | arguments 128 | db : an instance of class Database 129 | f_class: a class that generate features, needs to implement make_samples method 130 | depth : retrieved depth during inference, the default depth is equal to database size 131 | d_type : distance type 132 | ''' 133 | assert f_class or f_instance, "needs to give class_name or an instance of class" 134 | 135 | classes = db.get_class() 136 | ret = {c: [] for c in classes} 137 | 138 | if f_class: 139 | f = f_class() 140 | elif f_instance: 141 | f = f_instance 142 | samples = f.make_samples(db) 143 | for query in samples: 144 | ap, _ = infer(query, samples=samples, depth=depth, d_type=d_type) 145 | ret[query['cls']].append(ap) 146 | 147 | return ret 148 | -------------------------------------------------------------------------------- /tsne/resnet.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import torch 4 | import torch.nn as nn 5 | from torch.autograd import Variable 6 | from torchvision import models 7 | from torchvision.models.resnet import Bottleneck, BasicBlock, ResNet 8 | import torch.utils.model_zoo as model_zoo 9 | 10 | from six.moves import cPickle 11 | import numpy as np 12 | import scipy.misc 13 | import imageio 14 | import os 15 | import cv2 16 | 17 | import matplotlib.pyplot as plt 18 | 19 | ''' 20 | downloading problem in mac OSX should refer to this answer: 21 | https://stackoverflow.com/a/42334357 22 | ''' 23 | 24 | # configs for histogram 25 | RES_model = 'resnet101' # model type 26 | pick_layer = 'avg' # extract feature of this layer 27 | d_type = 'd1' # distance type 28 | 29 | depth = 3 # retrieved depth, set to None will count the ap for whole database 30 | 31 | ''' MMAP 32 | depth 33 | depthNone, resnet152,avg,d1, MMAP 0.78474710149 34 | depth100, resnet152,avg,d1, MMAP 0.819713653589 35 | depth30, resnet152,avg,d1, MMAP 0.884925001919 36 | depth10, resnet152,avg,d1, MMAP 0.944355078125 37 | depth5, resnet152,avg,d1, MMAP 0.961788675194 38 | depth3, resnet152,avg,d1, MMAP 0.965623938039 39 | depth1, resnet152,avg,d1, MMAP 0.958696281702 40 | (exps below use depth=None) 41 | resnet34,avg,cosine, MMAP 0.755842698037 42 | resnet101,avg,cosine, MMAP 0.757435452078 43 | resnet101,avg,d1, MMAP 0.764556148137 44 | resnet152,avg,cosine, MMAP 0.776918319273 45 | resnet152,avg,d1, MMAP 0.78474710149 46 | resnet152,max,d1, MMAP 0.748099342614 47 | resnet152,fc,cosine, MMAP 0.776918319273 48 | resnet152,fc,d1, MMAP 0.70010267663 49 | ''' 50 | 51 | use_gpu = torch.cuda.is_available() 52 | means = np.array([103.939, 116.779, 123.68]) / 255. 
# mean of three channels in the order of BGR 53 | 54 | # cache dir 55 | cache_dir = 'cache' 56 | if not os.path.exists(cache_dir): 57 | os.makedirs(cache_dir) 58 | 59 | # from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py 60 | model_urls = { 61 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 62 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 63 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 64 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 65 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 66 | } 67 | 68 | cls_idx = { 69 | "bao_lu_la_ji": 0, 70 | "cheng_san_jing_ying": 1, 71 | "gu_ding_tan_fan": 2, 72 | "ling_san_la_ji": 3, 73 | "kua_men_ying_ye": 4, 74 | "liu_dong_tan_fan": 5, 75 | "luan_dui_wu_liao": 6, 76 | "luan_la_tiao_fu": 7, 77 | "luan_shai_yi_wu": 8, 78 | "luan_she_guang_gao_pai": 9 79 | } 80 | # cls_idx = { 81 | # "cat": 0, 82 | # "dog": 1 83 | # } 84 | 85 | def brightness( im_file ): 86 | im = Image.open(im_file).convert('L') 87 | stat = ImageStat.Stat(im) 88 | return stat.rms[0] 89 | 90 | class ResidualNet(ResNet): 91 | def __init__(self, model=RES_model, pretrained=True): 92 | if model == "resnet18": 93 | super().__init__(BasicBlock, [2, 2, 2, 2], 1000) 94 | if pretrained: 95 | self.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) 96 | elif model == "resnet34": 97 | super().__init__(BasicBlock, [3, 4, 6, 3], 1000) 98 | if pretrained: 99 | self.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) 100 | elif model == "resnet50": 101 | super().__init__(Bottleneck, [3, 4, 6, 3], 1000) 102 | if pretrained: 103 | self.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) 104 | elif model == "resnet101": 105 | super().__init__(Bottleneck, [3, 4, 23, 3], 1000) 106 | if pretrained: 107 | self.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) 108 | elif model == "resnet152": 109 | super().__init__(Bottleneck, [3, 8, 36, 3], 1000) 110 | if pretrained: 111 | self.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) 112 | 113 | def forward(self, x): 114 | x = self.conv1(x) 115 | x = self.bn1(x) 116 | x = self.relu(x) 117 | x = self.maxpool(x) 118 | x = self.layer1(x) 119 | x = self.layer2(x) 120 | x = self.layer3(x) 121 | x = self.layer4(x) # x after layer4, shape = N * 512 * H/32 * W/32 122 | max_pool = torch.nn.MaxPool2d((x.size(-2), x.size(-1)), stride=(x.size(-2), x.size(-1)), padding=0, 123 | ceil_mode=False) 124 | Max = max_pool(x) # avg.size = N * 512 * 1 * 1 125 | Max = Max.view(Max.size(0), -1) # avg.size = N * 512 126 | avg_pool = torch.nn.AvgPool2d((x.size(-2), x.size(-1)), stride=(x.size(-2), x.size(-1)), padding=0, 127 | ceil_mode=False, count_include_pad=True) 128 | avg = avg_pool(x) # avg.size = N * 512 * 1 * 1 129 | avg = avg.view(avg.size(0), -1) # avg.size = N * 512 130 | fc = self.fc(avg) # fc.size = N * 1000 131 | output = { 132 | 'max': Max, 133 | 'avg': avg, 134 | 'fc': fc 135 | } 136 | return output 137 | 138 | 139 | class ResNetExt(object): 140 | 141 | def __init__(self, model): 142 | self.model = model 143 | 144 | def extract(self): 145 | img_dir = "/home/shw/data/UVA/val/images/visible" 146 | res_model = ResidualNet(model=self.model) 147 | res_model.eval() 148 | 149 | features = [] 150 | labels = [] 151 | val = [] 152 | for cls_name in os.listdir(img_dir): 153 | # print(cls_name + "..." 
+ str(len(features))) 154 | cls_id = 0 155 | for name in os.listdir(os.path.join(img_dir, cls_name)): 156 | img_path = os.path.join(img_dir, cls_name, name) 157 | # img = scipy.misc.imread(img_path, mode="RGB") 158 | # img = scipy.misc.imresize(img, size=(16, 16)) 159 | img = cv2.imread(img_path) 160 | # img = img[:, :, ::-1] # switch to BGR 161 | # img = np.transpose(img, (2, 0, 1)) / 255. 162 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 163 | # img[0] -= means[0] # reduce B's mean 164 | # img[1] -= means[1] # reduce G's mean 165 | # img[2] -= means[2] # reduce R's mean 166 | # img = np.expand_dims(img, axis=0) 167 | # inputs = torch.autograd.Variable(torch.from_numpy(img).float()) 168 | # d_hist = res_model(inputs)[pick_layer] 169 | # d_hist = d_hist.data.cpu().numpy().flatten() 170 | # d_hist /= np.sum(d_hist) # normalize 171 | 172 | # print("图片亮度值为:", image.mean()) 173 | # print(d_hist.shape) 174 | val.append(img.mean()/255) 175 | # features.append(img) 176 | # lb = np.array([cls_id]) 177 | # labels.append(lb) 178 | # print(d_hist) 179 | print(val) 180 | print(len(val)) 181 | plt.subplot() 182 | plt.xlabel('lightness') 183 | plt.ylabel('number') 184 | plt.hist(val, 255,stacked=True,alpha=0.5) 185 | plt.show() 186 | # feature_mat = features 187 | # np.savetxt("features.txt", feature_mat) 188 | # label_mat = np.array(labels) 189 | # np.savetxt("labels.txt", label_mat) 190 | 191 | 192 | class ResNetFeat(object): 193 | 194 | def make_samples(self, db, verbose=True): 195 | sample_cache = '{}-{}'.format(RES_model, pick_layer) 196 | 197 | try: 198 | samples = cPickle.load(open(os.path.join(cache_dir, sample_cache), "rb", True)) 199 | for sample in samples: 200 | sample['hist'] /= np.sum(sample['hist']) # normalize 201 | if verbose: 202 | print("Using cache..., config=%s, distance=%s, depth=%s" % (sample_cache, d_type, depth)) 203 | except: 204 | if verbose: 205 | print("Counting histogram..., config=%s, distance=%s, depth=%s" % (sample_cache, d_type, depth)) 206 | 207 | res_model = ResidualNet(model=RES_model) 208 | res_model.eval() 209 | if use_gpu: 210 | res_model = res_model.cuda() 211 | samples = [] 212 | data = db.get_data() 213 | for d in data.itertuples(): 214 | d_img, d_cls = getattr(d, "img"), getattr(d, "cls") 215 | img = imageio.imread(d_img, mode="RGB") 216 | img = img[:, :, ::-1] # switch to BGR 217 | img = np.transpose(img, (2, 0, 1)) / 255. 
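# At this point `img` is channel-first (C, H, W), in BGR order and scaled to [0, 1];
# the next three lines subtract the per-channel `means` defined near the top of this file
# (ImageNet BGR means already divided by 255 to match this scaling).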
218 | img[0] -= means[0] # reduce B's mean 219 | img[1] -= means[1] # reduce G's mean 220 | img[2] -= means[2] # reduce R's mean 221 | img = np.expand_dims(img, axis=0) 222 | try: 223 | if use_gpu: 224 | inputs = torch.autograd.Variable(torch.from_numpy(img).cuda().float()) 225 | else: 226 | inputs = torch.autograd.Variable(torch.from_numpy(img).float()) 227 | d_hist = res_model(inputs)[pick_layer] 228 | d_hist = d_hist.data.cpu().numpy().flatten() 229 | d_hist /= np.sum(d_hist) # normalize 230 | samples.append({ 231 | 'img': d_img, 232 | 'cls': d_cls, 233 | 'hist': d_hist 234 | }) 235 | except: 236 | pass 237 | cPickle.dump(samples, open(os.path.join(cache_dir, sample_cache), "wb", True)) 238 | 239 | return samples 240 | 241 | 242 | if __name__ == "__main__": 243 | # evaluate database 244 | # db = Database() 245 | # APs = evaluate_class(db, f_class=ResNetFeat, d_type=d_type, depth=depth) 246 | # cls_MAPs = [] 247 | # for cls, cls_APs in APs.items(): 248 | # MAP = np.mean(cls_APs) 249 | # print("Class {}, MAP {}".format(cls, MAP)) 250 | # cls_MAPs.append(MAP) 251 | # print("MMAP", np.mean(cls_MAPs)) 252 | 253 | # img_dir = "G:\graduation\dataset\streetDataset\hk-kmeans-data" 254 | # print(os.listdir(img_dir)) 255 | 256 | ext = ResNetExt("resnet18") 257 | ext.extract() 258 | -------------------------------------------------------------------------------- /tsne/tsne_torch.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as pyplot 3 | import argparse 4 | import torch 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--xfile", type=str, default="features.txt", help="file name of feature stored") 8 | parser.add_argument("--yfile", type=str, default="labels.txt", help="file name of label stored") 9 | parser.add_argument("--cuda", type=int, default=1, help="if use cuda accelarate") 10 | 11 | opt = parser.parse_args() 12 | print("get choice from args", opt) 13 | xfile = opt.xfile 14 | yfile = opt.yfile 15 | 16 | if opt.cuda: 17 | print("set use cuda") 18 | torch.set_default_tensor_type(torch.cuda.DoubleTensor) 19 | else: 20 | torch.set_default_tensor_type(torch.DoubleTensor) 21 | 22 | cls_idx = { 23 | "bao_lu_la_ji": 0, 24 | "cheng_san_jing_ying": 1, 25 | "gu_ding_tan_fan": 2, 26 | "ling_san_la_ji": 3, 27 | "kua_men_ying_ye": 4, 28 | "liu_dong_tan_fan": 5, 29 | "luan_dui_wu_liao": 6, 30 | "luan_la_tiao_fu": 7, 31 | "luan_shai_yi_wu": 8, 32 | "luan_she_guang_gao_pai": 9 33 | } 34 | cls_color = { 35 | 0: "#FFFF00", # 黄色 36 | 1: "#FF0000", # 大红 37 | 2: "#FF00FF", # 粉色 38 | 3: "#B9D3EE", # 浅蓝色 39 | 4: "#C7C7C7", # 灰色 40 | 5: "#B3EE3A", # 青色 41 | 6: "#242424", # 黑色 42 | 7: "#00FFFF", # 亮蓝色 43 | 8: "#00CD00", # 绿色 44 | 9: "#9400D3" # 深紫色 45 | } 46 | cls_color = { 47 | 0: "red", # 占道堆物 48 | 1: "blue", # 暴露垃圾 49 | 2: "green",# 乱设广告怕 50 | 3: "black" # 违规摆摊 51 | } 52 | cls_idx = { 53 | 0: 1, 54 | 1: 3, 55 | 2: 3, 56 | 3: 1, 57 | 4: 3, 58 | 5: 3, 59 | 6: 0, 60 | 7: 2, 61 | 8: 2, 62 | 9: 2 63 | } 64 | 65 | def Hbeta_torch(D, beta=1.0): 66 | P = torch.exp(-D.clone() * beta) 67 | 68 | sumP = torch.sum(P) 69 | 70 | H = torch.log(sumP) + beta * torch.sum(D * P) / sumP 71 | P = P / sumP 72 | 73 | return H, P 74 | 75 | 76 | def x2p_torch(X, tol=1e-5, perplexity=30.0): 77 | """ 78 | Performs a binary search to get P-values in such a way that each 79 | conditional Gaussian has the same perplexity. 
80 | """ 81 | 82 | # Initialize some variables 83 | print("Computing pairwise distances...") 84 | (n, d) = X.shape 85 | 86 | sum_X = torch.sum(X * X, 1) 87 | D = torch.add(torch.add(-2 * torch.mm(X, X.t()), sum_X).t(), sum_X) 88 | 89 | P = torch.zeros(n, n) 90 | beta = torch.ones(n, 1) 91 | logU = torch.log(torch.tensor([perplexity])) 92 | n_list = [i for i in range(n)] 93 | 94 | # Loop over all datapoints 95 | for i in range(n): 96 | 97 | # Print progress 98 | if i % 500 == 0: 99 | print("Computing P-values for point %d of %d..." % (i, n)) 100 | 101 | # Compute the Gaussian kernel and entropy for the current precision 102 | # there may be something wrong with this setting None 103 | betamin = None 104 | betamax = None 105 | Di = D[i, n_list[0:i] + n_list[i + 1:n]] 106 | 107 | (H, thisP) = Hbeta_torch(Di, beta[i]) 108 | 109 | # Evaluate whether the perplexity is within tolerance 110 | Hdiff = H - logU 111 | tries = 0 112 | while torch.abs(Hdiff) > tol and tries < 50: 113 | 114 | # If not, increase or decrease precision 115 | if Hdiff > 0: 116 | betamin = beta[i].clone() 117 | if betamax is None: 118 | beta[i] = beta[i] * 2. 119 | else: 120 | beta[i] = (beta[i] + betamax) / 2. 121 | else: 122 | betamax = beta[i].clone() 123 | if betamin is None: 124 | beta[i] = beta[i] / 2. 125 | else: 126 | beta[i] = (beta[i] + betamin) / 2. 127 | 128 | # Recompute the values 129 | (H, thisP) = Hbeta_torch(Di, beta[i]) 130 | 131 | Hdiff = H - logU 132 | tries += 1 133 | 134 | # Set the final row of P 135 | P[i, n_list[0:i] + n_list[i + 1:n]] = thisP 136 | 137 | # Return final P-matrix 138 | return P 139 | 140 | 141 | def pca_torch(X, no_dims=50): 142 | print("Preprocessing the data using PCA...") 143 | (n, d) = X.shape 144 | X = X - torch.mean(X, 0) 145 | 146 | (l, M) = torch.eig(torch.mm(X.t(), X), True) 147 | # split M real 148 | for i in range(d): 149 | if l[i, 1] != 0: 150 | M[:, i + 1] = M[:, i] 151 | i += 1 152 | 153 | Y = torch.mm(X, M[:, 0:no_dims]) 154 | return Y 155 | 156 | 157 | def tsne(X, no_dims=2, initial_dims=50, perplexity=30.0): 158 | """ 159 | Runs t-SNE on the dataset in the NxD array X to reduce its 160 | dimensionality to no_dims dimensions. The syntaxis of the function is 161 | `Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array. 162 | """ 163 | 164 | # Check inputs 165 | if isinstance(no_dims, float): 166 | print("Error: array X should not have type float.") 167 | return -1 168 | if round(no_dims) != no_dims: 169 | print("Error: number of dimensions should be an integer.") 170 | return -1 171 | 172 | # Initialize variables 173 | print(X.shape) 174 | X = pca_torch(X, initial_dims) 175 | (n, d) = X.shape 176 | max_iter = 500 177 | initial_momentum = 0.5 178 | final_momentum = 0.8 179 | eta = 500 180 | min_gain = 0.01 181 | Y = torch.randn(n, no_dims) 182 | dY = torch.zeros(n, no_dims) 183 | iY = torch.zeros(n, no_dims) 184 | gains = torch.ones(n, no_dims) 185 | 186 | # Compute P-values 187 | P = x2p_torch(X, 1e-5, perplexity) 188 | P = P + P.t() 189 | P = P / torch.sum(P) 190 | P = P * 4. # early exaggeration 191 | print("get P shape", P.shape) 192 | P = torch.max(P, torch.tensor([1e-21])) 193 | 194 | # Run iterations 195 | for iter in range(max_iter): 196 | 197 | # Compute pairwise affinities 198 | sum_Y = torch.sum(Y * Y, 1) 199 | num = -2. * torch.mm(Y, Y.t()) 200 | num = 1. / (1. + torch.add(torch.add(num, sum_Y).t(), sum_Y)) 201 | num[range(n), range(n)] = 0. 
202 | Q = num / torch.sum(num) 203 | Q = torch.max(Q, torch.tensor([1e-12])) 204 | 205 | # Compute gradient 206 | PQ = P - Q 207 | for i in range(n): 208 | dY[i, :] = torch.sum((PQ[:, i] * num[:, i]).repeat(no_dims, 1).t() * (Y[i, :] - Y), 0) 209 | 210 | # Perform the update 211 | if iter < 20: 212 | momentum = initial_momentum 213 | else: 214 | momentum = final_momentum 215 | 216 | gains = (gains + 0.2) * ((dY > 0.) != (iY > 0.)).double() + (gains * 0.8) * ((dY > 0.) == (iY > 0.)).double() 217 | gains[gains < min_gain] = min_gain 218 | iY = momentum * iY - eta * (gains * dY) 219 | Y = Y + iY 220 | Y = Y - torch.mean(Y, 0) 221 | 222 | # Compute current value of cost function 223 | if (iter + 1) % 10 == 0: 224 | C = torch.sum(P * torch.log(P / Q)) 225 | print("Iteration %d: error is %f" % (iter + 1, C)) 226 | 227 | # Stop lying about P-values 228 | if iter == 100: 229 | P = P / 4. 230 | 231 | # Return solution 232 | return Y 233 | 234 | 235 | if __name__ == "__main__": 236 | print("Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset.") 237 | 238 | X = np.loadtxt(xfile) 239 | X = torch.Tensor(X) 240 | labels = np.loadtxt(yfile).tolist() 241 | # print(labels) 242 | labels = [cls_color[cls_idx[i]] for i in labels] 243 | # print(labels) 244 | # labels = [cls_color[i] for i in labels] 245 | # print(labels) 246 | # confirm that x file get same number point than label file 247 | # otherwise may cause error in scatter 248 | print(len(X), len(labels)) 249 | assert (len(X[:, 0]) == len(X[:, 1])) 250 | assert (len(X) == len(labels)) 251 | 252 | with torch.no_grad(): 253 | Y = tsne(X, 2, 50, 20.0) 254 | 255 | if opt.cuda: 256 | Y = Y.cpu().numpy() 257 | 258 | pyplot.title("resnet101 avg ") 259 | pyplot.scatter(Y[:, 0], Y[:, 1], 20, labels) 260 | pyplot.show() 261 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/utils/__init__.py -------------------------------------------------------------------------------- /utils/activations.py: -------------------------------------------------------------------------------- 1 | # Activation functions 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- 9 | class SiLU(nn.Module): # export-friendly version of nn.SiLU() 10 | @staticmethod 11 | def forward(x): 12 | return x * torch.sigmoid(x) 13 | 14 | 15 | class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() 16 | @staticmethod 17 | def forward(x): 18 | # return x * F.hardsigmoid(x) # for torchscript and CoreML 19 | return x * F.hardtanh(x + 3, 0., 6.) / 6. 
# for torchscript, CoreML and ONNX 20 | 21 | 22 | class MemoryEfficientSwish(nn.Module): 23 | class F(torch.autograd.Function): 24 | @staticmethod 25 | def forward(ctx, x): 26 | ctx.save_for_backward(x) 27 | return x * torch.sigmoid(x) 28 | 29 | @staticmethod 30 | def backward(ctx, grad_output): 31 | x = ctx.saved_tensors[0] 32 | sx = torch.sigmoid(x) 33 | return grad_output * (sx * (1 + x * (1 - sx))) 34 | 35 | def forward(self, x): 36 | return self.F.apply(x) 37 | 38 | 39 | # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- 40 | class Mish(nn.Module): 41 | @staticmethod 42 | def forward(x): 43 | return x * F.softplus(x).tanh() 44 | 45 | 46 | class MemoryEfficientMish(nn.Module): 47 | class F(torch.autograd.Function): 48 | @staticmethod 49 | def forward(ctx, x): 50 | ctx.save_for_backward(x) 51 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 52 | 53 | @staticmethod 54 | def backward(ctx, grad_output): 55 | x = ctx.saved_tensors[0] 56 | sx = torch.sigmoid(x) 57 | fx = F.softplus(x).tanh() 58 | return grad_output * (fx + x * sx * (1 - fx * fx)) 59 | 60 | def forward(self, x): 61 | return self.F.apply(x) 62 | 63 | 64 | # FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- 65 | class FReLU(nn.Module): 66 | def __init__(self, c1, k=3): # ch_in, kernel 67 | super().__init__() 68 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 69 | self.bn = nn.BatchNorm2d(c1) 70 | 71 | def forward(self, x): 72 | return torch.max(x, self.bn(self.conv(x))) 73 | -------------------------------------------------------------------------------- /utils/autoanchor.py: -------------------------------------------------------------------------------- 1 | # Auto-anchor utils 2 | 3 | import numpy as np 4 | import torch 5 | import yaml 6 | from scipy.cluster.vq import kmeans 7 | from tqdm import tqdm 8 | 9 | from utils.general import colorstr 10 | 11 | 12 | def check_anchor_order(m): 13 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary 14 | a = m.anchor_grid.prod(-1).view(-1) # anchor area 15 | da = a[-1] - a[0] # delta a 16 | ds = m.stride[-1] - m.stride[0] # delta s 17 | if da.sign() != ds.sign(): # same order 18 | print('Reversing anchor order') 19 | m.anchors[:] = m.anchors.flip(0) 20 | m.anchor_grid[:] = m.anchor_grid.flip(0) 21 | 22 | 23 | def check_anchors(dataset, model, thr=4.0, imgsz=640): 24 | # Check anchor fit to data, recompute if necessary 25 | prefix = colorstr('autoanchor: ') 26 | print(f'\n{prefix}Analyzing anchors... ', end='') 27 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() 28 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) 29 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale 30 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh 31 | 32 | def metric(k): # compute metric 33 | r = wh[:, None] / k[None] 34 | x = torch.min(r, 1. / r).min(2)[0] # ratio metric 35 | best = x.max(1)[0] # best_x 36 | aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold 37 | bpr = (best > 1. 
/ thr).float().mean() # best possible recall 38 | return bpr, aat 39 | 40 | anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors 41 | bpr, aat = metric(anchors) 42 | print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') 43 | if bpr < 0.98: # threshold to recompute 44 | print('. Attempting to improve anchors, please wait...') 45 | na = m.anchor_grid.numel() // 2 # number of anchors 46 | try: 47 | anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) 48 | except Exception as e: 49 | print(f'{prefix}ERROR: {e}') 50 | new_bpr = metric(anchors)[0] 51 | if new_bpr > bpr: # replace anchors 52 | anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) 53 | m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference 54 | m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss 55 | check_anchor_order(m) 56 | print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') 57 | else: 58 | print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') 59 | print('') # newline 60 | 61 | 62 | def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): 63 | """ Creates kmeans-evolved anchors from training dataset 64 | 65 | Arguments: 66 | path: path to dataset *.yaml, or a loaded dataset 67 | n: number of anchors 68 | img_size: image size used for training 69 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 70 | gen: generations to evolve anchors using genetic algorithm 71 | verbose: print all results 72 | 73 | Return: 74 | k: kmeans evolved anchors 75 | 76 | Usage: 77 | from utils.autoanchor import *; _ = kmean_anchors() 78 | """ 79 | thr = 1. / thr 80 | prefix = colorstr('autoanchor: ') 81 | 82 | def metric(k, wh): # compute metrics 83 | r = wh[:, None] / k[None] 84 | x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric 85 | # x = wh_iou(wh, torch.tensor(k)) # iou metric 86 | return x, x.max(1)[0] # x, best_x 87 | 88 | def anchor_fitness(k): # mutation fitness 89 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh) 90 | return (best * (best > thr).float()).mean() # fitness 91 | 92 | def print_results(k): 93 | k = k[np.argsort(k.prod(1))] # sort small to large 94 | x, best = metric(k, wh0) 95 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr 96 | print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') 97 | print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' 98 | f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') 99 | for i, x in enumerate(k): 100 | print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg 101 | return k 102 | 103 | if isinstance(path, str): # *.yaml file 104 | with open(path) as f: 105 | data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict 106 | from utils.datasets import LoadImagesAndLabels 107 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) 108 | else: 109 | dataset = path # dataset 110 | 111 | # Get label wh 112 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) 113 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh 114 | 115 | # Filter 116 | i = (wh0 < 3.0).any(1).sum() 117 | if i: 118 | print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') 119 | wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels 120 | # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 121 | 122 | # Kmeans calculation 123 | print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') 124 | s = wh.std(0) # sigmas for whitening 125 | k, dist = kmeans(wh / s, n, iter=30) # points, mean distance 126 | assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') 127 | k *= s 128 | wh = torch.tensor(wh, dtype=torch.float32) # filtered 129 | wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered 130 | k = print_results(k) 131 | 132 | # Plot 133 | # k, d = [None] * 20, [None] * 20 134 | # for i in tqdm(range(1, 21)): 135 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance 136 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) 137 | # ax = ax.ravel() 138 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') 139 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh 140 | # ax[0].hist(wh[wh[:, 0]<100, 0],400) 141 | # ax[1].hist(wh[wh[:, 1]<100, 1],400) 142 | # fig.savefig('wh.png', dpi=200) 143 | 144 | # Evolve 145 | npr = np.random 146 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma 147 | pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar 148 | for _ in pbar: 149 | v = np.ones(sh) 150 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates) 151 | v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) 152 | kg = (k.copy() * v).clip(min=2.0) 153 | fg = anchor_fitness(kg) 154 | if fg > f: 155 | f, k = fg, kg.copy() 156 | pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' 157 | if verbose: 158 | print_results(k) 159 | 160 | return print_results(k) 161 
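The autoanchor utilities above are normally triggered from training, but kmean_anchors can also be run on its own; a small, hedged example follows. The yaml path shown is just the function's own default (any dataset yaml under ./data should work), and the printed anchor rows would then be copied by hand into a model yaml's anchors: section.

from utils.autoanchor import kmean_anchors

# evolve 9 anchors for 640-pixel training images; thr and gen mirror the defaults above
k = kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=False)
print(k)  # rows are (width, height) pairs, sorted from small to large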
| -------------------------------------------------------------------------------- /utils/aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/utils/aws/__init__.py -------------------------------------------------------------------------------- /utils/aws/mime.sh: -------------------------------------------------------------------------------- 1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ 2 | # This script will run on every instance restart, not only on first start 3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- 4 | 5 | Content-Type: multipart/mixed; boundary="//" 6 | MIME-Version: 1.0 7 | 8 | --// 9 | Content-Type: text/cloud-config; charset="us-ascii" 10 | MIME-Version: 1.0 11 | Content-Transfer-Encoding: 7bit 12 | Content-Disposition: attachment; filename="cloud-config.txt" 13 | 14 | #cloud-config 15 | cloud_final_modules: 16 | - [scripts-user, always] 17 | 18 | --// 19 | Content-Type: text/x-shellscript; charset="us-ascii" 20 | MIME-Version: 1.0 21 | Content-Transfer-Encoding: 7bit 22 | Content-Disposition: attachment; filename="userdata.txt" 23 | 24 | #!/bin/bash 25 | # --- paste contents of userdata.sh here --- 26 | --// 27 | -------------------------------------------------------------------------------- /utils/aws/resume.py: -------------------------------------------------------------------------------- 1 | # Resume all interrupted trainings in yolov5/ dir including DPP trainings 2 | # Usage: $ python utils/aws/resume.py 3 | 4 | import os 5 | import sys 6 | from pathlib import Path 7 | 8 | import torch 9 | import yaml 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | 13 | port = 0 # --master_port 14 | path = Path('').resolve() 15 | for last in path.rglob('*/**/last.pt'): 16 | ckpt = torch.load(last) 17 | if ckpt['optimizer'] is None: 18 | continue 19 | 20 | # Load opt.yaml 21 | with open(last.parent.parent / 'opt.yaml') as f: 22 | opt = yaml.load(f, Loader=yaml.SafeLoader) 23 | 24 | # Get device count 25 | d = opt['device'].split(',') # devices 26 | nd = len(d) # number of devices 27 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel 28 | 29 | if ddp: # multi-GPU 30 | port += 1 31 | cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' 32 | else: # single-GPU 33 | cmd = f'python train.py --resume {last}' 34 | 35 | cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread 36 | print(cmd) 37 | os.system(cmd) 38 | -------------------------------------------------------------------------------- /utils/aws/userdata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html 3 | # This script will run only once on first instance start (for a re-start script see mime.sh) 4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir 5 | # Use >300 GB SSD 6 | 7 | cd home/ubuntu 8 | if [ ! -d yolov5 ]; then 9 | echo "Running first-time script." 
# install dependencies, download COCO, pull Docker 10 | git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 11 | cd yolov5 12 | bash data/scripts/get_coco.sh && echo "Data done." & 13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & 14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & 15 | wait && echo "All tasks done." # finish background tasks 16 | else 17 | echo "Running re-start script." # resume interrupted runs 18 | i=0 19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' 20 | while IFS= read -r id; do 21 | ((i++)) 22 | echo "restarting container $i: $id" 23 | sudo docker start $id 24 | # sudo docker exec -it $id python train.py --resume # single-GPU 25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario 26 | done <<<"$list" 27 | fi 28 | -------------------------------------------------------------------------------- /utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables are the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 
25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==18.1 3 | Flask==1.0.2 4 | gunicorn==19.9.0 5 | -------------------------------------------------------------------------------- /utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 -------------------------------------------------------------------------------- /utils/google_utils.py: -------------------------------------------------------------------------------- 1 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries 2 | 3 | import os 4 | import platform 5 | import subprocess 6 | import time 7 | from pathlib import Path 8 | 9 | import requests 10 | import torch 11 | 12 | 13 | def gsutil_getsize(url=''): 14 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du 15 | s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') 16 | return eval(s.split(' ')[0]) if len(s) else 0 # bytes 17 | 18 | 19 | def attempt_download(file, repo='ultralytics/yolov5'): 20 | # Attempt file download if does not exist 21 | file = Path(str(file).strip().replace("'", '').lower()) 22 | 23 | if not file.exists(): 24 | try: 25 | response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api 26 | assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] 27 | tag = response['tag_name'] # i.e. 'v1.0' 28 | except: # fallback plan 29 | assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] 30 | tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] 31 | 32 | name = file.name 33 | if name in assets: 34 | msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' 35 | redundant = False # second download option 36 | try: # GitHub 37 | url = f'https://github.com/{repo}/releases/download/{tag}/{name}' 38 | print(f'Downloading {url} to {file}...') 39 | torch.hub.download_url_to_file(url, file) 40 | assert file.exists() and file.stat().st_size > 1E6 # check 41 | except Exception as e: # GCP 42 | print(f'Download error: {e}') 43 | assert redundant, 'No secondary mirror' 44 | url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' 45 | print(f'Downloading {url} to {file}...') 46 | os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) 47 | finally: 48 | if not file.exists() or file.stat().st_size < 1E6: # check 49 | file.unlink(missing_ok=True) # remove partial downloads 50 | print(f'ERROR: Download failure: {msg}') 51 | print('') 52 | return 53 | 54 | 55 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): 56 | # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() 57 | t = time.time() 58 | file = Path(file) 59 | cookie = Path('cookie') # gdrive cookie 60 | print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') 61 | file.unlink(missing_ok=True) # remove existing file 62 | cookie.unlink(missing_ok=True) # remove existing cookie 63 | 64 | # Attempt file download 65 | out = "NUL" if platform.system() == "Windows" else "/dev/null" 66 | os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') 67 | if os.path.exists('cookie'): # large file 68 | s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' 69 | else: # small file 70 | s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' 71 | r = os.system(s) # execute, capture return 72 | cookie.unlink(missing_ok=True) # remove existing cookie 73 | 74 | # Error check 75 | if r != 0: 76 | file.unlink(missing_ok=True) # remove partial 77 | print('Download error ') # raise Exception('Download error') 78 | return r 79 | 80 | # Unzip if archive 81 | if file.suffix == '.zip': 82 | print('unzipping... ', end='') 83 | os.system(f'unzip -q {file}') # unzip 84 | file.unlink() # remove zip to free space 85 | 86 | print(f'Done ({time.time() - t:.1f}s)') 87 | return r 88 | 89 | 90 | def get_token(cookie="./cookie"): 91 | with open(cookie) as f: 92 | for line in f: 93 | if "download" in line: 94 | return line.split()[-1] 95 | return "" 96 | 97 | # def upload_blob(bucket_name, source_file_name, destination_blob_name): 98 | # # Uploads a file to a bucket 99 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python 100 | # 101 | # storage_client = storage.Client() 102 | # bucket = storage_client.get_bucket(bucket_name) 103 | # blob = bucket.blob(destination_blob_name) 104 | # 105 | # blob.upload_from_filename(source_file_name) 106 | # 107 | # print('File {} uploaded to {}.'.format( 108 | # source_file_name, 109 | # destination_blob_name)) 110 | # 111 | # 112 | # def download_blob(bucket_name, source_blob_name, destination_file_name): 113 | # # Uploads a blob from a bucket 114 | # storage_client = storage.Client() 115 | # bucket = storage_client.get_bucket(bucket_name) 116 | # blob = bucket.blob(source_blob_name) 117 | # 118 | # blob.download_to_filename(destination_file_name) 119 | # 120 | # print('Blob {} downloaded to {}.'.format( 121 | # source_blob_name, 122 | # destination_file_name)) 123 | -------------------------------------------------------------------------------- /utils/loss.py: -------------------------------------------------------------------------------- 1 | # Loss functions 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from utils.general import bbox_iou 7 | from utils.torch_utils import is_parallel 8 | 9 | 10 | def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 11 | # return positive, negative label smoothing BCE targets 12 | return 1.0 - 0.5 * eps, 0.5 * eps 13 | 14 | 15 | class BCEBlurWithLogitsLoss(nn.Module): 16 | # BCEwithLogitLoss() with reduced missing label effects. 
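    # Illustrative: when the model is confident but the label is 0 (a likely missing annotation),
    # dx = pred - true approaches 1, so alpha_factor approaches 0 and that element is nearly
    # removed from the loss; when pred <= true, alpha_factor stays close to 1 and the plain
    # BCE term is kept essentially unchanged.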
17 | def __init__(self, alpha=0.05): 18 | super(BCEBlurWithLogitsLoss, self).__init__() 19 | self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() 20 | self.alpha = alpha 21 | 22 | def forward(self, pred, true): 23 | loss = self.loss_fcn(pred, true) 24 | pred = torch.sigmoid(pred) # prob from logits 25 | dx = pred - true # reduce only missing label effects 26 | # dx = (pred - true).abs() # reduce missing label and false label effects 27 | alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) 28 | loss *= alpha_factor 29 | return loss.mean() 30 | 31 | 32 | class FocalLoss(nn.Module): 33 | # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 34 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 35 | super(FocalLoss, self).__init__() 36 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 37 | self.gamma = gamma 38 | self.alpha = alpha 39 | self.reduction = loss_fcn.reduction 40 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 41 | 42 | def forward(self, pred, true): 43 | loss = self.loss_fcn(pred, true) 44 | # p_t = torch.exp(-loss) 45 | # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability 46 | 47 | # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py 48 | pred_prob = torch.sigmoid(pred) # prob from logits 49 | p_t = true * pred_prob + (1 - true) * (1 - pred_prob) 50 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 51 | modulating_factor = (1.0 - p_t) ** self.gamma 52 | loss *= alpha_factor * modulating_factor 53 | 54 | if self.reduction == 'mean': 55 | return loss.mean() 56 | elif self.reduction == 'sum': 57 | return loss.sum() 58 | else: # 'none' 59 | return loss 60 | 61 | 62 | class QFocalLoss(nn.Module): 63 | # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 64 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 65 | super(QFocalLoss, self).__init__() 66 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 67 | self.gamma = gamma 68 | self.alpha = alpha 69 | self.reduction = loss_fcn.reduction 70 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 71 | 72 | def forward(self, pred, true): 73 | loss = self.loss_fcn(pred, true) 74 | 75 | pred_prob = torch.sigmoid(pred) # prob from logits 76 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 77 | modulating_factor = torch.abs(true - pred_prob) ** self.gamma 78 | loss *= alpha_factor * modulating_factor 79 | 80 | if self.reduction == 'mean': 81 | return loss.mean() 82 | elif self.reduction == 'sum': 83 | return loss.sum() 84 | else: # 'none' 85 | return loss 86 | 87 | 88 | class ComputeLoss: 89 | # Compute losses 90 | def __init__(self, model, autobalance=False): 91 | super(ComputeLoss, self).__init__() 92 | device = next(model.parameters()).device # get model device 93 | h = model.hyp # hyperparameters 94 | 95 | # Define criteria 96 | BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) 97 | BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) 98 | 99 | # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 100 | self.cp, self.cn = smooth_BCE(eps=0.0) 101 | 102 | # Focal loss 103 | g = h['fl_gamma'] # focal loss gamma 104 | if g > 0: 105 | BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) 106 | 107 | det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module 108 | self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 109 | self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index 110 | self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance 111 | for k in 'na', 'nc', 'nl', 'anchors': 112 | setattr(self, k, getattr(det, k)) 113 | 114 | def __call__(self, p, targets): # predictions, targets, model 115 | device = targets.device 116 | lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) 117 | tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets 118 | 119 | # Losses 120 | for i, pi in enumerate(p): # layer index, layer predictions 121 | b, a, gj, gi = indices[i] # image, anchor, gridy, gridx 122 | tobj = torch.zeros_like(pi[..., 0], device=device) # target obj 123 | 124 | n = b.shape[0] # number of targets 125 | if n: 126 | ps = pi[b, a, gj, gi] # prediction subset corresponding to targets 127 | 128 | # Regression 129 | pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 130 | pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] 131 | pbox = torch.cat((pxy, pwh), 1) # predicted box 132 | iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) 133 | lbox += (1.0 - iou).mean() # iou loss 134 | 135 | # Objectness 136 | tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio 137 | 138 | # Classification 139 | if self.nc > 1: # cls loss (only if multiple classes) 140 | t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets 141 | t[range(n), tcls[i]] = self.cp 142 | lcls += self.BCEcls(ps[:, 5:], t) # BCE 143 | 144 | # Append targets to text file 145 | # with open('targets.txt', 'a') as file: 146 | # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] 147 | 148 | obji = self.BCEobj(pi[..., 4], tobj) 149 | lobj += obji * self.balance[i] # obj loss 150 | if self.autobalance: 151 | self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() 152 | 153 | if self.autobalance: 154 | self.balance = [x / self.balance[self.ssi] for x in self.balance] 155 | lbox *= self.hyp['box'] 156 | lobj *= self.hyp['obj'] 157 | lcls *= self.hyp['cls'] 158 | bs = tobj.shape[0] # batch size 159 | 160 | loss = lbox + lobj + lcls 161 | return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() 162 | 163 | def build_targets(self, p, targets): 164 | # Build targets for compute_loss(), input targets(image,class,x,y,w,h) 165 | na, nt = self.na, targets.shape[0] # number of anchors, targets 166 | tcls, tbox, indices, anch = [], [], [], [] 167 | gain = torch.ones(7, device=targets.device) # normalized to gridspace gain 168 | ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) 169 | targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices 170 | 171 | g = 0.5 # bias 172 | off = torch.tensor([[0, 0], 173 | [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m 174 | # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm 175 | ], device=targets.device).float() * g # offsets 176 | 177 | for i in range(self.nl): 178 | anchors = self.anchors[i] 179 | gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain 180 | 181 | # Match targets to anchors 182 | t = targets * gain 183 | if nt: 184 | # Matches 185 | r = t[:, :, 4:6] / anchors[:, None] # wh ratio 186 | j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare 187 | # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) 188 | t = t[j] # filter 189 | 190 | # Offsets 191 | gxy = t[:, 2:4] # grid xy 192 | gxi = gain[[2, 3]] - gxy # inverse 193 | j, k = ((gxy % 1. < g) & (gxy > 1.)).T 194 | l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T 195 | j = torch.stack((torch.ones_like(j), j, k, l, m)) 196 | t = t.repeat((5, 1, 1))[j] 197 | offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] 198 | else: 199 | t = targets[0] 200 | offsets = 0 201 | 202 | # Define 203 | b, c = t[:, :2].long().T # image, class 204 | gxy = t[:, 2:4] # grid xy 205 | gwh = t[:, 4:6] # grid wh 206 | gij = (gxy - offsets).long() 207 | gi, gj = gij.T # grid xy indices 208 | 209 | # Append 210 | a = t[:, 6].long() # anchor indices 211 | indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices 212 | tbox.append(torch.cat((gxy - gij, gwh), 1)) # box 213 | anch.append(anchors[a]) # anchors 214 | tcls.append(c) # class 215 | 216 | return tcls, tbox, indices, anch 217 | -------------------------------------------------------------------------------- /utils/loss_multi.py: -------------------------------------------------------------------------------- 1 | # Loss functions 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from utils.general import bbox_iou 7 | from utils.torch_utils import is_parallel 8 | 9 | 10 | def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 11 | # return positive, negative label smoothing BCE targets 12 | return 1.0 - 0.5 * eps, 0.5 * eps 13 | 14 | 15 | class BCEBlurWithLogitsLoss(nn.Module): 16 | # BCEwithLogitLoss() with reduced missing label effects. 17 | def __init__(self, alpha=0.05): 18 | super(BCEBlurWithLogitsLoss, self).__init__() 19 | self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() 20 | self.alpha = alpha 21 | 22 | def forward(self, pred, true): 23 | loss = self.loss_fcn(pred, true) 24 | pred = torch.sigmoid(pred) # prob from logits 25 | dx = pred - true # reduce only missing label effects 26 | # dx = (pred - true).abs() # reduce missing label and false label effects 27 | alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) 28 | loss *= alpha_factor 29 | return loss.mean() 30 | 31 | 32 | class FocalLoss(nn.Module): 33 | # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 34 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 35 | super(FocalLoss, self).__init__() 36 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 37 | self.gamma = gamma 38 | self.alpha = alpha 39 | self.reduction = loss_fcn.reduction 40 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 41 | 42 | def forward(self, pred, true): 43 | loss = self.loss_fcn(pred, true) 44 | # p_t = torch.exp(-loss) 45 | # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability 46 | 47 | # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py 48 | pred_prob = torch.sigmoid(pred) # prob from logits 49 | p_t = true * pred_prob + (1 - true) * (1 - pred_prob) 50 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 51 | modulating_factor = (1.0 - p_t) ** self.gamma 52 | loss *= alpha_factor * modulating_factor 53 | 54 | if self.reduction == 'mean': 55 | return loss.mean() 56 | elif self.reduction == 'sum': 57 | return loss.sum() 58 | else: # 'none' 59 | return loss 60 | 61 | 62 | class QFocalLoss(nn.Module): 63 | # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 64 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 65 | super(QFocalLoss, self).__init__() 66 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 67 | self.gamma = gamma 68 | self.alpha = alpha 69 | self.reduction = loss_fcn.reduction 70 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 71 | 72 | def forward(self, pred, true): 73 | loss = self.loss_fcn(pred, true) 74 | 75 | pred_prob = torch.sigmoid(pred) # prob from logits 76 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 77 | modulating_factor = torch.abs(true - pred_prob) ** self.gamma 78 | loss *= alpha_factor * modulating_factor 79 | 80 | if self.reduction == 'mean': 81 | return loss.mean() 82 | elif self.reduction == 'sum': 83 | return loss.sum() 84 | else: # 'none' 85 | return loss 86 | 87 | 88 | class ComputeLoss: 89 | # Compute losses 90 | def __init__(self, model, autobalance=False): 91 | super(ComputeLoss, self).__init__() 92 | device = next(model.parameters()).device # get model device 93 | h = model.hyp # hyperparameters 94 | 95 | # Define criteria 96 | BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) 97 | BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) 98 | 99 | # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 100 | self.cp, self.cn = smooth_BCE(eps=0.0) 101 | 102 | # Focal loss 103 | g = h['fl_gamma'] # focal loss gamma 104 | if g > 0: 105 | BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) 106 | 107 | det = model.module.model_vs[-1] if is_parallel(model) else model.detect # Detect() module 108 | self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 109 | self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index 110 | self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance 111 | for k in 'na', 'nc', 'nl', 'anchors': 112 | setattr(self, k, getattr(det, k)) 113 | 114 | def __call__(self, p, targets): # predictions, targets, model 115 | device = targets.device 116 | lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) 117 | tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets 118 | 119 | # Losses 120 | for i, pi in enumerate(p): # layer index, layer predictions 121 | b, a, gj, gi = indices[i] # image, anchor, gridy, gridx 122 | tobj = torch.zeros_like(pi[..., 0], device=device) # target obj 123 | 124 | n = b.shape[0] # number of targets 125 | if n: 126 | ps = pi[b, a, gj, gi] # prediction subset corresponding to targets 127 | 128 | # Regression 129 | pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 130 | pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] 131 | pbox = torch.cat((pxy, pwh), 1) # predicted box 132 | iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) 133 | lbox += (1.0 - iou).mean() # iou loss 134 | 135 | # Objectness 136 | tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio 137 | 138 | # Classification 139 | if self.nc > 1: # cls loss (only if multiple classes) 140 | t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets 141 | t[range(n), tcls[i]] = self.cp 142 | lcls += self.BCEcls(ps[:, 5:], t) # BCE 143 | 144 | # Append targets to text file 145 | # with open('targets.txt', 'a') as file: 146 | # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] 147 | 148 | obji = self.BCEobj(pi[..., 4], tobj) 149 | lobj += obji * self.balance[i] # obj loss 150 | if self.autobalance: 151 | self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() 152 | 153 | if self.autobalance: 154 | self.balance = [x / self.balance[self.ssi] for x in self.balance] 155 | lbox *= self.hyp['box'] 156 | lobj *= self.hyp['obj'] 157 | lcls *= self.hyp['cls'] 158 | bs = tobj.shape[0] # batch size 159 | 160 | loss = lbox + lobj + lcls 161 | return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() 162 | 163 | def build_targets(self, p, targets): 164 | # Build targets for compute_loss(), input targets(image,class,x,y,w,h) 165 | na, nt = self.na, targets.shape[0] # number of anchors, targets 166 | tcls, tbox, indices, anch = [], [], [], [] 167 | gain = torch.ones(7, device=targets.device) # normalized to gridspace gain 168 | ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) 169 | targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices 170 | 171 | g = 0.5 # bias 172 | off = torch.tensor([[0, 0], 173 | [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m 174 | # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm 175 | ], device=targets.device).float() * g # offsets 176 | 177 | for i in range(self.nl): 178 | anchors = self.anchors[i] 179 | gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain 180 | 181 | # Match targets to anchors 182 | t = targets * gain 183 | if nt: 184 | # Matches 185 | r = t[:, :, 4:6] / anchors[:, None] # wh ratio 186 | j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare 187 | # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) 188 | t = t[j] # filter 189 | 190 | # Offsets 191 | gxy = t[:, 2:4] # grid xy 192 | gxi = gain[[2, 3]] - gxy # inverse 193 | j, k = ((gxy % 1. < g) & (gxy > 1.)).T 194 | l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T 195 | j = torch.stack((torch.ones_like(j), j, k, l, m)) 196 | t = t.repeat((5, 1, 1))[j] 197 | offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] 198 | else: 199 | t = targets[0] 200 | offsets = 0 201 | 202 | # Define 203 | b, c = t[:, :2].long().T # image, class 204 | gxy = t[:, 2:4] # grid xy 205 | gwh = t[:, 4:6] # grid wh 206 | gij = (gxy - offsets).long() 207 | gi, gj = gij.T # grid xy indices 208 | 209 | # Append 210 | a = t[:, 6].long() # anchor indices 211 | indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices 212 | tbox.append(torch.cat((gxy - gij, gwh), 1)) # box 213 | anch.append(anchors[a]) # anchors 214 | tcls.append(c) # class 215 | 216 | return tcls, tbox, indices, anch 217 | -------------------------------------------------------------------------------- /utils/metrics.py: -------------------------------------------------------------------------------- 1 | # Model validation metrics 2 | 3 | from pathlib import Path 4 | 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import torch 8 | 9 | from . import general 10 | 11 | 12 | def fitness(x): 13 | # Model fitness as a weighted combination of metrics 14 | w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] 15 | return (x[:, :4] * w).sum(1) 16 | 17 | 18 | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): 19 | """ Compute the average precision, given the recall and precision curves. 20 | Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 21 | # Arguments 22 | tp: True positives (nparray, nx1 or nx10). 23 | conf: Objectness value from 0-1 (nparray). 24 | pred_cls: Predicted object classes (nparray). 25 | target_cls: True object classes (nparray). 26 | plot: Plot precision-recall curve at mAP@0.5 27 | save_dir: Plot save directory 28 | # Returns 29 | The average precision as computed in py-faster-rcnn. 
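    # Example (illustrative, mirroring how test.py consumes the outputs)
        With n predictions scored at the 10 COCO IoU thresholds 0.50:0.95, tp has shape (n, 10)
        and conf, pred_cls, target_cls are 1-D arrays; the call
            p, r, ap, f1, cls = ap_per_class(tp, conf, pred_cls, target_cls)
        returns per-class p, r, f1 taken at the maximum-F1 confidence and ap of shape (nc, 10),
        so ap[:, 0].mean() is mAP@0.5 and ap.mean(1).mean() is mAP@0.5:0.95.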
30 | """ 31 | 32 | # Sort by objectness 33 | i = np.argsort(-conf) 34 | tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] 35 | 36 | # Find unique classes 37 | unique_classes = np.unique(target_cls) 38 | nc = unique_classes.shape[0] # number of classes, number of detections 39 | 40 | # Create Precision-Recall curve and compute AP for each class 41 | px, py = np.linspace(0, 1, 1000), [] # for plotting 42 | ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) 43 | for ci, c in enumerate(unique_classes): 44 | i = pred_cls == c 45 | n_l = (target_cls == c).sum() # number of labels 46 | n_p = i.sum() # number of predictions 47 | 48 | if n_p == 0 or n_l == 0: 49 | continue 50 | else: 51 | # Accumulate FPs and TPs 52 | fpc = (1 - tp[i]).cumsum(0) 53 | tpc = tp[i].cumsum(0) 54 | 55 | # Recall 56 | recall = tpc / (n_l + 1e-16) # recall curve 57 | r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases 58 | 59 | # Precision 60 | precision = tpc / (tpc + fpc) # precision curve 61 | p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score 62 | 63 | # AP from recall-precision curve 64 | for j in range(tp.shape[1]): 65 | ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) 66 | if plot and j == 0: 67 | py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 68 | 69 | # Compute F1 (harmonic mean of precision and recall) 70 | f1 = 2 * p * r / (p + r + 1e-16) 71 | if plot: 72 | plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) 73 | plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') 74 | plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') 75 | plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') 76 | 77 | i = f1.mean(0).argmax() # max F1 index 78 | return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') 79 | 80 | 81 | def compute_ap(recall, precision): 82 | """ Compute the average precision, given the recall and precision curves 83 | # Arguments 84 | recall: The recall curve (list) 85 | precision: The precision curve (list) 86 | # Returns 87 | Average precision, precision curve, recall curve 88 | """ 89 | 90 | # Append sentinel values to beginning and end 91 | mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) 92 | mpre = np.concatenate(([1.], precision, [0.])) 93 | 94 | # Compute the precision envelope 95 | mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) 96 | 97 | # Integrate area under curve 98 | method = 'interp' # methods: 'continuous', 'interp' 99 | if method == 'interp': 100 | x = np.linspace(0, 1, 101) # 101-point interp (COCO) 101 | ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate 102 | else: # 'continuous' 103 | i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes 104 | ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve 105 | 106 | return ap, mpre, mrec 107 | 108 | 109 | class ConfusionMatrix: 110 | # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix 111 | def __init__(self, nc, conf=0.25, iou_thres=0.45): 112 | self.matrix = np.zeros((nc + 1, nc + 1)) 113 | self.nc = nc # number of classes 114 | self.conf = conf 115 | self.iou_thres = iou_thres 116 | 117 | def process_batch(self, detections, labels): 118 | """ 119 | Return intersection-over-union (Jaccard index) of boxes. 120 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
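        Matching (summary of the code below): detections with confidence below self.conf are
        discarded; the remaining detections are paired with labels whose IoU exceeds
        self.iou_thres, keeping at most one pairing per label and per detection (highest IoU
        wins); unmatched labels and unmatched detections are counted against the background
        row/column, and self.matrix is updated in place.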
121 | Arguments: 122 | detections (Array[N, 6]), x1, y1, x2, y2, conf, class 123 | labels (Array[M, 5]), class, x1, y1, x2, y2 124 | Returns: 125 | None, updates confusion matrix accordingly 126 | """ 127 | detections = detections[detections[:, 4] > self.conf] 128 | gt_classes = labels[:, 0].int() 129 | detection_classes = detections[:, 5].int() 130 | iou = general.box_iou(labels[:, 1:], detections[:, :4]) 131 | 132 | x = torch.where(iou > self.iou_thres) 133 | if x[0].shape[0]: 134 | matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() 135 | if x[0].shape[0] > 1: 136 | matches = matches[matches[:, 2].argsort()[::-1]] 137 | matches = matches[np.unique(matches[:, 1], return_index=True)[1]] 138 | matches = matches[matches[:, 2].argsort()[::-1]] 139 | matches = matches[np.unique(matches[:, 0], return_index=True)[1]] 140 | else: 141 | matches = np.zeros((0, 3)) 142 | 143 | n = matches.shape[0] > 0 144 | m0, m1, _ = matches.transpose().astype(np.int16) 145 | for i, gc in enumerate(gt_classes): 146 | j = m0 == i 147 | if n and sum(j) == 1: 148 | self.matrix[gc, detection_classes[m1[j]]] += 1 # correct 149 | else: 150 | self.matrix[self.nc, gc] += 1 # background FP 151 | 152 | if n: 153 | for i, dc in enumerate(detection_classes): 154 | if not any(m1 == i): 155 | self.matrix[dc, self.nc] += 1 # background FN 156 | 157 | def matrix(self): 158 | return self.matrix 159 | 160 | def plot(self, save_dir='', names=()): 161 | try: 162 | import seaborn as sn 163 | 164 | array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize 165 | array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) 166 | 167 | fig = plt.figure(figsize=(12, 9), tight_layout=True) 168 | sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size 169 | labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels 170 | sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, 171 | xticklabels=names + ['background FP'] if labels else "auto", 172 | yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) 173 | fig.axes[0].set_xlabel('True') 174 | fig.axes[0].set_ylabel('Predicted') 175 | fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) 176 | except Exception as e: 177 | pass 178 | 179 | def print(self): 180 | for i in range(self.nc + 1): 181 | print(' '.join(map(str, self.matrix[i]))) 182 | 183 | 184 | # Plots ---------------------------------------------------------------------------------------------------------------- 185 | 186 | def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): 187 | # Precision-recall curve 188 | fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) 189 | py = np.stack(py, axis=1) 190 | 191 | if 0 < len(names) < 21: # display per-class legend if < 21 classes 192 | for i, y in enumerate(py.T): 193 | ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) 194 | else: 195 | ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) 196 | 197 | ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) 198 | ax.set_xlabel('Recall') 199 | ax.set_ylabel('Precision') 200 | ax.set_xlim(0, 1) 201 | ax.set_ylim(0, 1) 202 | plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") 203 | fig.savefig(Path(save_dir), dpi=250) 204 | 205 | 206 | def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', 
ylabel='Metric'): 207 | # Metric-confidence curve 208 | fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) 209 | 210 | if 0 < len(names) < 21: # display per-class legend if < 21 classes 211 | for i, y in enumerate(py): 212 | ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) 213 | else: 214 | ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) 215 | 216 | y = py.mean(0) 217 | ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') 218 | ax.set_xlabel(xlabel) 219 | ax.set_ylabel(ylabel) 220 | ax.set_xlim(0, 1) 221 | ax.set_ylim(0, 1) 222 | plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") 223 | fig.savefig(Path(save_dir), dpi=250) 224 | -------------------------------------------------------------------------------- /utils/wandb_logging/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbbug/yolov5-prune-multi/7ebdf3ce3050a0b7c865920bb88d768a446ab3e9/utils/wandb_logging/__init__.py -------------------------------------------------------------------------------- /utils/wandb_logging/log_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | import yaml 5 | 6 | from wandb_utils import WandbLogger 7 | from utils.datasets import LoadImagesAndLabels 8 | 9 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' 10 | 11 | 12 | def create_dataset_artifact(opt): 13 | with open(opt.data) as f: 14 | data = yaml.load(f, Loader=yaml.SafeLoader) # data dict 15 | logger = WandbLogger(opt, '', None, data, job_type='create_dataset') 16 | nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names']) 17 | names = {k: v for k, v in enumerate(names)} # to index dictionary 18 | logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train') # trainset 19 | logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val') # valset 20 | 21 | # Update data.yaml with artifact links 22 | data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train') 23 | data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val') 24 | path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.') # updated data.yaml path 25 | data.pop('download', None) # download via artifact instead of predefined field 'download:' 26 | with open(path, 'w') as f: 27 | yaml.dump(data, f) 28 | print("New Config file => ", path) 29 | 30 | 31 | if __name__ == '__main__': 32 | parser = argparse.ArgumentParser() 33 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') 34 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') 35 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') 36 | parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml') 37 | opt = parser.parse_args() 38 | 39 | create_dataset_artifact(opt) 40 | -------------------------------------------------------------------------------- /utils/wandb_logging/wandb_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import shutil 3 | import sys 4 | from datetime import datetime 5 | from pathlib import Path 6 | 7 | import torch 8 | 9 | sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path 10 | from utils.general 
import colorstr, xywh2xyxy 11 | 12 | try: 13 | import wandb 14 | except ImportError: 15 | wandb = None 16 | print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") 17 | 18 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' 19 | 20 | 21 | def remove_prefix(from_string, prefix): 22 | return from_string[len(prefix):] 23 | 24 | 25 | class WandbLogger(): 26 | def __init__(self, opt, name, run_id, data_dict, job_type='Training'): 27 | self.wandb = wandb 28 | self.wandb_run = wandb.init(config=opt, resume="allow", 29 | project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, 30 | name=name, 31 | job_type=job_type, 32 | id=run_id) if self.wandb else None 33 | 34 | if job_type == 'Training': 35 | self.setup_training(opt, data_dict) 36 | if opt.bbox_interval == -1: 37 | opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs 38 | if opt.save_period == -1: 39 | opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs 40 | 41 | def setup_training(self, opt, data_dict): 42 | self.log_dict = {} 43 | self.train_artifact_path, self.trainset_artifact = \ 44 | self.download_dataset_artifact(data_dict['train'], opt.artifact_alias) 45 | self.test_artifact_path, self.testset_artifact = \ 46 | self.download_dataset_artifact(data_dict['val'], opt.artifact_alias) 47 | self.result_artifact, self.result_table, self.weights = None, None, None 48 | if self.train_artifact_path is not None: 49 | train_path = Path(self.train_artifact_path) / 'data/images/' 50 | data_dict['train'] = str(train_path) 51 | if self.test_artifact_path is not None: 52 | test_path = Path(self.test_artifact_path) / 'data/images/' 53 | data_dict['val'] = str(test_path) 54 | self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") 55 | self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) 56 | if opt.resume_from_artifact: 57 | modeldir, _ = self.download_model_artifact(opt.resume_from_artifact) 58 | if modeldir: 59 | self.weights = Path(modeldir) / "best.pt" 60 | opt.weights = self.weights 61 | 62 | def download_dataset_artifact(self, path, alias): 63 | if path.startswith(WANDB_ARTIFACT_PREFIX): 64 | dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) 65 | assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" 66 | datadir = dataset_artifact.download() 67 | labels_zip = Path(datadir) / "data/labels.zip" 68 | shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip') 69 | print("Downloaded dataset to : ", datadir) 70 | return datadir, dataset_artifact 71 | return None, None 72 | 73 | def download_model_artifact(self, name): 74 | model_artifact = wandb.use_artifact(name + ":latest") 75 | assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' 76 | modeldir = model_artifact.download() 77 | print("Downloaded model to : ", modeldir) 78 | return modeldir, model_artifact 79 | 80 | def log_model(self, path, opt, epoch): 81 | datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S') 82 | model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 83 | 'original_url': str(path), 84 | 'epoch': epoch + 1, 85 | 'save period': opt.save_period, 86 | 'project': opt.project, 87 | 'datetime': datetime_suffix 88 | }) 89 | model_artifact.add_file(str(path / 'last.pt'), name='last.pt') 90 | model_artifact.add_file(str(path / 'best.pt'), name='best.pt') 91 | 
wandb.log_artifact(model_artifact) 92 | print("Saving model artifact on epoch ", epoch + 1) 93 | 94 | def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): 95 | artifact = wandb.Artifact(name=name, type="dataset") 96 | image_path = dataset.path 97 | artifact.add_dir(image_path, name='data/images') 98 | table = wandb.Table(columns=["id", "train_image", "Classes"]) 99 | class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) 100 | for si, (img, labels, paths, shapes) in enumerate(dataset): 101 | height, width = shapes[0] 102 | labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) 103 | labels[:, 2:] *= torch.Tensor([width, height, width, height]) 104 | box_data = [] 105 | img_classes = {} 106 | for cls, *xyxy in labels[:, 1:].tolist(): 107 | cls = int(cls) 108 | box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, 109 | "class_id": cls, 110 | "box_caption": "%s" % (class_to_id[cls]), 111 | "scores": {"acc": 1}, 112 | "domain": "pixel"}) 113 | img_classes[cls] = class_to_id[cls] 114 | boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space 115 | table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes)) 116 | artifact.add(table, name) 117 | labels_path = 'labels'.join(image_path.rsplit('images', 1)) 118 | zip_path = Path(labels_path).parent / (name + '_labels.zip') 119 | if not zip_path.is_file(): # make_archive won't check if file exists 120 | shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path) 121 | artifact.add_file(str(zip_path), name='data/labels.zip') 122 | wandb.log_artifact(artifact) 123 | print("Saving data to W&B...") 124 | 125 | def log(self, log_dict): 126 | if self.wandb_run: 127 | for key, value in log_dict.items(): 128 | self.log_dict[key] = value 129 | 130 | def end_epoch(self): 131 | if self.wandb_run and self.log_dict: 132 | wandb.log(self.log_dict) 133 | self.log_dict = {} 134 | 135 | def finish_run(self): 136 | if self.wandb_run: 137 | if self.result_artifact: 138 | print("Add Training Progress Artifact") 139 | self.result_artifact.add(self.result_table, 'result') 140 | train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id") 141 | self.result_artifact.add(train_results, 'joined_result') 142 | wandb.log_artifact(self.result_artifact) 143 | if self.log_dict: 144 | wandb.log(self.log_dict) 145 | wandb.run.finish() 146 | --------------------------------------------------------------------------------
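For reference, a minimal sketch of how ComputeLoss from utils/loss.py is driven during a training step, assuming a YOLOv5-style model built by models/yolo.py whose last module is the Detect head, the way train.py constructs it. The hyperparameter values, the nc=10 class count and the dummy tensors below are illustrative placeholders, not the repo's tuned settings from data/hyp.*.yaml.

import torch
from models.yolo import Model
from utils.loss import ComputeLoss

hyp = {'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0,
       'fl_gamma': 0.0, 'anchor_t': 4.0}              # only the keys ComputeLoss reads
model = Model('models/yolov5s.yaml', ch=3, nc=10)     # nc=10 is an arbitrary example class count
model.hyp, model.gr = hyp, 1.0                        # attributes ComputeLoss expects on the model
compute_loss = ComputeLoss(model)

imgs = torch.zeros(2, 3, 640, 640)                    # dummy batch of two images
targets = torch.tensor([[0, 1, 0.5, 0.5, 0.10, 0.20],     # image index, class, x, y, w, h (normalized)
                        [1, 3, 0.3, 0.6, 0.05, 0.05]])
model.train()
pred = model(imgs)                                    # list of per-level predictions in training mode
loss, loss_items = compute_loss(pred, targets)        # loss scaled by batch size; items = box, obj, cls, total
loss.backward()

utils/loss_multi.py keeps the same calling convention but fetches the Detect head from the multi-modal model's detect attribute (model.detect, or model.module.model_vs[-1] under DataParallel), so the dual-modality models in this repo are trained with the same pattern.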