├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Datasets-PreProcessing.md
├── Dockerfile
├── GETTING_STARTED.md
├── INSTALL.md
├── LICENSE
├── MODEL_ZOO.md
├── README.md
├── TECHNICAL_DETAILS.md
├── colab
└── PedestronColab.ipynb
├── configs
├── .DS_Store
├── cascade_mask_rcnn_r101_fpn_1x.py
├── cascade_mask_rcnn_r50_caffe_c4_1x.py
├── cascade_mask_rcnn_r50_fpn_1x.py
├── cascade_mask_rcnn_x101_32x4d_fpn_1x.py
├── cascade_mask_rcnn_x101_64x4d_fpn_1x.py
├── cascade_rcnn_r101_fpn_1x.py
├── cascade_rcnn_r50_caffe_c4_1x.py
├── cascade_rcnn_r50_fpn_1x.py
├── cascade_rcnn_x101_32x4d_fpn_1x.py
├── cascade_rcnn_x101_64x4d_fpn_1x.py
├── dcn
│ ├── README.md
│ ├── cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
│ ├── cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py
│ ├── faster_rcnn_dconv_c3-c5_r50_fpn_1x.py
│ ├── faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py
│ ├── faster_rcnn_dpool_r50_fpn_1x.py
│ ├── faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
│ ├── faster_rcnn_mdpool_r50_fpn_1x.py
│ └── mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
├── elephant
│ ├── .DS_Store
│ ├── caltech
│ │ ├── cascade_hrnet.py
│ │ └── csp_hrnet.py
│ ├── cityperson
│ │ ├── .DS_Store
│ │ ├── cascade_hrnet.py
│ │ ├── cascade_mobilenet.py
│ │ ├── csp_hrnet_refine.py
│ │ ├── csp_r50.py
│ │ ├── csp_r50_hw.py
│ │ ├── faster_rcnn_hrnet.py
│ │ ├── ga_retinanet_ResNeXt101.py
│ │ ├── htc_ResNeXt101.py
│ │ ├── mgan_vgg.py
│ │ └── retinanet_ResNeXt101.py
│ ├── crowdhuman
│ │ └── cascade_hrnet.py
│ ├── eurocity
│ │ ├── cascade_hrnet.py
│ │ └── faster_rcnn_hrnet.py
│ └── wider_pedestrain
│ │ └── cascade_hrnet.py
├── empirical_attention
│ ├── README.md
│ ├── faster_rcnn_r50_fpn_attention_0010_1x.py
│ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x.py
│ ├── faster_rcnn_r50_fpn_attention_1111_1x.py
│ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x.py
├── fast_mask_rcnn_r101_fpn_1x.py
├── fast_mask_rcnn_r50_caffe_c4_1x.py
├── fast_mask_rcnn_r50_fpn_1x.py
├── fast_rcnn_r101_fpn_1x.py
├── fast_rcnn_r50_caffe_c4_1x.py
├── fast_rcnn_r50_fpn_1x.py
├── faster_rcnn_ohem_r50_fpn_1x.py
├── faster_rcnn_r101_fpn_1x.py
├── faster_rcnn_r50_caffe_c4_1x.py
├── faster_rcnn_r50_fpn_1x.py
├── faster_rcnn_x101_32x4d_fpn_1x.py
├── faster_rcnn_x101_64x4d_fpn_1x.py
├── fcos
│ ├── README.md
│ ├── fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py
│ ├── fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py
│ └── fcos_r50_caffe_fpn_gn_1x_4gpu.py
├── fp16
│ ├── faster_rcnn_r50_fpn_fp16_1x.py
│ ├── mask_rcnn_r50_fpn_fp16_1x.py
│ └── retinanet_r50_fpn_fp16_1x.py
├── gcnet
│ ├── README.md
│ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py
│ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py
│ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py
│ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py
│ └── mask_rcnn_r50_fpn_sbn_1x.py
├── ghm
│ ├── README.md
│ └── retinanet_ghm_r50_fpn_1x.py
├── gn+ws
│ ├── README.md
│ ├── faster_rcnn_r50_fpn_gn_ws_1x.py
│ ├── mask_rcnn_r50_fpn_gn_ws_20_23_24e.py
│ ├── mask_rcnn_r50_fpn_gn_ws_2x.py
│ └── mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py
├── gn
│ ├── README.md
│ ├── mask_rcnn_r101_fpn_gn_2x.py
│ ├── mask_rcnn_r50_fpn_gn_2x.py
│ └── mask_rcnn_r50_fpn_gn_contrib_2x.py
├── grid_rcnn
│ ├── README.md
│ ├── grid_rcnn_gn_head_r50_fpn_2x.py
│ └── grid_rcnn_gn_head_x101_32x4d_fpn_2x.py
├── guided_anchoring
│ ├── README.md
│ ├── ga_fast_r50_caffe_fpn_1x.py
│ ├── ga_faster_r50_caffe_fpn_1x.py
│ ├── ga_faster_x101_32x4d_fpn_1x.py
│ ├── ga_retinanet_r50_caffe_fpn_1x.py
│ ├── ga_retinanet_x101_32x4d_fpn_1x.py
│ ├── ga_rpn_r101_caffe_rpn_1x.py
│ ├── ga_rpn_r50_caffe_fpn_1x.py
│ └── ga_rpn_x101_32x4d_fpn_1x.py
├── hrnet
│ ├── README.md
│ ├── cascade_rcnn_hrnetv2p_w32_20e.py
│ ├── faster_rcnn_hrnetv2p_w18_1x.py
│ ├── faster_rcnn_hrnetv2p_w32_1x.py
│ ├── faster_rcnn_hrnetv2p_w40_1x.py
│ ├── mask_rcnn_hrnetv2p_w18_1x.py
│ └── mask_rcnn_hrnetv2p_w32_1x.py
├── htc
│ ├── README.md
│ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py
│ ├── htc_r101_fpn_20e.py
│ ├── htc_r50_fpn_1x.py
│ ├── htc_r50_fpn_20e.py
│ ├── htc_without_semantic_r50_fpn_1x.py
│ ├── htc_x101_32x4d_fpn_20e_16gpu.py
│ └── htc_x101_64x4d_fpn_20e_16gpu.py
├── libra_rcnn
│ ├── README.md
│ ├── libra_fast_rcnn_r50_fpn_1x.py
│ ├── libra_faster_rcnn_r101_fpn_1x.py
│ ├── libra_faster_rcnn_r50_fpn_1x.py
│ ├── libra_faster_rcnn_x101_64x4d_fpn_1x.py
│ └── libra_retinanet_r50_fpn_1x.py
├── mask_rcnn_r101_fpn_1x.py
├── mask_rcnn_r50_caffe_c4_1x.py
├── mask_rcnn_r50_fpn_1x.py
├── mask_rcnn_x101_32x4d_fpn_1x.py
├── mask_rcnn_x101_64x4d_fpn_1x.py
├── ms_rcnn
│ ├── README.md
│ ├── ms_rcnn_r101_caffe_fpn_1x.py
│ ├── ms_rcnn_r50_caffe_fpn_1x.py
│ └── ms_rcnn_x101_64x4d_fpn_1x.py
├── pascal_voc
│ ├── faster_rcnn_r50_fpn_1x_voc0712.py
│ ├── ssd300_voc.py
│ └── ssd512_voc.py
├── retinanet_r101_fpn_1x.py
├── retinanet_r50_fpn_1x.py
├── retinanet_x101_32x4d_fpn_1x.py
├── retinanet_x101_64x4d_fpn_1x.py
├── rpn_r101_fpn_1x.py
├── rpn_r50_caffe_c4_1x.py
├── rpn_r50_fpn_1x.py
├── rpn_x101_32x4d_fpn_1x.py
├── rpn_x101_64x4d_fpn_1x.py
├── scratch
│ ├── README.md
│ ├── scratch_faster_rcnn_r50_fpn_gn_6x.py
│ └── scratch_mask_rcnn_r50_fpn_gn_6x.py
├── solider
│ └── cp
│ │ ├── swin_base.py
│ │ ├── swin_small.py
│ │ └── swin_tiny.py
├── ssd300_coco.py
├── ssd512_coco.py
└── wider_face
│ ├── README.md
│ └── ssd300_wider_face.py
├── convert_model_version.py
├── demo
├── 1.png
├── 2.png
└── 3.png
├── gifs
├── 1.gif
├── 2.gif
└── gm.png
├── icon
└── demo.jpg
├── mmdet.egg-info
├── PKG-INFO
├── SOURCES.txt
├── dependency_links.txt
├── not-zip-safe
├── requires.txt
└── top_level.txt
├── mmdet
├── .DS_Store
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ └── version.cpython-37.pyc
├── apis
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── env.cpython-37.pyc
│ │ ├── inference.cpython-37.pyc
│ │ └── train.cpython-37.pyc
│ ├── env.py
│ ├── inference.py
│ └── train.py
├── core
│ ├── __init__.py
│ ├── __pycache__
│ │ └── __init__.cpython-37.pyc
│ ├── anchor
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── anchor_generator.cpython-37.pyc
│ │ │ ├── anchor_target.cpython-37.pyc
│ │ │ └── guided_anchor_target.cpython-37.pyc
│ │ ├── anchor_generator.py
│ │ ├── anchor_target.py
│ │ └── guided_anchor_target.py
│ ├── bbox
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── assign_sampling.cpython-37.pyc
│ │ │ ├── bbox_target.cpython-37.pyc
│ │ │ ├── geometry.cpython-37.pyc
│ │ │ └── transforms.cpython-37.pyc
│ │ ├── assign_sampling.py
│ │ ├── assigners
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── approx_max_iou_assigner.cpython-37.pyc
│ │ │ │ ├── assign_result.cpython-37.pyc
│ │ │ │ ├── base_assigner.cpython-37.pyc
│ │ │ │ └── max_iou_assigner.cpython-37.pyc
│ │ │ ├── approx_max_iou_assigner.py
│ │ │ ├── assign_result.py
│ │ │ ├── base_assigner.py
│ │ │ └── max_iou_assigner.py
│ │ ├── bbox_target.py
│ │ ├── geometry.py
│ │ ├── samplers
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── base_sampler.cpython-37.pyc
│ │ │ │ ├── combined_sampler.cpython-37.pyc
│ │ │ │ ├── instance_balanced_pos_sampler.cpython-37.pyc
│ │ │ │ ├── iou_balanced_neg_sampler.cpython-37.pyc
│ │ │ │ ├── ohem_sampler.cpython-37.pyc
│ │ │ │ ├── pseudo_sampler.cpython-37.pyc
│ │ │ │ ├── random_sampler.cpython-37.pyc
│ │ │ │ └── sampling_result.cpython-37.pyc
│ │ │ ├── base_sampler.py
│ │ │ ├── combined_sampler.py
│ │ │ ├── instance_balanced_pos_sampler.py
│ │ │ ├── iou_balanced_neg_sampler.py
│ │ │ ├── ohem_sampler.py
│ │ │ ├── pseudo_sampler.py
│ │ │ ├── random_sampler.py
│ │ │ └── sampling_result.py
│ │ └── transforms.py
│ ├── evaluation
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── bbox_overlaps.cpython-37.pyc
│ │ │ ├── class_names.cpython-37.pyc
│ │ │ ├── coco_utils.cpython-37.pyc
│ │ │ ├── eval_hooks.cpython-37.pyc
│ │ │ ├── eval_mr.cpython-37.pyc
│ │ │ ├── mean_ap.cpython-37.pyc
│ │ │ └── recall.cpython-37.pyc
│ │ ├── bbox_overlaps.py
│ │ ├── class_names.py
│ │ ├── coco_utils.py
│ │ ├── eval_hooks.py
│ │ ├── eval_mr.py
│ │ ├── mean_ap.py
│ │ └── recall.py
│ ├── fp16
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── decorators.cpython-37.pyc
│ │ │ ├── hooks.cpython-37.pyc
│ │ │ └── utils.cpython-37.pyc
│ │ ├── decorators.py
│ │ ├── hooks.py
│ │ └── utils.py
│ ├── mask
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── mask_target.cpython-37.pyc
│ │ │ └── utils.cpython-37.pyc
│ │ ├── mask_target.py
│ │ └── utils.py
│ ├── my_mmcv
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ └── runner
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── mean_teacher_runner.cpython-37.pyc
│ │ │ ├── hooks
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── mean_teacher_optimizer.cpython-37.pyc
│ │ │ ├── logger
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __pycache__
│ │ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ │ └── wandb.cpython-37.pyc
│ │ │ │ └── wandb.py
│ │ │ └── mean_teacher_optimizer.py
│ │ │ └── mean_teacher_runner.py
│ ├── post_processing
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── bbox_nms.cpython-37.pyc
│ │ │ └── merge_augs.cpython-37.pyc
│ │ ├── bbox_nms.py
│ │ └── merge_augs.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── dist_utils.cpython-37.pyc
│ │ └── misc.cpython-37.pyc
│ │ ├── dist_utils.py
│ │ └── misc.py
├── datasets
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── builder.cpython-37.pyc
│ │ ├── coco.cpython-37.pyc
│ │ ├── coco_csp_ori.cpython-37.pyc
│ │ ├── custom.cpython-37.pyc
│ │ ├── dataset_wrappers.cpython-37.pyc
│ │ ├── ecp_coco.cpython-37.pyc
│ │ ├── extra_aug.cpython-37.pyc
│ │ ├── registry.cpython-37.pyc
│ │ ├── transforms.cpython-37.pyc
│ │ ├── utils.cpython-37.pyc
│ │ ├── voc.cpython-37.pyc
│ │ ├── wider_face.cpython-37.pyc
│ │ └── xml_style.cpython-37.pyc
│ ├── builder.py
│ ├── coco.py
│ ├── coco_csp_ori.py
│ ├── custom.py
│ ├── dataset_wrappers.py
│ ├── ecp_coco.py
│ ├── extra_aug.py
│ ├── loader
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── build_loader.cpython-37.pyc
│ │ │ └── sampler.cpython-37.pyc
│ │ ├── build_loader.py
│ │ └── sampler.py
│ ├── registry.py
│ ├── transforms.py
│ ├── utils.py
│ ├── voc.py
│ ├── wider_face.py
│ └── xml_style.py
├── models
│ ├── .DS_Store
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── builder.cpython-37.pyc
│ │ └── registry.cpython-37.pyc
│ ├── anchor_heads
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── anchor_head.cpython-37.pyc
│ │ │ ├── csp_head.cpython-37.pyc
│ │ │ ├── csp_si_head.cpython-37.pyc
│ │ │ ├── csp_trans_head.cpython-37.pyc
│ │ │ ├── fcos_head.cpython-37.pyc
│ │ │ ├── ga_retina_head.cpython-37.pyc
│ │ │ ├── ga_rpn_head.cpython-37.pyc
│ │ │ ├── guided_anchor_head.cpython-37.pyc
│ │ │ ├── retina_head.cpython-37.pyc
│ │ │ ├── rpn_head.cpython-37.pyc
│ │ │ ├── ssd_head.cpython-37.pyc
│ │ │ └── transformers.cpython-37.pyc
│ │ ├── anchor_head.py
│ │ ├── csp_head.py
│ │ ├── csp_si_head.py
│ │ ├── csp_trans_head.py
│ │ ├── fcos_head.py
│ │ ├── ga_retina_head.py
│ │ ├── ga_rpn_head.py
│ │ ├── guided_anchor_head.py
│ │ ├── retina_head.py
│ │ ├── rpn_head.py
│ │ ├── ssd_head.py
│ │ ├── swin_transformer.py
│ │ └── transformers.py
│ ├── backbones
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── hrnet.cpython-37.pyc
│ │ │ ├── mobilenet.cpython-37.pyc
│ │ │ ├── resnet.cpython-37.pyc
│ │ │ ├── resnext.cpython-37.pyc
│ │ │ ├── senet.cpython-37.pyc
│ │ │ ├── ssd_vgg.cpython-37.pyc
│ │ │ ├── swin.cpython-37.pyc
│ │ │ └── vgg.cpython-37.pyc
│ │ ├── hrnet.py
│ │ ├── mobilenet.py
│ │ ├── resnet.py
│ │ ├── resnext.py
│ │ ├── senet.py
│ │ ├── ssd_vgg.py
│ │ ├── swin_transformer.py
│ │ └── vgg.py
│ ├── bbox_heads
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── bbox_head.cpython-37.pyc
│ │ │ ├── cascade_ped_head.cpython-37.pyc
│ │ │ ├── convfc_bbox_head.cpython-37.pyc
│ │ │ ├── mgan_head.cpython-37.pyc
│ │ │ └── refine_head.cpython-37.pyc
│ │ ├── bbox_head.py
│ │ ├── cascade_ped_head.py
│ │ ├── convfc_bbox_head.py
│ │ ├── mgan_head.py
│ │ └── refine_head.py
│ ├── builder.py
│ ├── detectors
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── base.cpython-37.pyc
│ │ │ ├── cascade_rcnn.cpython-37.pyc
│ │ │ ├── csp.cpython-37.pyc
│ │ │ ├── fast_rcnn.cpython-37.pyc
│ │ │ ├── faster_rcnn.cpython-37.pyc
│ │ │ ├── fcos.cpython-37.pyc
│ │ │ ├── grid_rcnn.cpython-37.pyc
│ │ │ ├── htc.cpython-37.pyc
│ │ │ ├── mask_rcnn.cpython-37.pyc
│ │ │ ├── mask_scoring_rcnn.cpython-37.pyc
│ │ │ ├── mgan.cpython-37.pyc
│ │ │ ├── retinanet.cpython-37.pyc
│ │ │ ├── rpn.cpython-37.pyc
│ │ │ ├── single_stage.cpython-37.pyc
│ │ │ ├── test_mixins.cpython-37.pyc
│ │ │ └── two_stage.cpython-37.pyc
│ │ ├── base.py
│ │ ├── cascade_rcnn.py
│ │ ├── cascade_rcnn_csp.py
│ │ ├── csp.py
│ │ ├── fast_rcnn.py
│ │ ├── faster_rcnn.py
│ │ ├── fcos.py
│ │ ├── grid_rcnn.py
│ │ ├── htc.py
│ │ ├── mask_rcnn.py
│ │ ├── mask_scoring_rcnn.py
│ │ ├── mgan.py
│ │ ├── retinanet.py
│ │ ├── rpn.py
│ │ ├── single_stage.py
│ │ ├── test_mixins.py
│ │ └── two_stage.py
│ ├── losses
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── accuracy.cpython-37.pyc
│ │ │ ├── balanced_l1_loss.cpython-37.pyc
│ │ │ ├── cross_entropy_loss.cpython-37.pyc
│ │ │ ├── focal_loss.cpython-37.pyc
│ │ │ ├── ghm_loss.cpython-37.pyc
│ │ │ ├── iou_loss.cpython-37.pyc
│ │ │ ├── kldiv.cpython-37.pyc
│ │ │ ├── mse_loss.cpython-37.pyc
│ │ │ ├── smooth_l1_loss.cpython-37.pyc
│ │ │ └── utils.cpython-37.pyc
│ │ ├── accuracy.py
│ │ ├── balanced_l1_loss.py
│ │ ├── cross_entropy_loss.py
│ │ ├── focal_loss.py
│ │ ├── ghm_loss.py
│ │ ├── iou_loss.py
│ │ ├── kldiv.py
│ │ ├── mse_loss.py
│ │ ├── smooth_l1_loss.py
│ │ └── utils.py
│ ├── mask_heads
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── fcn_mask_head.cpython-37.pyc
│ │ │ ├── fused_semantic_head.cpython-37.pyc
│ │ │ ├── grid_head.cpython-37.pyc
│ │ │ ├── htc_mask_head.cpython-37.pyc
│ │ │ └── maskiou_head.cpython-37.pyc
│ │ ├── fcn_mask_head.py
│ │ ├── fused_semantic_head.py
│ │ ├── grid_head.py
│ │ ├── htc_mask_head.py
│ │ └── maskiou_head.py
│ ├── necks
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── bfp.cpython-37.pyc
│ │ │ ├── csp_neck.cpython-37.pyc
│ │ │ ├── fpn.cpython-37.pyc
│ │ │ ├── hrcspfpn.cpython-37.pyc
│ │ │ └── hrfpn.cpython-37.pyc
│ │ ├── bfp.py
│ │ ├── csp_neck.py
│ │ ├── fpn.py
│ │ ├── hrcspfpn.py
│ │ └── hrfpn.py
│ ├── plugins
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── generalized_attention.cpython-37.pyc
│ │ │ └── non_local.cpython-37.pyc
│ │ ├── generalized_attention.py
│ │ └── non_local.py
│ ├── registry.py
│ ├── roi_extractors
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── single_level.cpython-37.pyc
│ │ └── single_level.py
│ ├── shared_heads
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── res_layer.cpython-37.pyc
│ │ └── res_layer.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── conv_module.cpython-37.pyc
│ │ ├── conv_ws.cpython-37.pyc
│ │ ├── norm.cpython-37.pyc
│ │ ├── scale.cpython-37.pyc
│ │ └── weight_init.cpython-37.pyc
│ │ ├── conv_module.py
│ │ ├── conv_ws.py
│ │ ├── norm.py
│ │ ├── scale.py
│ │ └── weight_init.py
├── ops
│ ├── __init__.py
│ ├── __pycache__
│ │ └── __init__.cpython-37.pyc
│ ├── dcn
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ ├── deform_conv_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ ├── deform_pool_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── deform_conv.cpython-37.pyc
│ │ │ │ └── deform_pool.cpython-37.pyc
│ │ │ ├── deform_conv.py
│ │ │ └── deform_pool.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── deform_conv.cpython-37.pyc
│ │ │ │ └── deform_pool.cpython-37.pyc
│ │ │ ├── deform_conv.py
│ │ │ └── deform_pool.py
│ │ └── src
│ │ │ ├── deform_conv_cuda.cpp
│ │ │ ├── deform_conv_cuda_kernel.cu
│ │ │ ├── deform_pool_cuda.cpp
│ │ │ └── deform_pool_cuda_kernel.cu
│ ├── gcb
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── context_block.cpython-37.pyc
│ │ └── context_block.py
│ ├── masked_conv
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── masked_conv.cpython-37.pyc
│ │ │ └── masked_conv.py
│ │ ├── masked_conv2d_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── masked_conv.cpython-37.pyc
│ │ │ └── masked_conv.py
│ │ └── src
│ │ │ ├── masked_conv2d_cuda.cpp
│ │ │ └── masked_conv2d_kernel.cu
│ ├── nms
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── nms_wrapper.cpython-37.pyc
│ │ ├── nms_cpu.cpython-37m-x86_64-linux-gnu.so
│ │ ├── nms_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ ├── nms_wrapper.py
│ │ ├── soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so
│ │ └── src
│ │ │ ├── nms_cpu.cpp
│ │ │ ├── nms_cuda.cpp
│ │ │ ├── nms_kernel.cu
│ │ │ ├── soft_nms_cpu.cpp
│ │ │ └── soft_nms_cpu.pyx
│ ├── roi_align
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── roi_align.cpython-37.pyc
│ │ │ └── roi_align.py
│ │ ├── gradcheck.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── roi_align.cpython-37.pyc
│ │ │ └── roi_align.py
│ │ ├── roi_align_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ └── src
│ │ │ ├── roi_align_cuda.cpp
│ │ │ └── roi_align_kernel.cu
│ ├── roi_pool
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── roi_pool.cpython-37.pyc
│ │ │ └── roi_pool.py
│ │ ├── gradcheck.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── roi_pool.cpython-37.pyc
│ │ │ └── roi_pool.py
│ │ ├── roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ └── src
│ │ │ ├── roi_pool_cuda.cpp
│ │ │ └── roi_pool_kernel.cu
│ └── sigmoid_focal_loss
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ └── __init__.cpython-37.pyc
│ │ ├── functions
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── sigmoid_focal_loss.cpython-37.pyc
│ │ └── sigmoid_focal_loss.py
│ │ ├── modules
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── sigmoid_focal_loss.cpython-37.pyc
│ │ └── sigmoid_focal_loss.py
│ │ ├── sigmoid_focal_loss_cuda.cpython-37m-x86_64-linux-gnu.so
│ │ └── src
│ │ ├── sigmoid_focal_loss.cpp
│ │ └── sigmoid_focal_loss_cuda.cu
├── utils
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ └── registry.cpython-37.pyc
│ └── registry.py
└── version.py
├── run.sh
├── setup.py
└── tools
├── ECPB
├── LICENSE
├── README.md
├── README_ECPB.pdf
├── TheEuroCityPersonsBenchmark.docx
├── __init__.py
├── conversion_test
│ ├── ecp.json
│ ├── kitti.txt
│ └── kitti_in.txt
├── create_tfrecords.py
├── dataconverter.py
├── dataloader.py
├── detect.py
├── eval.py
├── match.py
├── params.py
├── qualitative_analysis.py
├── statistics.py
└── utils.py
├── analyze_logs.py
├── caltech
├── convert_json_to_txt.py
├── eval_caltech
│ ├── AS.mat
│ ├── ResultsEval
│ │ └── gt-new
│ │ │ ├── gt-All.mat
│ │ │ ├── gt-Occ=heavy.mat
│ │ │ └── gt-Reasonable.mat
│ ├── bbApply.m
│ ├── bbGt.m
│ ├── dbEval.m
│ ├── dbInfo.m
│ ├── extract_img_anno.m
│ ├── getPrmDflt.m
│ └── vbb.m
└── test.json
├── check_log_loss.py
├── cityPerson
├── __pycache__
│ ├── coco.cpython-37.pyc
│ ├── eval_MR_multisetup.cpython-37.pyc
│ └── eval_demo.cpython-37.pyc
├── coco.py
├── eval_MR_multisetup.py
└── eval_demo.py
├── coco_eval.py
├── convert_datasets
├── citypersons
│ └── convert_cityperson_to_coco.py
├── crowdhuman
│ └── convert_crowdhuman_to_coco.py
├── eurocity
│ └── convert_eurocity_to_coco.py
├── pascal_voc.py
└── pycococreatortools.py
├── crowdhuman
├── ORI_eval_MR_multisetup.py
├── coco.py
├── eval_MR_multisetup.py
└── eval_demo.py
├── demo.py
├── detectron2pytorch.py
├── dist_test.sh
├── dist_train.sh
├── publish_model.py
├── slurm_test.sh
├── slurm_train.sh
├── test.py
├── test_caltech.py
├── test_city_person.py
├── test_crowdhuman.py
├── test_euroCity.py
├── test_wider_mr.py
├── train.py
├── upgrade_model_version.py
├── validate.py
└── voc_eval.py
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to mmdetection
2 |
3 | All kinds of contributions are welcome, including but not limited to the following.
4 |
5 | - Fixes (typo, bugs)
6 | - New features and components
7 |
8 | ## Workflow
9 |
10 | 1. fork and pull the latest mmdetection
11 | 2. checkout a new branch (do not use master branch for PRs)
12 | 3. commit your changes
13 | 4. create a PR
14 |
15 | Note
16 | - If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
17 | - If you are the author of some papers and would like to include your method to mmdetection,
18 | please contact Kai Chen (chenkaidev[at]gmail[dot]com). We will greatly appreciate your contribution.
19 |
20 | ## Code style
21 |
22 | ### Python
23 | We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
24 | We use [flake8](http://flake8.pycqa.org/en/latest/) as the linter and [yapf](https://github.com/google/yapf) as the formatter.
25 | Please upgrade to the latest yapf (>=0.27.0) and refer to the [configuration](.style.yapf).
26 |
27 | >Before you create a PR, make sure that your code lints and is formatted by yapf.
28 |
29 | ### C++ and CUDA
30 | We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.3"
2 | ARG CUDA="10.1"
3 | ARG CUDNN="7"
4 |
5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
6 |
7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
10 |
11 | RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libgl1-mesa-dev libsm6 libxrender-dev libxext6 \
12 | && apt-get clean \
13 | && rm -rf /var/lib/apt/lists/*
14 |
15 | # Install MMCV
16 | RUN pip install Cython
17 | RUN pip install mmcv==0.2.10
18 | RUN pip install scipy
19 | # Install mmdetection
20 | RUN conda clean --all
21 | #RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection
22 | WORKDIR /pedestron
23 | ADD . .
24 |
25 | ENV FORCE_CUDA="1"
26 | RUN pip install Cython
27 | RUN pip install "git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI"
28 | RUN pip install --no-cache-dir -e .
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SOLIDER on Pedestrian Detection
2 |
3 | [](https://paperswithcode.com/sota/pedestrian-detection-on-citypersons?p=beyond-appearance-a-semantic-controllable)
4 |
5 | This repo provides details about how to use [SOLIDER](https://github.com/tinyvision/SOLIDER) pretrained representation on pedestrian detection task.
6 | We modify the code from [Pedestron](https://github.com/hasanirtiza/Pedestron), and you can refer to the original repo for more details.
7 |
8 | ## Installation and Datasets
9 |
10 | Details of installation and dataset preparation can be found in [Pedestron](https://github.com/hasanirtiza/Pedestron).
11 |
12 | ## Prepare Pre-trained Models
13 | You can download models from [SOLIDER](https://github.com/tinyvision/SOLIDER), or use [SOLIDER](https://github.com/tinyvision/SOLIDER) to train your own models.
14 | Before training, you should use `convert_model_version.py` to convert the pretrained models to the right version.
15 |
16 | ## Training
17 |
18 | Train with single GPU:
19 |
20 | ```shell
21 | python tools/train.py ${CONFIG_FILE}
22 | ```
23 |
24 | Train with multiple GPUs:
25 | ```shell
26 | ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
27 | ```
28 |
29 | `CONFIG_FILE` can be found in `configs/solider`.
30 |
31 | Test:
32 |
33 | ```shell
34 | sh run.sh
35 | ```
36 |
37 | ## Performance
38 |
39 | | Method | Model | CityPersons (MR-2) |
40 | | ------ | :---: | :---: |
41 | | SOLIDER | Swin Tiny | 10.3/40.8 |
42 | | SOLIDER | Swin Small | 10.0/39.2 |
43 | | SOLIDER | Swin Base | 9.7/39.4 |
44 |
45 | - We use the pretrained models from [SOLIDER](https://github.com/tinyvision/SOLIDER).
46 | - The semantic weight is set to 1.0 in these experiments.
47 |
48 | ## Citation
49 |
50 | If you find this code useful for your research, please cite our paper
51 |
52 | ```
53 | @inproceedings{chen2023beyond,
54 | title={Beyond Appearance: a Semantic Controllable Self-Supervised Learning Framework for Human-Centric Visual Tasks},
55 | author={Weihua Chen and Xianzhe Xu and Jian Jia and Hao Luo and Yaohua Wang and Fan Wang and Rong Jin and Xiuyu Sun},
56 | booktitle={The IEEE/CVF Conference on Computer Vision and Pattern Recognition},
57 | year={2023},
58 | }
59 | ```
60 |
--------------------------------------------------------------------------------
/configs/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/configs/.DS_Store
--------------------------------------------------------------------------------
/configs/elephant/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/configs/elephant/.DS_Store
--------------------------------------------------------------------------------
/configs/elephant/cityperson/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/configs/elephant/cityperson/.DS_Store
--------------------------------------------------------------------------------
/configs/elephant/cityperson/mgan_vgg.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | model = dict(
3 | type='MGAN',
4 | pretrained='modelzoo://vgg16',
5 | backbone=dict(
6 | type='VGG',
7 | depth=16,
8 | frozen_stages=1),
9 | neck=None,
10 | rpn_head=dict(
11 | type='RPNHead',
12 | in_channels=512,
13 | feat_channels=512,
14 | anchor_scales=[4., 5.4, 7.2, 9.8, 13.2, 17.9, 24.2, 33.0, 44.1, 59.6, 80.0],
15 | anchor_ratios=[2.44],
16 | anchor_strides=[8],
17 | target_means=[.0, .0, .0, .0],
18 | target_stds=[1.0, 1.0, 1.0, 1.0],
19 | ),
20 | bbox_roi_extractor=dict(
21 | type='SingleRoIExtractor',
22 | roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
23 | out_channels=512,
24 | featmap_strides=[8]),
25 | mgan_head=dict(
26 | type='MGANHead'),
27 | bbox_head=dict(
28 | type='SharedFCBBoxHead',
29 | num_fcs=2,
30 | in_channels=512,
31 | fc_out_channels=1024,
32 | roi_feat_size=7,
33 | num_classes=2,
34 | target_means=[0., 0., 0., 0.],
35 | target_stds=[0.1, 0.1, 0.2, 0.2],
36 | reg_class_agnostic=False,
37 | )
38 | )
39 | test_cfg = dict(
40 | rpn=dict(
41 | nms_across_levels=False,
42 | nms_pre=12000,
43 | nms_post=2000,
44 | max_num=2000,
45 | nms_thr=0.7,
46 | min_bbox_size=0),
47 | rcnn=dict(
48 | score_thr=0.0, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
49 | )
50 | # dataset settings
51 | dataset_type = 'CocoDataset'
52 | data_root = 'datasets/CityPersons/'
53 | img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
54 | data = dict(
55 | imgs_per_gpu=2,
56 | workers_per_gpu=2,
57 | test=dict(
58 | type=dataset_type,
59 | ann_file=data_root + 'val_gt_for_mmdetction.json',
60 | img_prefix=data_root + '/leftImg8bit_trainvaltest/leftImg8bit/val_all_in_folder/',
61 | img_scale=(2048, 1024),
62 | img_norm_cfg=img_norm_cfg,
63 | size_divisor=32,
64 | flip_ratio=0,
65 | with_mask=False,
66 | with_label=False,
67 | test_mode=True)
68 | )
69 | # yapf:enable
70 | # runtime settings
71 | total_epochs = 12
72 | dist_params = dict(backend='nccl')
73 | log_level = 'INFO'
74 | work_dir = '../work_dirs/mgan_50_65'
75 | load_from = None
76 | resume_from = None
77 | workflow = [('train', 1)]
78 |
--------------------------------------------------------------------------------
/configs/empirical_attention/README.md:
--------------------------------------------------------------------------------
1 | # An Empirical Study of Spatial Attention Mechanisms in Deep Networks
2 |
3 | ## Introduction
4 |
5 | ```
6 | @article{zhu2019empirical,
7 | title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks},
8 | author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng},
9 | journal={arXiv preprint arXiv:1904.05873},
10 | year={2019}
11 | }
12 | ```
13 |
14 |
15 | ## Results and Models
16 |
17 | | Backbone | Attention Component | DCN | Lr schd | box AP | Download |
18 | |:---------:|:-------------------:|:----:|:-------:|:------:|:--------:|
19 | | R-50 | 1111 | N | 1x | 38.6 | - |
20 | | R-50 | 0010 | N | 1x | 38.2 | - |
21 | | R-50 | 1111 | Y | 1x | 41.0 | - |
22 | | R-50 | 0010 | Y | 1x | 40.8 | - |
23 |
24 |
--------------------------------------------------------------------------------
/configs/ghm/README.md:
--------------------------------------------------------------------------------
1 | # Gradient Harmonized Single-stage Detector
2 |
3 | ## Introduction
4 |
5 | ```
6 | @inproceedings{li2019gradient,
7 | title={Gradient Harmonized Single-stage Detector},
8 | author={Li, Buyu and Liu, Yu and Wang, Xiaogang},
9 | booktitle={AAAI Conference on Artificial Intelligence},
10 | year={2019}
11 | }
12 | ```
13 |
14 | ## Results and Models
15 |
16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download |
17 | | :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :------: |
18 | | R-50-FPN | pytorch | 1x | 3.9 | 0.500 | 9.4 | 36.9 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r50_fpn_1x_20190608-b9aa5862.pth) |
19 | | R-101-FPN | pytorch | 1x | 5.8 | 0.625 | 8.5 | 39.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r101_fpn_1x_20190608-b885b74a.pth) |
20 | | X-101-32x4d-FPN | pytorch | 1x | 7.0 | 0.818 | 7.6 | 40.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_32x4d_fpn_1x_20190608-ed295d22.pth) |
21 | | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 1.191 | 6.1 | 41.6 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_64x4d_fpn_1x_20190608-7f2037ce.pth) |
--------------------------------------------------------------------------------
/configs/gn/README.md:
--------------------------------------------------------------------------------
1 | # Group Normalization
2 |
3 | ## Introduction
4 |
5 | ```
6 | @inproceedings{wu2018group,
7 | title={Group Normalization},
8 | author={Wu, Yuxin and He, Kaiming},
9 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
10 | year={2018}
11 | }
12 | ```
13 |
14 | ## Results and Models
15 |
16 | | Backbone | model | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download |
17 | |:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:|
18 | | R-50-FPN (d) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.8 | 36.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_2x_20180113-86832cf2.pth) |
19 | | R-50-FPN (d) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.1 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_3x_20180113-8e82f48d.pth) |
20 | | R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 0.970 | 4.8 | 41.5 | 37.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_2x_20180113-9598649c.pth) |
21 | | R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | 0.970 | 4.8 | 41.6 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_3x_20180113-a14ffb96.pth) |
22 | | R-50-FPN (c) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.7 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_2x_20180113-ec93305c.pth) |
23 | | R-50-FPN (c) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.0 | 36.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_3x_20180113-9d230cab.pth) |
24 |
25 | **Notes:**
26 | - (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk).
27 | - The `3x` schedule corresponds to epochs [28, 34, 36].
28 | - **Memory, Train/Inf time is outdated.**
--------------------------------------------------------------------------------
/configs/grid_rcnn/README.md:
--------------------------------------------------------------------------------
1 | # Grid R-CNN
2 |
3 | ## Introduction
4 |
5 | ```
6 | @inproceedings{lu2019grid,
7 | title={Grid r-cnn},
8 | author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie},
9 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
10 | year={2019}
11 | }
12 |
13 | @article{lu2019grid,
14 | title={Grid R-CNN Plus: Faster and Better},
15 | author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie},
16 | journal={arXiv preprint arXiv:1906.05688},
17 | year={2019}
18 | }
19 | ```
20 |
21 | ## Results and Models
22 |
23 | | Backbone | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download |
24 | |:-----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:|
25 | | R-50 | 2x | 4.8 | 1.172 | 10.9 | 40.3 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x_20190619-5b29cf9d.pth) |
26 | | R-101 | 2x | 6.7 | 1.214 | 10.0 | 41.7 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_r101_fpn_2x_20190619-a4b61645.pth) |
27 | | X-101-32x4d | 2x | 8.0 | 1.335 | 8.5 | 43.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x_20190619-0bbfd87a.pth) |
28 | | X-101-64x4d | 2x | 10.9 | 1.753 | 6.4 | 43.1 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_x101_64x4d_fpn_2x_20190619-8f4e20bb.pth) |
29 |
30 | **Notes:**
31 | - All models are trained with 8 GPUs instead of 32 GPUs in the original paper.
32 | - The warm-up lasts for 1 epoch and `2x` here indicates 25 epochs.
33 |
--------------------------------------------------------------------------------
/configs/libra_rcnn/README.md:
--------------------------------------------------------------------------------
1 | # Libra R-CNN: Towards Balanced Learning for Object Detection
2 |
3 | ## Introduction
4 |
5 | We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf).
6 |
7 | ```
8 | @inproceedings{pang2019libra,
9 | title={Libra R-CNN: Towards Balanced Learning for Object Detection},
10 | author={Pang, Jiangmiao and Chen, Kai and Shi, Jianping and Feng, Huajun and Ouyang, Wanli and Lin, Dahua},
11 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
12 | year={2019}
13 | }
14 | ```
15 |
16 | ## Results and models
17 |
18 | The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val)
19 |
20 | | Architecture | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download |
21 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:|
22 | | Faster R-CNN | R-50-FPN | pytorch | 1x | 4.2 | 0.375 | 12.0 | 38.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_20190610-bf0ea559.pth) |
23 | | Fast R-CNN | R-50-FPN | pytorch | 1x | 3.7 | 0.272 | 16.3 | 38.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_20190525-a43f88b5.pth) |
24 | | Faster R-CNN | R-101-FPN | pytorch | 1x | 6.0 | 0.495 | 10.4 | 40.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_20190525-94e94051.pth) |
25 | | Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 10.1 | 1.050 | 6.8 | 42.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_20190525-359c134a.pth) |
26 | | RetinaNet | R-50-FPN | pytorch | 1x | 3.7 | 0.328 | 11.8 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_retinanet_r50_fpn_1x_20190525-ead2a6bb.pth) |
27 |
--------------------------------------------------------------------------------
/configs/ms_rcnn/README.md:
--------------------------------------------------------------------------------
1 | # Mask Scoring R-CNN
2 |
3 | ## Introduction
4 |
5 | ```
6 | @inproceedings{huang2019msrcnn,
7 | title={Mask Scoring R-CNN},
8 | author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang},
9 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
10 | year={2019},
11 | }
12 | ```
13 |
14 | ## Results and Models
15 |
16 | | Backbone | style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download |
17 | |:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:|
18 | | R-50-FPN | caffe | 1x | 4.3 | 0.537 | 10.1 | 37.4 | 35.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_r50_caffe_fpn_1x_20190624-619934b5.pth) |
19 | | R-50-FPN | caffe | 2x | - | - | - | 38.2 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_r50_caffe_fpn_2x_20190525-a07be31e.pth) |
20 | | R-101-FPN | caffe | 1x | 6.2 | 0.682 | 9.1 | 39.8 | 37.2 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_r101_caffe_fpn_1x_20190624-677a5548.pth) |
21 | | R-101-FPN | caffe | 2x | - | - | - | 40.7 | 37.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_r101_caffe_fpn_2x_20190525-4aee1528.pth) |
22 | | R-X101-32x4d | pytorch | 2x | 7.6 | 0.844 | 8.0 | 41.7 | 38.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_x101_32x4d_fpn_2x_20190628-ab454d07.pth) |
23 | | R-X101-64x4d | pytorch | 1x | 10.5 | 1.214 | 6.4 | 42.0 | 39.1 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_x101_64x4d_fpn_1x_20190628-dec32bda.pth) |
24 | | R-X101-64x4d | pytorch | 2x | - | - | - | 42.2 | 38.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_x101_64x4d_fpn_2x_20190525-c044c25a.pth) |
25 |
--------------------------------------------------------------------------------
/configs/scratch/README.md:
--------------------------------------------------------------------------------
1 | # Rethinking ImageNet Pre-training
2 |
3 | ## Introduction
4 |
5 | ```
6 | @article{he2018rethinking,
7 | title={Rethinking imagenet pre-training},
8 | author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr},
9 | journal={arXiv preprint arXiv:1811.08883},
10 | year={2018}
11 | }
12 | ```
13 |
14 | ## Results and Models
15 |
16 | | Model | Backbone | Style | Lr schd | box AP | mask AP | Download |
17 | |:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:--------:|
18 | | Faster R-CNN | R-50-FPN | pytorch | 6x | 40.1 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_faster_rcnn_r50_fpn_gn_6x-20190515-ff554978.pth) |
19 | | Mask R-CNN | R-50-FPN | pytorch | 6x | 41.0 | 37.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_mask_rcnn_r50_fpn_gn_6x_20190515-96743f5e.pth) |
20 |
21 | Note:
22 | - The above models are trained with 16 GPUs.
--------------------------------------------------------------------------------
/configs/wider_face/README.md:
--------------------------------------------------------------------------------
1 | ## WIDER Face Dataset
2 |
3 | To use the WIDER Face dataset you need to download it
4 | and extract to the `data/WIDERFace` folder. Annotation in the VOC format
5 | can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git).
6 | You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders
7 | to the `Annotations` folders inside the corresponding directories `WIDER_train` and `WIDER_val`.
8 | Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`.
9 | The directory should be like this:
10 |
11 | ```
12 | mmdetection
13 | ├── mmdet
14 | ├── tools
15 | ├── configs
16 | ├── data
17 | │ ├── WIDERFace
18 | │ │ ├── WIDER_train
19 | │ | │ ├──0--Parade
20 | │ | │ ├── ...
21 | │ | │ ├── Annotations
22 | │ │ ├── WIDER_val
23 | │ | │ ├──0--Parade
24 | │ | │ ├── ...
25 | │ | │ ├── Annotations
26 | │ │ ├── val.txt
27 | │ │ ├── train.txt
28 |
29 | ```
30 |
31 | After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or
32 | create your own config based on the presented one.
33 |
--------------------------------------------------------------------------------
/convert_model_version.py:
--------------------------------------------------------------------------------
import torch


def convert_checkpoint(path):
    """Extract the 'teacher' weights from a SOLIDER checkpoint and re-save.

    The converted file is written next to the input with a ``_new.pth``
    suffix, using the legacy (non-zipfile) serialization so that older
    PyTorch versions can still load it.

    Args:
        path (str): path to the source ``.pth`` checkpoint containing a
            top-level ``"teacher"`` entry.

    Returns:
        str: path of the converted checkpoint.
    """
    # map_location='cpu' so the conversion also works on machines without
    # a GPU (the original checkpoint may have been saved from CUDA tensors).
    state_dict = torch.load(path, map_location='cpu')
    state_dict = state_dict["teacher"]
    out_path = path.replace('.pth', '_new.pth')
    torch.save(state_dict, out_path, _use_new_zipfile_serialization=False)
    return out_path


if __name__ == "__main__":
    convert_checkpoint('path/to/SOLIDER/log/lup/swin_tiny/checkpoint.pth')
7 |
--------------------------------------------------------------------------------
/demo/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/demo/1.png
--------------------------------------------------------------------------------
/demo/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/demo/2.png
--------------------------------------------------------------------------------
/demo/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/demo/3.png
--------------------------------------------------------------------------------
/gifs/1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/gifs/1.gif
--------------------------------------------------------------------------------
/gifs/2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/gifs/2.gif
--------------------------------------------------------------------------------
/gifs/gm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/gifs/gm.png
--------------------------------------------------------------------------------
/icon/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/icon/demo.jpg
--------------------------------------------------------------------------------
/mmdet.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/mmdet.egg-info/not-zip-safe:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/mmdet.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | mmcv>=0.2.6
2 | numpy
3 | matplotlib
4 | six
5 | terminaltables
6 | pycocotools
7 | torch>=1.1
8 |
--------------------------------------------------------------------------------
/mmdet.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | mmdet
2 |
--------------------------------------------------------------------------------
/mmdet/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/.DS_Store
--------------------------------------------------------------------------------
/mmdet/__init__.py:
--------------------------------------------------------------------------------
1 | from .version import __version__, short_version
2 |
3 | __all__ = ['__version__', 'short_version']
4 |
--------------------------------------------------------------------------------
/mmdet/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/__pycache__/version.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/__pycache__/version.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/apis/__init__.py:
--------------------------------------------------------------------------------
1 | from .env import init_dist, get_root_logger, set_random_seed
2 | from .train import train_detector
3 | from .inference import init_detector, inference_detector, show_result
4 |
5 | __all__ = [
6 | 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector',
7 | 'init_detector', 'inference_detector', 'show_result'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmdet/apis/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/apis/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/apis/__pycache__/env.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/apis/__pycache__/env.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/apis/__pycache__/inference.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/apis/__pycache__/inference.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/apis/__pycache__/train.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/apis/__pycache__/train.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/apis/env.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import random
4 | import subprocess
5 |
6 | import numpy as np
7 | import torch
8 | import torch.distributed as dist
9 | import torch.multiprocessing as mp
10 | from mmcv.runner import get_dist_info
11 |
12 |
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize distributed training for the given launcher.

    Supported launchers are ``'pytorch'``, ``'mpi'`` and ``'slurm'``.

    Raises:
        ValueError: if ``launcher`` is not one of the supported values.
    """
    # CUDA requires the 'spawn' start method; only set it when unset so a
    # caller-chosen method is respected.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
        return
    if launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
        return
    if launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
        return
    raise ValueError('Invalid launcher type: {}'.format(launcher))
24 |
25 |
def _init_dist_pytorch(backend, **kwargs):
    """Initialize the process group when launched by torch.distributed.

    Reads the global rank from the ``RANK`` environment variable and pins
    this process to GPU ``rank % num_gpus``.
    """
    # TODO: use local_rank instead of rank % num_gpus
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
32 |
33 |
def _init_dist_mpi(backend, **kwargs):
    # MPI-based initialization is not supported by this codebase.
    raise NotImplementedError
36 |
37 |
def _init_dist_slurm(backend, port=29500, **kwargs):
    """Initialize the process group under a SLURM allocation.

    Derives rank and world size from SLURM environment variables, resolves
    the master address from the first host of the node list, and exports the
    variables ``torch.distributed`` expects before creating the group.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    # First hostname of the expanded node list serves as the master address.
    addr = subprocess.getoutput(
        'scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_PORT'] = str(port)
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
51 |
52 |
def set_random_seed(seed):
    """Seed Python, NumPy and PyTorch (CPU and all CUDA) RNGs.

    Args:
        seed (int): seed applied to every generator.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
58 |
59 |
def get_root_logger(log_level=logging.INFO):
    """Return the root logger, configuring it on first use.

    The basic format/level configuration is applied only when the root
    logger has no handlers yet. Non-zero ranks are silenced to ERROR so
    multi-process training does not duplicate log output.
    """
    root = logging.getLogger()
    if not root.hasHandlers():
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(message)s',
            level=log_level)
    rank, _ = get_dist_info()
    if rank != 0:
        root.setLevel('ERROR')
    return root
70 |
--------------------------------------------------------------------------------
/mmdet/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor import * # noqa: F401, F403
2 | from .bbox import * # noqa: F401, F403
3 | from .evaluation import * # noqa: F401, F403
4 | from .fp16 import * # noqa: F401, F403
5 | from .mask import * # noqa: F401, F403
6 | from .post_processing import * # noqa: F401, F403
7 | from .utils import * # noqa: F401, F403
8 |
--------------------------------------------------------------------------------
/mmdet/core/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/anchor/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_generator import AnchorGenerator
2 | from .anchor_target import anchor_target, anchor_inside_flags
3 | from .guided_anchor_target import ga_loc_target, ga_shape_target
4 |
5 | __all__ = [
6 | 'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target',
7 | 'ga_shape_target'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmdet/core/anchor/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/anchor/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/anchor/__pycache__/anchor_generator.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/anchor/__pycache__/anchor_generator.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/anchor/__pycache__/anchor_target.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/anchor/__pycache__/anchor_target.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/anchor/__pycache__/guided_anchor_target.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/anchor/__pycache__/guided_anchor_target.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/__init__.py:
--------------------------------------------------------------------------------
1 | from .geometry import bbox_overlaps
2 | from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult
3 | from .samplers import (BaseSampler, PseudoSampler, RandomSampler,
4 | InstanceBalancedPosSampler, IoUBalancedNegSampler,
5 | CombinedSampler, SamplingResult)
6 | from .assign_sampling import build_assigner, build_sampler, assign_and_sample
7 | from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping,
8 | bbox_mapping_back, bbox2roi, roi2bbox, bbox2result,
9 | distance2bbox, csp_height2bbox, csp_topdown2bbox,
10 | csp_height2bbox_part, csp_vis_height2bbox, csp_heightwidth2bbox, csp_heightwidth2bbox_part, csp_height2bbox_four_part)
11 | from .bbox_target import bbox_target
12 |
13 | __all__ = [
14 | 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
15 | 'BaseSampler', 'PseudoSampler', 'RandomSampler',
16 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
17 | 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',
18 | 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',
19 | 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
20 | 'distance2bbox', 'bbox_target', 'csp_height2bbox', 'csp_topdown2bbox',
21 | 'csp_height2bbox_part', 'csp_vis_height2bbox', 'csp_heightwidth2bbox', 'csp_heightwidth2bbox_part', 'csp_height2bbox_four_part'
22 | ]
23 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/__pycache__/assign_sampling.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/__pycache__/assign_sampling.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/__pycache__/bbox_target.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/__pycache__/bbox_target.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/__pycache__/geometry.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/__pycache__/geometry.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/__pycache__/transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/__pycache__/transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assign_sampling.py:
--------------------------------------------------------------------------------
1 | import mmcv
2 |
3 | from . import assigners, samplers
4 |
5 |
def build_assigner(cfg, **kwargs):
    """Build a bbox assigner.

    Args:
        cfg: either an already-constructed ``BaseAssigner`` instance
            (returned unchanged) or a config dict with a ``type`` key.
        **kwargs: default constructor arguments merged into the dict config.

    Returns:
        An assigner instance.

    Raises:
        TypeError: if ``cfg`` is neither an assigner nor a dict.
    """
    if isinstance(cfg, assigners.BaseAssigner):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
    else:
        # Bug fix: the message previously said "sampler" (copy-paste from
        # build_sampler); this function builds an assigner.
        raise TypeError('Invalid type {} for building an assigner'.format(
            type(cfg)))
14 |
15 |
def build_sampler(cfg, **kwargs):
    """Build a bbox sampler.

    An already-constructed ``BaseSampler`` is passed through unchanged; a
    dict config is instantiated via ``mmcv.runner.obj_from_dict`` with
    ``kwargs`` as default constructor arguments. Anything else raises
    ``TypeError``.
    """
    if isinstance(cfg, samplers.BaseSampler):
        return cfg
    if not isinstance(cfg, dict):
        raise TypeError('Invalid type {} for building a sampler'.format(
            type(cfg)))
    return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
24 |
25 |
def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
    """Assign candidate boxes to ground truth, then sample pos/neg boxes.

    Args:
        bboxes: candidate boxes to be assigned.
        gt_bboxes: ground-truth boxes.
        gt_bboxes_ignore: ground-truth boxes ignored during assignment.
        gt_labels: labels of the ground-truth boxes (may be None).
        cfg: config object providing ``assigner`` and ``sampler`` sub-configs.

    Returns:
        tuple: ``(assign_result, sampling_result)``.
    """
    bbox_assigner = build_assigner(cfg.assigner)
    bbox_sampler = build_sampler(cfg.sampler)
    assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore,
                                         gt_labels)
    sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes,
                                          gt_labels)
    return assign_result, sampling_result
34 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_assigner import BaseAssigner
2 | from .max_iou_assigner import MaxIoUAssigner
3 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner
4 | from .assign_result import AssignResult
5 |
6 | __all__ = [
7 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__pycache__/approx_max_iou_assigner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/assigners/__pycache__/approx_max_iou_assigner.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/assign_result.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class AssignResult(object):
5 |
6 | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
7 | self.num_gts = num_gts
8 | self.gt_inds = gt_inds
9 | self.max_overlaps = max_overlaps
10 | self.labels = labels
11 |
12 | def add_gt_(self, gt_labels):
13 | self_inds = torch.arange(
14 | 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
15 | self.gt_inds = torch.cat([self_inds, self.gt_inds])
16 | self.max_overlaps = torch.cat(
17 | [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps])
18 | if self.labels is not None:
19 | self.labels = torch.cat([gt_labels, self.labels])
20 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/assigners/base_assigner.py:
--------------------------------------------------------------------------------
1 | from abc import ABCMeta, abstractmethod
2 |
3 |
class BaseAssigner(metaclass=ABCMeta):
    """Abstract interface for bbox assigners.

    Subclasses must implement :meth:`assign`, which matches candidate boxes
    against ground-truth boxes.
    """

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign candidate ``bboxes`` to ``gt_bboxes``; return the result."""
        pass
9 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/geometry.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Compute IoU/IoF between two sets of boxes.

    Boxes use the (x1, y1, x2, y2) convention with inclusive pixel
    coordinates, i.e. width = x2 - x1 + 1.

    Args:
        bboxes1 (Tensor): shape (m, 4).
        bboxes2 (Tensor): shape (n, 4); when ``is_aligned`` m must equal n.
        mode (str): 'iou' (intersection over union) or 'iof' (intersection
            over foreground, normalised by the ``bboxes1`` areas).
        is_aligned (bool): pair the boxes element-wise instead of all-pairs.

    Returns:
        Tensor: shape (m, n) in all-pairs mode, shape (m,) when aligned.
            NOTE(review): the empty aligned case returns shape (m, 1),
            inconsistent with the non-empty aligned shape (m,).
    """
    assert mode in ('iou', 'iof')

    num1 = bboxes1.size(0)
    num2 = bboxes2.size(0)
    if is_aligned:
        assert num1 == num2

    if num1 == 0 or num2 == 0:
        # legacy uninitialised-tensor constructor, kept for compatibility
        return bboxes1.new(num1, 1) if is_aligned else bboxes1.new(num1, num2)

    if is_aligned:
        top_left = torch.max(bboxes1[:, :2], bboxes2[:, :2])  # (rows, 2)
        bot_right = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])  # (rows, 2)
        wh = (bot_right - top_left + 1).clamp(min=0)
        inter = wh[:, 0] * wh[:, 1]
        area1 = ((bboxes1[:, 2] - bboxes1[:, 0] + 1) *
                 (bboxes1[:, 3] - bboxes1[:, 1] + 1))
        if mode == 'iou':
            area2 = ((bboxes2[:, 2] - bboxes2[:, 0] + 1) *
                     (bboxes2[:, 3] - bboxes2[:, 1] + 1))
            return inter / (area1 + area2 - inter)
        return inter / area1

    # All-pairs branch: bboxes2 is promoted to float64 (as in the original),
    # so the result dtype is float64 here.
    bboxes2 = bboxes2.type(torch.float64)
    top_left = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])  # (m, n, 2)
    bot_right = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])  # (m, n, 2)
    wh = (bot_right - top_left + 1).clamp(min=0)
    inter = wh[:, :, 0] * wh[:, :, 1]
    area1 = ((bboxes1[:, 2] - bboxes1[:, 0] + 1) *
             (bboxes1[:, 3] - bboxes1[:, 1] + 1))
    if mode == 'iou':
        area2 = ((bboxes2[:, 2] - bboxes2[:, 0] + 1) *
                 (bboxes2[:, 3] - bboxes2[:, 1] + 1))
        return inter / (area1[:, None] + area2 - inter)
    return inter / area1[:, None]
65 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__init__.py:
--------------------------------------------------------------------------------
# Public API of the bbox samplers subpackage: the abstract base class, the
# concrete samplers, and the SamplingResult container their ``sample``
# methods return.
from .base_sampler import BaseSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .combined_sampler import CombinedSampler
from .ohem_sampler import OHEMSampler
from .sampling_result import SamplingResult

__all__ = [
    'BaseSampler', 'PseudoSampler', 'RandomSampler',
    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
    'OHEMSampler', 'SamplingResult'
]
15 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/combined_sampler.py:
--------------------------------------------------------------------------------
1 | from .base_sampler import BaseSampler
2 | from ..assign_sampling import build_sampler
3 |
4 |
class CombinedSampler(BaseSampler):
    """Sampler composed of a separate positive and negative sub-sampler.

    ``pos_sampler`` and ``neg_sampler`` are configs passed to
    :func:`build_sampler`; the shared keyword arguments are forwarded both
    to the base class and to each sub-sampler.
    """

    def __init__(self, pos_sampler, neg_sampler, **kwargs):
        super(CombinedSampler, self).__init__(**kwargs)
        self.pos_sampler = build_sampler(pos_sampler, **kwargs)
        self.neg_sampler = build_sampler(neg_sampler, **kwargs)

    def _sample_pos(self, **kwargs):
        # Never called directly; positive sampling is done by self.pos_sampler.
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        # Never called directly; negative sampling is done by self.neg_sampler.
        raise NotImplementedError
17 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from .random_sampler import RandomSampler
5 |
6 |
class InstanceBalancedPosSampler(RandomSampler):
    """Positive sampler that spreads samples evenly across gt instances.

    Instead of drawing positives uniformly, it budgets roughly
    ``num_expected / num_gts`` samples per ground-truth instance so that
    instances with many candidate boxes do not dominate the batch.
    """

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample up to ``num_expected`` positive box indices.

        Args:
            assign_result (AssignResult): assignment holding ``gt_inds``
                (``> 0`` marks a positive, value is the 1-based gt index).
            num_expected (int): maximum number of positives to return.

        Returns:
            Tensor: long tensor of sampled positive indices.
        """
        pos_inds = torch.nonzero(assign_result.gt_inds > 0)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            # per-instance budget (+1 so the quotas cover num_expected)
            num_per_gt = int(round(num_expected / float(num_gts)) + 1)
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero(assign_result.gt_inds == i.item())
                if inds.numel() != 0:
                    inds = inds.squeeze(1)
                else:
                    continue
                if len(inds) > num_per_gt:
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                # BUGFIX: take the set difference over index *values*.
                # Iterating a tensor yields 0-d tensors whose hash is
                # identity-based, so the original
                # ``set(pos_inds.cpu()) - set(sampled_inds.cpu())`` never
                # removed already-sampled indices and could return
                # duplicates; numpy scalars hash by value.
                extra_inds = np.array(
                    list(
                        set(pos_inds.cpu().numpy()) -
                        set(sampled_inds.cpu().numpy())))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(
                    assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif len(sampled_inds) > num_expected:
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
42 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/pseudo_sampler.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .base_sampler import BaseSampler
4 | from .sampling_result import SamplingResult
5 |
6 |
class PseudoSampler(BaseSampler):
    """A no-op sampler: keeps every positive and negative candidate."""

    def __init__(self, **kwargs):
        # Intentionally does not call BaseSampler.__init__: no sampling
        # quota or ratio is needed, all candidates are kept.
        pass

    def _sample_pos(self, **kwargs):
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Wrap all assigned boxes into a SamplingResult without sampling."""
        positives = torch.nonzero(
            assign_result.gt_inds > 0).squeeze(-1).unique()
        negatives = torch.nonzero(
            assign_result.gt_inds == 0).squeeze(-1).unique()
        # No proposal here is a gt box, so the gt flags are all zero.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(positives, negatives, bboxes, gt_bboxes,
                              assign_result, gt_flags)
27 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/random_sampler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from .base_sampler import BaseSampler
5 |
6 |
class RandomSampler(BaseSampler):
    """Sampler that draws positives and negatives uniformly at random."""

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)

    @staticmethod
    def random_choice(gallery, num):
        """Pick ``num`` distinct elements of ``gallery`` at random.

        numpy's shuffle is used for the permutation (historically faster
        than torch's randperm). A list input comes back as a numpy array;
        a tensor input comes back as a tensor on its original device.
        """
        assert len(gallery) >= num
        if isinstance(gallery, list):
            gallery = np.array(gallery)
        perm = np.arange(len(gallery))
        np.random.shuffle(perm)
        chosen = perm[:num]
        if not isinstance(gallery, np.ndarray):
            chosen = torch.from_numpy(chosen).long().to(gallery.device)
        return gallery[chosen]

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Uniformly sample up to ``num_expected`` positive indices."""
        candidates = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if candidates.numel() != 0:
            candidates = candidates.squeeze(1)
        if candidates.numel() <= num_expected:
            return candidates
        return self.random_choice(candidates, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Uniformly sample up to ``num_expected`` negative indices."""
        candidates = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if candidates.numel() != 0:
            candidates = candidates.squeeze(1)
        if len(candidates) <= num_expected:
            return candidates
        return self.random_choice(candidates, num_expected)
54 |
--------------------------------------------------------------------------------
/mmdet/core/bbox/samplers/sampling_result.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
class SamplingResult(object):
    """Container for the boxes selected by a sampler.

    Splits the sampled boxes into positive/negative groups and records,
    for each positive, which ground-truth box it was assigned to.
    """

    def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
                 gt_flags):
        self.pos_inds = pos_inds
        self.neg_inds = neg_inds
        self.pos_bboxes = bboxes[pos_inds]
        self.neg_bboxes = bboxes[neg_inds]
        # flags marking which of the positives are gt boxes themselves
        self.pos_is_gt = gt_flags[pos_inds]

        self.num_gts = gt_bboxes.shape[0]
        # assign_result.gt_inds is 1-based for positives; shift to 0-based
        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
        self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
        if assign_result.labels is None:
            self.pos_gt_labels = None
        else:
            self.pos_gt_labels = assign_result.labels[pos_inds]

    @property
    def bboxes(self):
        """All sampled boxes, positives first."""
        return torch.cat([self.pos_bboxes, self.neg_bboxes])
25 |
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__init__.py:
--------------------------------------------------------------------------------
# Public API of the evaluation subpackage: class-name utilities, COCO
# helpers, distributed evaluation hooks, and mAP/recall metrics.
from .class_names import (voc_classes, imagenet_det_classes,
                          imagenet_vid_classes, coco_classes, dataset_aliases,
                          get_classes)
from .coco_utils import coco_eval, fast_eval_recall, results2json
from .eval_hooks import (DistEvalHook, DistEvalmAPHook, CocoDistEvalRecallHook,
                         CocoDistEvalmAPHook, CocoDistEvalMRHook)
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, print_recall_summary, plot_num_recall,
                     plot_iou_recall)

__all__ = [
    'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
    'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval',
    'fast_eval_recall', 'results2json', 'DistEvalHook', 'DistEvalmAPHook',
    'CocoDistEvalRecallHook', 'CocoDistEvalmAPHook', 'average_precision',
    'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary',
    'plot_num_recall', 'plot_iou_recall', 'CocoDistEvalMRHook'
]
19 |
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/class_names.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/class_names.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/coco_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/coco_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/eval_mr.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/eval_mr.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/mean_ap.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/mean_ap.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/__pycache__/recall.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/evaluation/__pycache__/recall.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/evaluation/bbox_overlaps.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
    """Compute IoU or IoF between two arrays of boxes (numpy version).

    Boxes are (x1, y1, x2, y2) with inclusive coordinates
    (width = x2 - x1 + 1). In 'iof' mode the intersection is normalised
    by the areas of the ``bboxes1`` boxes.

    Args:
        bboxes1 (ndarray): shape (n, 4).
        bboxes2 (ndarray): shape (k, 4).
        mode (str): 'iou' (intersection over union) or 'iof'
            (intersection over foreground).

    Returns:
        ndarray: float32 overlaps of shape (n, k).
    """
    assert mode in ('iou', 'iof')

    bboxes1 = bboxes1.astype(np.float32)
    bboxes2 = bboxes2.astype(np.float32)
    n_rows = bboxes1.shape[0]
    n_cols = bboxes2.shape[0]
    if n_rows * n_cols == 0:
        return np.zeros((n_rows, n_cols), dtype=np.float32)

    # Loop over the smaller set for speed; transpose back at the end.
    swapped = n_rows > n_cols
    if swapped:
        bboxes1, bboxes2 = bboxes2, bboxes1
    result = np.zeros((bboxes1.shape[0], bboxes2.shape[0]), dtype=np.float32)

    area1 = ((bboxes1[:, 2] - bboxes1[:, 0] + 1) *
             (bboxes1[:, 3] - bboxes1[:, 1] + 1))
    area2 = ((bboxes2[:, 2] - bboxes2[:, 0] + 1) *
             (bboxes2[:, 3] - bboxes2[:, 1] + 1))
    for i in range(bboxes1.shape[0]):
        x_lo = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
        y_lo = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
        x_hi = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
        y_hi = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
        inter = (np.maximum(x_hi - x_lo + 1, 0) *
                 np.maximum(y_hi - y_lo + 1, 0))
        if mode == 'iou':
            denom = area1[i] + area2 - inter
        else:
            # after a swap, the original "foreground" boxes live in bboxes2
            denom = area2 if swapped else area1[i]
        result[i, :] = inter / denom
    return result.T if swapped else result
50 |
--------------------------------------------------------------------------------
/mmdet/core/fp16/__init__.py:
--------------------------------------------------------------------------------
# Public API of the fp16 subpackage: precision-casting decorators plus the
# optimizer hook and model wrapper for mixed-precision training.
from .decorators import auto_fp16, force_fp32
from .hooks import Fp16OptimizerHook, wrap_fp16_model

__all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model']
5 |
--------------------------------------------------------------------------------
/mmdet/core/fp16/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/fp16/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/fp16/__pycache__/decorators.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/fp16/__pycache__/decorators.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/fp16/__pycache__/hooks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/fp16/__pycache__/hooks.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/fp16/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/fp16/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/fp16/utils.py:
--------------------------------------------------------------------------------
1 | from collections import abc
2 |
3 | import numpy as np
4 | import torch
5 |
6 |
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively convert every tensor inside ``inputs`` to ``dst_type``.

    Strings and numpy arrays pass through untouched; mappings and other
    iterables are rebuilt with their own type after converting each item;
    anything else is returned unchanged. ``src_type`` is kept for interface
    compatibility but the conversion only depends on ``dst_type``.
    """
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    # strings are iterable, so they must be filtered out before Iterable
    if isinstance(inputs, str):
        return inputs
    if isinstance(inputs, np.ndarray):
        return inputs
    if isinstance(inputs, abc.Mapping):
        converted = {
            key: cast_tensor_type(value, src_type, dst_type)
            for key, value in inputs.items()
        }
        return type(inputs)(converted)
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type, dst_type) for item in inputs)
    return inputs
24 |
--------------------------------------------------------------------------------
/mmdet/core/mask/__init__.py:
--------------------------------------------------------------------------------
# Public API of the mask subpackage: polygon splitting and mask-target
# generation for mask heads.
from .utils import split_combined_polys
from .mask_target import mask_target

__all__ = ['split_combined_polys', 'mask_target']
5 |
--------------------------------------------------------------------------------
/mmdet/core/mask/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/mask/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/mask/__pycache__/mask_target.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/mask/__pycache__/mask_target.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/mask/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/mask/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/mask/mask_target.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import mmcv
4 |
5 |
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
                cfg):
    """Build mask targets for every image and concatenate them.

    Each image's positive proposals are handed to
    :func:`mask_target_single` with the same ``cfg``; the per-image
    targets are concatenated into a single tensor.
    """
    per_image = [
        mask_target_single(proposals, gt_inds, gt_masks, cfg)
        for proposals, gt_inds, gt_masks in zip(
            pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list)
    ]
    return torch.cat(per_image)
13 |
14 |
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Crop and resize gt masks to ``cfg.mask_size`` for each positive proposal.

    Returns a float tensor of shape (num_pos, mask_size, mask_size) on the
    proposals' device, or an empty tensor when there are no positives.
    """
    mask_size = cfg.mask_size
    num_pos = pos_proposals.size(0)
    if num_pos == 0:
        return pos_proposals.new_zeros((0, mask_size, mask_size))

    proposals_np = pos_proposals.cpu().numpy()
    gt_ids = pos_assigned_gt_inds.cpu().numpy()
    targets = []
    for idx in range(num_pos):
        gt_mask = gt_masks[gt_ids[idx]]
        x1, y1, x2, y2 = proposals_np[idx, :].astype(np.int32)
        # clamp to at least 1 pixel in each direction
        w = np.maximum(x2 - x1 + 1, 1)
        h = np.maximum(y2 - y1 + 1, 1)
        # mask is uint8 both before and after resizing
        resized = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w],
                                (mask_size, mask_size))
        targets.append(resized)
    return torch.from_numpy(np.stack(targets)).float().to(
        pos_proposals.device)
37 |
--------------------------------------------------------------------------------
/mmdet/core/mask/utils.py:
--------------------------------------------------------------------------------
1 | import mmcv
2 |
3 |
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split flattened per-image polygons back into per-mask groups.

    A mask is a list of polys and a poly is a 1-D array; datasets store all
    polys of an image concatenated in one tensor. This undoes that packing.

    Args:
        polys (list): one 1-D tensor per image.
        poly_lens (list): one tensor of poly lengths per image.
        polys_per_mask (list): one tensor of per-mask poly counts per image.

    Returns:
        list: per image, a list (one entry per mask) of lists of polys.
    """
    mask_polys_list = []
    for img_polys, img_poly_lens, img_polys_per_mask in zip(
            polys, poly_lens, polys_per_mask):
        # first cut the flat tensor into individual polys ...
        split_polys = mmcv.slice_list(img_polys, img_poly_lens.tolist())
        # ... then group the polys by mask
        mask_polys_list.append(
            mmcv.slice_list(split_polys, img_polys_per_mask.tolist()))
    return mask_polys_list
31 |
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/__init__.py
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/__init__.py
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/__pycache__/mean_teacher_runner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/__pycache__/mean_teacher_runner.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/hooks/__init__.py
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/hooks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/__pycache__/mean_teacher_optimizer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/hooks/__pycache__/mean_teacher_optimizer.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/logger/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/logger/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/hooks/logger/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/logger/__pycache__/wandb.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/my_mmcv/runner/hooks/logger/__pycache__/wandb.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/my_mmcv/runner/hooks/mean_teacher_optimizer.py:
--------------------------------------------------------------------------------
1 | from torch.nn.utils import clip_grad
2 |
3 | from mmcv.runner.hooks.hook import Hook
4 |
5 |
class OptimizerHook(Hook):
    """Optimizer hook with optional gradient clipping and mean-teacher EMA.

    After each training iteration it backpropagates
    ``runner.outputs['loss']``, optionally clips gradients, steps the
    optimizer and — when ``mean_teacher`` is configured — updates
    ``runner.teacher_dict`` as an exponential moving average of the
    student weights.
    """

    def __init__(self, grad_clip=None, mean_teacher=None):
        self.grad_clip = grad_clip
        self.mean_teacher = mean_teacher

    def clip_grads(self, params):
        """Clip gradients of trainable params with ``self.grad_clip`` kwargs."""
        trainable = filter(lambda p: p.requires_grad, params)
        clip_grad.clip_grad_norm_(trainable, **self.grad_clip)

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()

        # Mean-teacher EMA update of the teacher weights.
        if self.mean_teacher:
            alpha = self.mean_teacher.alpha
            for name, value in runner.model.module.state_dict().items():
                if 'num_batches_tracked' in name:
                    # BatchNorm iteration counters are copied, not averaged.
                    runner.teacher_dict[name] = 1 * value
                else:
                    runner.teacher_dict[name] = (
                        alpha * runner.teacher_dict[name] +
                        (1 - alpha) * value)
--------------------------------------------------------------------------------
/mmdet/core/post_processing/__init__.py:
--------------------------------------------------------------------------------
# Public API of the post-processing subpackage: multi-class NMS and helpers
# for merging results from augmented (multi-scale / flipped) inference.
from .bbox_nms import multiclass_nms
from .merge_augs import (merge_aug_proposals, merge_aug_bboxes,
                         merge_aug_scores, merge_aug_masks)

__all__ = [
    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
    'merge_aug_scores', 'merge_aug_masks'
]
9 |
--------------------------------------------------------------------------------
/mmdet/core/post_processing/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/post_processing/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/post_processing/__pycache__/merge_augs.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/post_processing/__pycache__/merge_augs.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/post_processing/bbox_nms.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from mmdet.ops.nms import nms_wrapper
4 |
5 |
def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class); column 0 is treated as
            the background class and skipped.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config; its ``type`` key (default ``'nms'``)
            selects the op from ``nms_wrapper`` and the remaining keys are
            forwarded to that op.
        max_num (int): if positive and there are more than max_num bboxes
            after NMS, only the top max_num (by score) are kept. A
            non-positive value (the default) keeps all boxes.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k,). Labels
            are 0-based.
    """
    num_classes = multi_scores.shape[1]
    bboxes, labels = [], []
    nms_cfg_ = nms_cfg.copy()
    nms_type = nms_cfg_.pop('type', 'nms')
    nms_op = getattr(nms_wrapper, nms_type)
    # Class 0 is background, so start from 1.
    for i in range(1, num_classes):
        cls_inds = multi_scores[:, i] > score_thr
        if not cls_inds.any():
            continue
        # get bboxes and scores of this class
        if multi_bboxes.shape[1] == 4:
            _bboxes = multi_bboxes[cls_inds, :]
        else:
            _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
        _scores = multi_scores[cls_inds, i]
        if score_factors is not None:
            # Advanced indexing above returns a copy, so this in-place
            # multiply does not modify the caller's multi_scores.
            _scores *= score_factors[cls_inds]
        cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
        cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
        cls_labels = multi_bboxes.new_full(
            (cls_dets.shape[0], ), i - 1, dtype=torch.long)
        bboxes.append(cls_dets)
        labels.append(cls_labels)
    if bboxes:
        bboxes = torch.cat(bboxes)
        labels = torch.cat(labels)
        # BUGFIX: only truncate when max_num is positive. Previously the
        # default max_num=-1 made `inds[:max_num]` == `inds[:-1]`, silently
        # dropping the lowest-scored detection.
        if 0 < max_num < bboxes.shape[0]:
            _, inds = bboxes[:, -1].sort(descending=True)
            inds = inds[:max_num]
            bboxes = bboxes[inds]
            labels = labels[inds]
    else:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)

    return bboxes, labels
65 |
--------------------------------------------------------------------------------
/mmdet/core/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .dist_utils import allreduce_grads, DistOptimizerHook
2 | from .misc import tensor2imgs, unmap, multi_apply
3 |
4 | __all__ = [
5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap',
6 | 'multi_apply'
7 | ]
8 |
--------------------------------------------------------------------------------
/mmdet/core/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/utils/__pycache__/dist_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/utils/__pycache__/dist_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/utils/__pycache__/misc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/core/utils/__pycache__/misc.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/core/utils/misc.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import mmcv
4 | import numpy as np
5 | from six.moves import map, zip
6 |
7 |
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a batched image tensor into a list of uint8 numpy images.

    Each (C, H, W) slice is moved to CPU, transposed to (H, W, C) and
    de-normalized with ``mmcv.imdenormalize`` using the given mean/std.
    """
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for single in tensor:
        hwc = single.cpu().numpy().transpose(1, 2, 0)
        denorm = mmcv.imdenormalize(hwc, mean, std, to_bgr=to_rgb)
        imgs.append(np.ascontiguousarray(denorm.astype(np.uint8)))
    return imgs
19 |
20 |
def multi_apply(func, *args, **kwargs):
    """Apply ``func`` over the zipped ``args`` and transpose the results.

    Every call is expected to return a tuple; the per-call tuples are
    regrouped into a tuple of lists, one list per tuple position.
    ``kwargs`` are forwarded unchanged to every call.
    """
    if kwargs:
        results = [func(*items, **kwargs) for items in zip(*args)]
    else:
        results = [func(*items) for items in zip(*args)]
    return tuple(map(list, zip(*results)))
25 |
26 |
def unmap(data, count, inds, fill=0):
    """Scatter a subset ``data`` back into a tensor of ``count`` items.

    Positions listed in ``inds`` receive the rows of ``data``; every other
    position is set to ``fill``. Works for 1-D data and for N-D data whose
    trailing dimensions are preserved.
    """
    if data.dim() != 1:
        full_shape = (count, ) + data.size()[1:]
        result = data.new_full(full_shape, fill)
        result[inds, :] = data
    else:
        result = data.new_full((count, ), fill)
        result[inds] = data
    return result
38 |
--------------------------------------------------------------------------------
/mmdet/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .custom import CustomDataset
2 | from .xml_style import XMLDataset
3 | from .coco import CocoDataset
4 | from .voc import VOCDataset
5 | from .wider_face import WIDERFaceDataset
6 | from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
7 | from .utils import to_tensor, random_scale, show_ann
8 | from .dataset_wrappers import ConcatDataset, RepeatDataset
9 | from .extra_aug import ExtraAugmentation
10 | from .registry import DATASETS
11 | from .builder import build_dataset
12 | from .coco_csp_ori import CocoCSPORIDataset
13 | from .ecp_coco import ECPCocoDataset
14 |
15 | __all__ = [
16 | 'CustomDataset', 'XMLDataset', 'CocoDataset',
17 | 'VOCDataset', 'GroupSampler', 'ECPCocoDataset',
18 | 'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
19 | 'show_ann', 'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation',
20 | 'WIDERFaceDataset', 'DATASETS', 'build_dataset', 'CocoCSPORIDataset'
21 | ]
22 |
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/builder.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/builder.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/coco.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/coco_csp_ori.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/coco_csp_ori.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/custom.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/custom.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/dataset_wrappers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/dataset_wrappers.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/ecp_coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/ecp_coco.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/extra_aug.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/extra_aug.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/registry.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/registry.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/voc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/voc.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/wider_face.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/wider_face.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/__pycache__/xml_style.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/__pycache__/xml_style.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/builder.py:
--------------------------------------------------------------------------------
1 | import copy
2 |
3 | from mmdet.utils import build_from_cfg
4 | from .dataset_wrappers import ConcatDataset, RepeatDataset
5 | from .registry import DATASETS
6 |
7 |
def _concat_dataset(cfg):
    """Build one dataset per annotation file and concatenate them.

    ``cfg['ann_file']`` must be a list/tuple; ``img_prefix``, ``seg_prefix``
    and ``proposal_file`` may each be a matching list/tuple (indexed per
    annotation file) or a single value shared by all sub-datasets.
    """
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    # BUGFIX: read 'seg_prefix' -- the key written into data_cfg below and
    # consistent with 'img_prefix'/'proposal_file'. The old lookup used
    # 'seg_prefixes', which callers never set, so per-dataset seg prefixes
    # were silently ignored.
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)

    datasets = []
    num_dset = len(ann_files)
    for i in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        data_cfg['ann_file'] = ann_files[i]
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[i]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[i]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[i]
        datasets.append(build_dataset(data_cfg))

    return ConcatDataset(datasets)
28 |
29 |
def build_dataset(cfg):
    """Build a dataset from config, handling repeat and concat wrappers.

    A ``RepeatDataset`` config is built recursively; a list/tuple of
    annotation files yields a concatenated dataset; anything else is
    resolved through the ``DATASETS`` registry.
    """
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(build_dataset(cfg['dataset']), cfg['times'])
    if isinstance(cfg['ann_file'], (list, tuple)):
        return _concat_dataset(cfg)
    return build_from_cfg(cfg, DATASETS)
39 |
--------------------------------------------------------------------------------
/mmdet/datasets/dataset_wrappers.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
3 |
4 | from .registry import DATASETS
5 |
6 |
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # Classes are assumed identical across sub-datasets; take the first.
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            # Concatenate the aspect-ratio group flags of all sub-datasets.
            self.flag = np.concatenate([ds.flag for ds in datasets])
26 |
27 |
@DATASETS.register_module
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the dataset
    is small. Using RepeatDataset can reduce the data loading time between
    epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self._ori_len = len(dataset)
        # Repeat the aspect-ratio group flag to match the virtual length.
        if hasattr(dataset, 'flag'):
            self.flag = np.tile(dataset.flag, times)

    def __getitem__(self, idx):
        # Wrap the index back into the underlying dataset's range.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        # Virtual length: the dataset appears `times` larger.
        return self._ori_len * self.times
56 |
--------------------------------------------------------------------------------
/mmdet/datasets/loader/__init__.py:
--------------------------------------------------------------------------------
1 | from .build_loader import build_dataloader
2 | from .sampler import GroupSampler, DistributedGroupSampler
3 |
4 | __all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader']
5 |
--------------------------------------------------------------------------------
/mmdet/datasets/loader/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/loader/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/loader/__pycache__/build_loader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/loader/__pycache__/build_loader.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/loader/__pycache__/sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/datasets/loader/__pycache__/sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/datasets/loader/build_loader.py:
--------------------------------------------------------------------------------
1 | import platform
2 | from functools import partial
3 |
4 | from mmcv.runner import get_dist_info
5 | from mmcv.parallel import collate
6 | from torch.utils.data import DataLoader
7 |
8 | from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler
9 |
# Set the soft open-file limit to 4096 so that many DataLoader worker
# processes can hold file descriptors simultaneously (see the linked
# PyTorch issue). The `resource` module is POSIX-only, hence the guard.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
15 |
16 |
def build_dataloader(dataset,
                     imgs_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     **kwargs):
    """Build a PyTorch DataLoader with group-aware sampling.

    Args:
        dataset (Dataset): The dataset to load from.
        imgs_per_gpu (int): Samples per GPU; used both as the per-GPU batch
            size and as ``samples_per_gpu`` when collating.
        workers_per_gpu (int): Worker subprocesses per GPU.
        num_gpus (int): Number of GPUs; only used in non-distributed mode.
        dist (bool): Whether to use distributed samplers.
        **kwargs: Extra arguments forwarded to ``DataLoader``. ``shuffle``
            (default True) selects the sampler and is consumed here.

    Returns:
        DataLoader: The configured data loader.
    """
    # BUGFIX: pop 'shuffle' instead of get() -- a sampler is supplied below,
    # and DataLoader raises ValueError when both `sampler` and `shuffle=True`
    # are passed; previously an explicit shuffle=True kwarg crashed here.
    shuffle = kwargs.pop('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        **kwargs)

    return data_loader
49 |
--------------------------------------------------------------------------------
/mmdet/datasets/registry.py:
--------------------------------------------------------------------------------
1 | from mmdet.utils import Registry
2 |
# Global registry that dataset classes join via ``@DATASETS.register_module``
# and that ``build_from_cfg`` resolves config 'type' strings against.
DATASETS = Registry('dataset')
4 |
--------------------------------------------------------------------------------
/mmdet/datasets/utils.py:
--------------------------------------------------------------------------------
1 | from collections import Sequence
2 |
3 | import matplotlib.pyplot as plt
4 | import mmcv
5 | import numpy as np
6 | import torch
7 |
8 |
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Raises:
        TypeError: If ``data`` is of an unsupported type.
    """
    # Use collections.abc explicitly: the bare `collections.Sequence` alias
    # (imported at module level) was deprecated since Python 3.3 and removed
    # in Python 3.10.
    from collections.abc import Sequence
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError('type {} cannot be converted to tensor.'.format(
            type(data)))
28 |
29 |
def random_scale(img_scales, mode='range'):
    """Randomly select a scale from a list of scales or scale ranges.

    Args:
        img_scales (list[tuple]): Image scale or scale range.
        mode (str): "range" or "value".

    Returns:
        tuple: Sampled image scale.

    Raises:
        ValueError: If ``mode`` is invalid, or more than two scales are
            given with a mode other than "value".
    """
    num_scales = len(img_scales)
    if num_scales == 1:  # fixed scale is specified
        img_scale = img_scales[0]
    elif num_scales == 2:  # randomly sample a scale
        if mode == 'range':
            # Sample long and short edges independently, each within the
            # [min, max] interval spanned by the two given scales.
            img_scale_long = [max(s) for s in img_scales]
            img_scale_short = [min(s) for s in img_scales]
            long_edge = np.random.randint(
                min(img_scale_long),
                max(img_scale_long) + 1)
            short_edge = np.random.randint(
                min(img_scale_short),
                max(img_scale_short) + 1)
            img_scale = (long_edge, short_edge)
        elif mode == 'value':
            img_scale = img_scales[np.random.randint(num_scales)]
        else:
            # BUGFIX: an unknown mode previously fell through and raised an
            # opaque UnboundLocalError; fail with a clear message instead.
            raise ValueError(
                'mode must be "range" or "value", got {!r}'.format(mode))
    else:
        if mode != 'value':
            raise ValueError(
                'Only "value" mode supports more than 2 image scales')
        img_scale = img_scales[np.random.randint(num_scales)]
    return img_scale
62 |
63 |
def show_ann(coco, img, ann_info):
    """Display an image with its COCO annotations overlaid via matplotlib.

    Args:
        coco: A pycocotools ``COCO``-like object providing ``showAnns``.
        img (ndarray): BGR image (as loaded by mmcv/OpenCV); converted to
            RGB for matplotlib display.
        ann_info (list[dict]): Annotation dicts in COCO format.
    """
    plt.imshow(mmcv.bgr2rgb(img))
    plt.axis('off')
    coco.showAnns(ann_info)
    plt.show()
69 |
--------------------------------------------------------------------------------
/mmdet/datasets/voc.py:
--------------------------------------------------------------------------------
1 | from .registry import DATASETS
2 | from .xml_style import XMLDataset
3 |
4 |
@DATASETS.register_module
class VOCDataset(XMLDataset):
    """PASCAL VOC detection dataset (XML-style annotations).

    The challenge year (2007 or 2012) is inferred from ``img_prefix``.
    """

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # Check VOC2007 before VOC2012, matching the original lookup order.
        for year, tag in ((2007, 'VOC2007'), (2012, 'VOC2012')):
            if tag in self.img_prefix:
                self.year = year
                break
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')
21 |
--------------------------------------------------------------------------------
/mmdet/datasets/wider_face.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import xml.etree.ElementTree as ET
3 |
4 | import mmcv
5 |
6 | from .registry import DATASETS
7 | from .xml_style import XMLDataset
8 |
9 |
@DATASETS.register_module
class WIDERFaceDataset(XMLDataset):
    """
    Reader for the WIDER Face dataset in PASCAL VOC format.
    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """
    CLASSES = ('face', )

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Parse the per-image VOC XML files listed (by id) in ``ann_file``.

        Returns a list of dicts with ``id``, ``filename`` (folder-relative
        jpg path taken from the XML's <folder> tag), ``width`` and
        ``height``.
        """
        img_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                '{}.xml'.format(img_id))
            root = ET.parse(xml_path).getroot()
            size = root.find('size')
            info = dict(
                id=img_id,
                filename=osp.join(root.find('folder').text,
                                  '{}.jpg'.format(img_id)),
                width=int(size.find('width').text),
                height=int(size.find('height').text))
            img_infos.append(info)

        return img_infos
43 |
--------------------------------------------------------------------------------
/mmdet/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/.DS_Store
--------------------------------------------------------------------------------
/mmdet/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbones import * # noqa: F401,F403
2 | from .necks import * # noqa: F401,F403
3 | from .roi_extractors import * # noqa: F401,F403
4 | from .anchor_heads import * # noqa: F401,F403
5 | from .shared_heads import * # noqa: F401,F403
6 | from .bbox_heads import * # noqa: F401,F403
7 | from .mask_heads import * # noqa: F401,F403
8 | from .losses import * # noqa: F401,F403
9 | from .detectors import * # noqa: F401,F403
10 | from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS,
11 | LOSSES, DETECTORS)
12 | from .builder import (build_backbone, build_neck, build_roi_extractor,
13 | build_shared_head, build_head, build_loss,
14 | build_detector)
15 |
16 | __all__ = [
17 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
18 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
19 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector'
20 | ]
21 |
--------------------------------------------------------------------------------
/mmdet/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/__pycache__/builder.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/__pycache__/builder.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/__pycache__/registry.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/__pycache__/registry.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_head import AnchorHead
2 | from .guided_anchor_head import GuidedAnchorHead, FeatureAdaption
3 | from .fcos_head import FCOSHead
4 | from .rpn_head import RPNHead
5 | from .ga_rpn_head import GARPNHead
6 | from .retina_head import RetinaHead
7 | from .ga_retina_head import GARetinaHead
8 | from .ssd_head import SSDHead
9 | from .csp_head import CSPHead
10 | from .csp_trans_head import CSPTransHead
11 | from .csp_si_head import CSPSIHead
12 |
13 | __all__ = [
14 | 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead',
15 | 'GARPNHead', 'RetinaHead', 'GARetinaHead', 'SSDHead', 'FCOSHead',
16 | 'CSPHead', 'CSPTransHead', 'CSPSIHead',
17 | ]
18 |
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/anchor_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/anchor_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/csp_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/csp_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/csp_si_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/csp_si_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/csp_trans_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/csp_trans_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/ga_rpn_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/ga_rpn_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__pycache__/transformers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/anchor_heads/__pycache__/transformers.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | from .resnet import ResNet, make_res_layer
2 | from .resnext import ResNeXt
3 | from .ssd_vgg import SSDVGG
4 | from .hrnet import HRNet
5 | from .senet import SENet
6 | from .mobilenet import MobilenetV2
7 | from .vgg import VGG
8 | from .swin_transformer import SwinTiny, SwinSmall, SwinBase
9 |
10 | __all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet', 'SENet', 'MobilenetV2', 'VGG', 'SwinTiny', 'SwinSmall', 'SwinBase']
11 |
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/hrnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/hrnet.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/mobilenet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/mobilenet.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/resnext.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/resnext.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/senet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/senet.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/ssd_vgg.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/ssd_vgg.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/swin.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/swin.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/__pycache__/vgg.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/backbones/__pycache__/vgg.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/backbones/vgg.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import torch.nn as nn
4 | from mmcv.cnn import (VGG, constant_init, kaiming_init,
5 | normal_init)
6 |
7 | from mmcv.runner import load_checkpoint
8 | from ..registry import BACKBONES
9 |
10 |
@BACKBONES.register_module
class VGG(VGG):
    """VGG backbone that drops pool4 at inference time.

    NOTE(review): the class name deliberately shadows the imported
    ``mmcv.cnn.VGG`` base class, so the registry entry is named ``VGG``
    and zero-argument ``super()`` still resolves to the mmcv base.
    """

    def __init__(self,
                 depth=16,
                 with_last_pool=False,
                 ceil_mode=True,
                 frozen_stages=-1,
                 ):
        super().__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            frozen_stages=frozen_stages,
        )

    def init_weights(self, pretrained=None):
        """Load weights from a checkpoint path, or initialize in place.

        Args:
            pretrained (str | None): checkpoint path, or None for
                Kaiming/constant/normal initialization of the features.
        """
        if pretrained is None:
            for module in self.features.modules():
                if isinstance(module, nn.Conv2d):
                    kaiming_init(module)
                elif isinstance(module, nn.BatchNorm2d):
                    constant_init(module, 1)
                elif isinstance(module, nn.Linear):
                    normal_init(module, std=0.01)
        elif isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False,
                            logger=logging.getLogger())
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Apply every feature layer except index 23 (pool4); return a 1-tuple."""
        pool4_index = 23
        for index, layer in enumerate(self.features):
            if index != pool4_index:
                x = layer(x)
        return (x, )
48 |
49 |
50 |
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/.DS_Store
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox_head import BBoxHead
2 | from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead
3 | from .cascade_ped_head import CascadePedFCBBoxHead
4 | from .mgan_head import MGANHead
5 | from .refine_head import RefineHead
6 |
7 | __all__ = ['BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'CascadePedFCBBoxHead', 'MGANHead', 'RefineHead']
8 |
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/bbox_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/bbox_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/cascade_ped_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/cascade_ped_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/convfc_bbox_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/convfc_bbox_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/mgan_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/mgan_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/__pycache__/refine_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/bbox_heads/__pycache__/refine_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/bbox_heads/mgan_head.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | from ..registry import HEADS
4 | from ..utils import ConvModule
5 | from mmdet.core import auto_fp16
6 |
7 |
@HEADS.register_module
class MGANHead(nn.Module):
    """Head used by the MGAN detector.

    Runs a small conv stack over RoI features and multiplies the result by a
    per-pixel sigmoid attention map produced by a 1x1 conv (see ``forward``).
    """

    def __init__(self,
                 num_convs=2,
                 roi_feat_size=7,
                 in_channels=512,
                 conv_out_channels=512,
                 conv_cfg=None,
                 norm_cfg=None):
        super(MGANHead, self).__init__()
        self.num_convs = num_convs
        self.roi_feat_size = roi_feat_size
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels

        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        # First conv adapts in_channels; the rest keep conv_out_channels.
        self.convs = nn.ModuleList([
            ConvModule(
                self.in_channels if idx == 0 else self.conv_out_channels,
                self.conv_out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg) for idx in range(self.num_convs)
        ])
        # 1x1 conv producing a single-channel attention logit map.
        self.conv_logits = nn.Conv2d(self.conv_out_channels, 1, 1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None

    @auto_fp16()
    def forward(self, x):
        """Return input features re-weighted by the learned attention map."""
        for conv in self.convs:
            x = conv(x)
        attention = self.conv_logits(x).sigmoid()
        return attention * x
51 |
52 |
53 |
--------------------------------------------------------------------------------
/mmdet/models/builder.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 | from mmdet.utils import build_from_cfg
4 | from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS,
5 | LOSSES, DETECTORS)
6 |
7 |
def build(cfg, registry, default_args=None):
    """Build one module from ``cfg``, or an ``nn.Sequential`` from a list.

    Args:
        cfg (dict | list[dict]): config(s) describing the module(s).
        registry (Registry): registry the module type is looked up in.
        default_args (dict | None): extra kwargs merged into each config.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    return nn.Sequential(
        *(build_from_cfg(sub_cfg, registry, default_args) for sub_cfg in cfg))
16 |
17 |
def build_backbone(cfg):
    """Build a backbone network from its config dict."""
    return build(cfg, BACKBONES)
20 |
21 |
def build_neck(cfg):
    """Build a neck (e.g. FPN) from its config dict."""
    return build(cfg, NECKS)
24 |
25 |
def build_roi_extractor(cfg):
    """Build a RoI feature extractor from its config dict."""
    return build(cfg, ROI_EXTRACTORS)
28 |
29 |
def build_shared_head(cfg):
    """Build a shared head from its config dict."""
    return build(cfg, SHARED_HEADS)
32 |
33 |
def build_head(cfg):
    """Build a head (bbox/mask/anchor head) from its config dict."""
    return build(cfg, HEADS)
36 |
37 |
def build_loss(cfg):
    """Build a loss module from its config dict."""
    return build(cfg, LOSSES)
40 |
41 |
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector, injecting train/test configs as default args."""
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
44 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseDetector
2 | from .single_stage import SingleStageDetector
3 | from .two_stage import TwoStageDetector
4 | from .rpn import RPN
5 | from .fast_rcnn import FastRCNN
6 | from .faster_rcnn import FasterRCNN
7 | from .mask_rcnn import MaskRCNN
8 | from .cascade_rcnn import CascadeRCNN
9 | from .htc import HybridTaskCascade
10 | from .retinanet import RetinaNet
11 | from .fcos import FCOS
12 | from .grid_rcnn import GridRCNN
13 | from .mask_scoring_rcnn import MaskScoringRCNN
14 | from .mgan import MGAN
15 | from .csp import CSP
16 |
17 | __all__ = [
18 | 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
19 | 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
20 | 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN','MGAN', 'CSP',
21 | ]
22 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/base.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/base.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/csp.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/csp.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/faster_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/faster_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/fcos.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/fcos.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/grid_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/grid_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/htc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/htc.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/mask_scoring_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/mask_scoring_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/mgan.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/mgan.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/retinanet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/retinanet.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/rpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/rpn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/single_stage.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/single_stage.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/test_mixins.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/test_mixins.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/__pycache__/two_stage.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/detectors/__pycache__/two_stage.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/detectors/fast_rcnn.py:
--------------------------------------------------------------------------------
1 | from .two_stage import TwoStageDetector
2 | from ..registry import DETECTORS
3 |
4 |
@DETECTORS.register_module
class FastRCNN(TwoStageDetector):
    """Fast R-CNN: a two-stage detector fed with precomputed proposals."""

    def __init__(self,
                 backbone,
                 bbox_roi_extractor,
                 bbox_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 mask_roi_extractor=None,
                 mask_head=None,
                 pretrained=None):
        # Everything is forwarded unchanged to TwoStageDetector; Fast R-CNN
        # simply omits the RPN head.
        super(FastRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Validate test inputs, then dispatch to simple_test or aug_test."""
        for name, var in (('imgs', imgs), ('img_metas', img_metas)):
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(
                'num of augmentations ({}) != num of image meta ({})'.format(
                    len(imgs), len(img_metas)))
        # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
        assert imgs[0].size(0) == 1

        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0],
                                    **kwargs)
        return self.aug_test(imgs, img_metas, proposals, **kwargs)
51 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/faster_rcnn.py:
--------------------------------------------------------------------------------
1 | from .two_stage import TwoStageDetector
2 | from ..registry import DETECTORS
3 |
4 |
@DETECTORS.register_module
class FasterRCNN(TwoStageDetector):
    """Faster R-CNN: two-stage detector with an RPN proposal head.

    A thin registration wrapper; every constructor argument is passed
    straight through to ``TwoStageDetector``.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        super(FasterRCNN, self).__init__(
            backbone=backbone, neck=neck, shared_head=shared_head,
            rpn_head=rpn_head, bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg,
            pretrained=pretrained)
28 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/fcos.py:
--------------------------------------------------------------------------------
1 | from .single_stage import SingleStageDetector
2 | from ..registry import DETECTORS
3 |
4 |
@DETECTORS.register_module
class FCOS(SingleStageDetector):
    """Registration wrapper exposing the FCOS detector.

    All constructor arguments are forwarded positionally and unchanged
    to ``SingleStageDetector``.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
17 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/mask_rcnn.py:
--------------------------------------------------------------------------------
1 | from .two_stage import TwoStageDetector
2 | from ..registry import DETECTORS
3 |
4 |
@DETECTORS.register_module
class MaskRCNN(TwoStageDetector):
    """Mask R-CNN: Faster R-CNN extended with a mask branch.

    A thin registration wrapper; every constructor argument is passed
    straight through to ``TwoStageDetector``.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 mask_roi_extractor,
                 mask_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        super().__init__(
            backbone=backbone, neck=neck, shared_head=shared_head,
            rpn_head=rpn_head, bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg,
            pretrained=pretrained)
32 |
--------------------------------------------------------------------------------
/mmdet/models/detectors/retinanet.py:
--------------------------------------------------------------------------------
1 | from .single_stage import SingleStageDetector
2 | from ..registry import DETECTORS
3 |
4 |
@DETECTORS.register_module
class RetinaNet(SingleStageDetector):
    """Registration wrapper exposing the RetinaNet detector.

    All constructor arguments are forwarded positionally and unchanged
    to ``SingleStageDetector``.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
17 |
--------------------------------------------------------------------------------
/mmdet/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .accuracy import accuracy, Accuracy
2 | from .cross_entropy_loss import (cross_entropy, binary_cross_entropy,
3 | mask_cross_entropy, CrossEntropyLoss)
4 | from .focal_loss import sigmoid_focal_loss, FocalLoss
5 | from .smooth_l1_loss import smooth_l1_loss, SmoothL1Loss
6 | from .ghm_loss import GHMC, GHMR
7 | from .balanced_l1_loss import balanced_l1_loss, BalancedL1Loss
8 | from .mse_loss import mse_loss, MSELoss
9 | from .iou_loss import iou_loss, bounded_iou_loss, IoULoss, BoundedIoULoss
10 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss
11 | from .kldiv import KLDivLoss
12 | __all__ = [
13 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
14 | 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
15 | 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
16 | 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'KLDivLoss', 'bounded_iou_loss',
17 | 'IoULoss', 'BoundedIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
18 | 'weight_reduce_loss', 'weighted_loss'
19 | ]
20 |
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/accuracy.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/accuracy.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/balanced_l1_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/balanced_l1_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/cross_entropy_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/cross_entropy_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/focal_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/focal_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/ghm_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/ghm_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/iou_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/iou_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/kldiv.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/kldiv.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/mse_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/mse_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/smooth_l1_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/smooth_l1_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/losses/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/losses/accuracy.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
def accuracy(pred, target, topk=1):
    """Compute top-k accuracy of classification predictions, in percent.

    Args:
        pred (Tensor): (N, C) tensor of per-class scores.
        target (Tensor): (N,) tensor of ground-truth class indices.
        topk (int | tuple[int]): k value(s) to evaluate.

    Returns:
        Tensor | list[Tensor]: a single 1-element accuracy tensor when
        ``topk`` is an int, otherwise one tensor per k.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    maxk = max(topk)
    _, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()  # (maxk, N): row i holds the i-th guesses
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))

    res = []
    for k in topk:
        # reshape, not view: `correct` is non-contiguous after the transpose
        # above, so .view(-1) raises a RuntimeError on current PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res
22 |
23 |
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy` with a fixed ``topk``."""

    def __init__(self, topk=(1, )):
        """Store the k value(s) used at every forward call."""
        super().__init__()
        self.topk = topk

    def forward(self, pred, target):
        """Return top-k accuracy of ``pred`` against ``target``."""
        return accuracy(pred, target, topk=self.topk)
32 |
--------------------------------------------------------------------------------
/mmdet/models/losses/balanced_l1_loss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 |
5 | from .utils import weighted_loss
6 | from ..registry import LOSSES
7 |
8 |
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Element-wise balanced L1 loss (Libra R-CNN, arXiv:1904.02701).

    Per-element weighting, reduction and averaging are supplied by the
    ``weighted_loss`` decorator; this body only computes the raw loss map.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0

    abs_err = torch.abs(pred - target)
    # b is chosen so the inlier and outlier branches join continuously
    # at |error| == beta.
    b = np.e**(gamma / alpha) - 1
    inlier = (alpha / b * (b * abs_err + 1) *
              torch.log(b * abs_err / beta + 1) - alpha * abs_err)
    outlier = gamma * abs_err + gamma / b - alpha * beta
    return torch.where(abs_err < beta, inlier, outlier)
27 |
28 |
@LOSSES.register_module
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): inlier-branch slope parameter.
        gamma (float): outlier-branch slope parameter.
        beta (float): threshold between the inlier and outlier regimes.
        reduction (str): 'none' | 'mean' | 'sum'.
        loss_weight (float): scalar multiplier on the final loss.
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted balanced L1 loss between boxes."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A per-call override takes precedence over the configured reduction.
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
70 |
--------------------------------------------------------------------------------
/mmdet/models/losses/kldiv.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from ..registry import LOSSES
5 | from .utils import weight_reduce_loss
6 |
7 |
def kl_divergence(source, target, reduction='mean'):
    """Thin wrapper around ``F.kl_div``.

    Note: ``F.kl_div`` expects ``source`` in log-space and ``target`` as
    probabilities; callers are responsible for providing that.
    """
    loss = F.kl_div(source, target, reduction=reduction)
    return loss
11 |
@LOSSES.register_module
class KLDivLoss(nn.Module):
    """KL-divergence loss on sigmoid-activated classification scores.

    Args:
        reduction (str): 'none' | 'mean' | 'sum', forwarded to ``F.kl_div``.
        loss_weight (float): scalar multiplier on the final loss.
    """

    def __init__(self,
                 reduction='mean',
                 loss_weight=1.0):
        super(KLDivLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

        self.criterion = kl_divergence

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the KL-divergence loss.

        ``weight`` and ``avg_factor`` are accepted for interface parity with
        the other losses in this package but are not used here.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # torch.sigmoid replaces the deprecated F.sigmoid alias (removed in
        # newer PyTorch releases); the computation is identical.
        cls_score = torch.sigmoid(cls_score)
        # NOTE(review): F.kl_div expects its first argument in log-space,
        # but this passes sigmoid probabilities — confirm this matches the
        # intended training recipe before changing it.
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_cls = self.loss_weight * self.criterion(
            cls_score,
            label,
            reduction=reduction)
        return loss_cls
40 |
--------------------------------------------------------------------------------
/mmdet/models/losses/mse_loss.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 |
4 | from .utils import weighted_loss
5 | from ..registry import LOSSES
6 |
7 | mse_loss = weighted_loss(F.mse_loss)
8 |
9 |
@LOSSES.register_module
class MSELoss(nn.Module):
    """Mean-squared-error loss with optional element weights.

    Args:
        reduction (str): 'none' | 'mean' | 'sum'.
        loss_weight (float): scalar multiplier on the final loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the weighted MSE loss.

        ``reduction_override`` optionally replaces ``self.reduction`` for
        this call; added (default None, so existing callers are unaffected)
        for consistency with the other losses in this package, e.g.
        SmoothL1Loss and BalancedL1Loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss = self.loss_weight * mse_loss(
            pred,
            target,
            weight,
            reduction=reduction,
            avg_factor=avg_factor)
        return loss
26 |
--------------------------------------------------------------------------------
/mmdet/models/losses/smooth_l1_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from .utils import weighted_loss
5 | from ..registry import LOSSES
6 |
7 |
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Element-wise smooth L1 (Huber-style) loss.

    Quadratic for |pred - target| < beta and linear beyond; per-element
    weighting and reduction are supplied by the ``weighted_loss`` decorator.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_err = torch.abs(pred - target)
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    return torch.where(abs_err < beta, quadratic, linear)
16 |
17 |
@LOSSES.register_module
class SmoothL1Loss(nn.Module):
    """Smooth L1 (Huber) loss module.

    Args:
        beta (float): threshold between the quadratic and linear regimes.
        reduction (str): 'none' | 'mean' | 'sum'.
        loss_weight (float): scalar multiplier on the final loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted smooth L1 loss between ``pred`` and ``target``."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A per-call override takes precedence over the configured reduction.
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
46 |
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .fcn_mask_head import FCNMaskHead
2 | from .fused_semantic_head import FusedSemanticHead
3 | from .grid_head import GridHead
4 | from .htc_mask_head import HTCMaskHead
5 | from .maskiou_head import MaskIoUHead
6 |
7 | __all__ = [
8 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
9 | 'MaskIoUHead'
10 | ]
11 |
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/fcn_mask_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/fcn_mask_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/fused_semantic_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/fused_semantic_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/grid_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/grid_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/htc_mask_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/htc_mask_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/__pycache__/maskiou_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/mask_heads/__pycache__/maskiou_head.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/htc_mask_head.py:
--------------------------------------------------------------------------------
1 | from .fcn_mask_head import FCNMaskHead
2 | from ..registry import HEADS
3 | from ..utils import ConvModule
4 |
5 |
@HEADS.register_module
class HTCMaskHead(FCNMaskHead):
    """FCN mask head variant used by Hybrid Task Cascade.

    Extends ``FCNMaskHead`` with a 1x1 conv that fuses the mask feature
    handed over from the previous cascade stage into the current input.
    """

    def __init__(self, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        # Projects the previous stage's feature before it is added to x.
        self.conv_res = ConvModule(
            self.conv_out_channels,
            self.conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)

    def init_weights(self):
        """Initialize the inherited layers plus the residual 1x1 conv."""
        super(HTCMaskHead, self).init_weights()
        self.conv_res.init_weights()

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        """Run the head.

        Returns the mask logits, the intermediate feature (to pass on to
        the next stage), or both, depending on the two boolean flags.
        """
        # Fuse the previous cascade stage's mask feature, when provided.
        if res_feat is not None:
            x = x + self.conv_res(res_feat)
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            up = self.upsample(x)
            if self.upsample_method == 'deconv':
                up = self.relu(up)
            outs.append(self.conv_logits(up))
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]
39 |
--------------------------------------------------------------------------------
/mmdet/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | from .fpn import FPN
2 | from .bfp import BFP
3 | from .hrfpn import HRFPN
4 | from .csp_neck import CSPNeck
5 | from .hrcspfpn import HRCSPFPN
6 |
7 | __all__ = ['FPN', 'BFP', 'HRCSPFPN', 'HRFPN', 'CSPNeck']
8 |
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/bfp.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/bfp.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/csp_neck.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/csp_neck.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/fpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/fpn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/hrcspfpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/hrcspfpn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/necks/__pycache__/hrfpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/necks/__pycache__/hrfpn.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .non_local import NonLocal2D
2 | from .generalized_attention import GeneralizedAttention
3 |
4 | __all__ = ['NonLocal2D', 'GeneralizedAttention']
5 |
--------------------------------------------------------------------------------
/mmdet/models/plugins/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/plugins/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/plugins/__pycache__/generalized_attention.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/plugins/__pycache__/generalized_attention.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/plugins/__pycache__/non_local.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/plugins/__pycache__/non_local.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/registry.py:
--------------------------------------------------------------------------------
1 | from mmdet.utils import Registry
2 |
# Component registries: the build functions look classes up here by the
# 'type' field of a config dict, and classes self-register via the
# ``@<REGISTRY>.register_module`` decorator used throughout mmdet/models.
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')
10 |
--------------------------------------------------------------------------------
/mmdet/models/roi_extractors/__init__.py:
--------------------------------------------------------------------------------
1 | from .single_level import SingleRoIExtractor
2 |
3 | __all__ = ['SingleRoIExtractor']
4 |
--------------------------------------------------------------------------------
/mmdet/models/roi_extractors/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/roi_extractors/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/roi_extractors/__pycache__/single_level.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/roi_extractors/__pycache__/single_level.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/shared_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .res_layer import ResLayer
2 |
3 | __all__ = ['ResLayer']
4 |
--------------------------------------------------------------------------------
/mmdet/models/shared_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/shared_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/shared_heads/__pycache__/res_layer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/shared_heads/__pycache__/res_layer.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/shared_heads/res_layer.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import torch.nn as nn
4 | from mmcv.cnn import constant_init, kaiming_init
5 | from mmcv.runner import load_checkpoint
6 |
7 | from mmdet.core import auto_fp16
8 | from ..backbones import ResNet, make_res_layer
9 | from ..registry import SHARED_HEADS
10 |
11 |
@SHARED_HEADS.register_module
class ResLayer(nn.Module):
    """Shared RoI head that applies a single ResNet stage to RoI features.

    Typically used as the conv5 head of C4-style detectors: the backbone
    stops after an earlier stage and this module runs the remaining
    residual stage on each RoI feature map.

    Args:
        depth (int): ResNet depth; key into ``ResNet.arch_settings``.
        stage (int): zero-based index of the residual stage to build.
        stride (int): stride of the stage's first block.
        dilation (int): dilation used inside the stage.
        style (str): 'pytorch' or 'caffe' residual-block style.
        norm_cfg (dict): normalization layer config.
        norm_eval (bool): keep BatchNorm layers in eval mode while training.
        with_cp (bool): use gradient checkpointing in the blocks.
        dcn (dict | None): deformable-conv config forwarded to the blocks.
    """

    def __init__(self,
                 depth,
                 stage=3,
                 stride=2,
                 dilation=1,
                 style='pytorch',
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 with_cp=False,
                 dcn=None):
        super(ResLayer, self).__init__()
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        # Toggled by the auto_fp16 machinery when fp16 training is enabled.
        self.fp16_enabled = False
        block, stage_blocks = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # Standard ResNet channel schedule: 64 * 2**stage output planes;
        # input channels come from the previous stage scaled by the block
        # expansion factor (4 for Bottleneck, 1 for BasicBlock).
        planes = 64 * 2**stage
        inplanes = 64 * 2**(stage - 1) * block.expansion

        res_layer = make_res_layer(
            block,
            inplanes,
            planes,
            stage_block,
            stride=stride,
            dilation=dilation,
            style=style,
            with_cp=with_cp,
            norm_cfg=self.norm_cfg,
            dcn=dcn)
        # Module name matches ResNet's convention (layer1..layer4) so
        # pretrained checkpoints load without key remapping.
        self.add_module('layer{}'.format(stage + 1), res_layer)

    def init_weights(self, pretrained=None):
        """Load a checkpoint, or Kaiming/constant-initialize from scratch.

        Args:
            pretrained (str | None): checkpoint path/URL, or None for
                from-scratch initialization.

        Raises:
            TypeError: if ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        """Apply the stored residual stage to ``x``."""
        res_layer = getattr(self, 'layer{}'.format(self.stage + 1))
        out = res_layer(x)
        return out

    def train(self, mode=True):
        """Switch train/eval mode; optionally keep BatchNorm frozen.

        Freezing BN statistics is common when fine-tuning detectors with
        small per-GPU batch sizes.
        """
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
73 |
--------------------------------------------------------------------------------
/mmdet/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .conv_ws import conv_ws_2d, ConvWS2d
2 | from .conv_module import build_conv_layer, ConvModule
3 | from .norm import build_norm_layer
4 | from .scale import Scale
5 | from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init,
6 | bias_init_with_prob)
7 |
8 | __all__ = [
9 | 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule',
10 | 'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init',
11 | 'kaiming_init', 'bias_init_with_prob', 'Scale'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/conv_module.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/conv_module.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/conv_ws.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/conv_ws.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/norm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/norm.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/scale.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/scale.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/__pycache__/weight_init.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/models/utils/__pycache__/weight_init.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/models/utils/conv_ws.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 |
4 |
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    """2D convolution with Weight Standardization (arXiv:1903.10520).

    Each output-channel filter is shifted to zero mean and divided by its
    (sample) standard deviation plus ``eps`` before the convolution runs.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
19 |
20 |
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` drop-in that standardizes its weight on every forward.

    Accepts the same constructor arguments as ``nn.Conv2d`` plus ``eps``,
    the numerical floor added to each filter's std before dividing.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        """Convolve ``x`` with the weight-standardized kernel."""
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
47 |
--------------------------------------------------------------------------------
/mmdet/models/utils/norm.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
# Mapping from a config 'type' string to (name abbreviation, layer class).
# build_norm_layer uses the abbreviation (+ postfix) as the layer's name.
norm_cfg = {
    # format: layer_type: (abbreviation, module)
    'BN': ('bn', nn.BatchNorm2d),
    'SyncBN': ('bn', nn.SyncBatchNorm),
    'GN': ('gn', nn.GroupNorm),
    # and potentially 'SN'
}
10 |
11 |
def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer from a config dict.

    Args:
        cfg (dict): must contain ``type`` (a key of ``norm_cfg``); may also
            carry layer kwargs and an optional ``requires_grad`` flag that
            controls whether the layer's parameters are trainable.
        num_features (int): number of input channels.
        postfix (int | str): appended to the norm abbreviation to form the
            returned layer name.

    Returns:
        tuple[str, nn.Module]: (abbreviation + postfix, the built layer).
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    # requires_grad is consumed here, not forwarded to the layer ctor.
    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type == 'GN':
        # GroupNorm takes channels as a keyword and needs num_groups.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN':
            layer._specify_ddp_gpu_num(1)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
56 |
--------------------------------------------------------------------------------
/mmdet/models/utils/scale.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
class Scale(nn.Module):
    """Learnable scalar multiplier.

    Args:
        scale (float): initial value of the learnable factor.
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # Registered as a parameter so the factor is trained with the model.
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        """Multiply ``x`` element-wise by the learnable scalar."""
        return self.scale * x
13 |
--------------------------------------------------------------------------------
/mmdet/models/utils/weight_init.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch.nn as nn
3 |
4 |
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Initialize ``module.weight`` with Xavier init and fill its bias.

    Args:
        module (nn.Module): module with a ``weight`` attribute (and
            optionally a ``bias``).
        gain (float): scaling factor for the Xavier scheme.
        bias (float): constant value used to fill ``module.bias``.
        distribution (str): 'uniform' or 'normal'.
    """
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.xavier_uniform_(module.weight, gain=gain)
    else:
        nn.init.xavier_normal_(module.weight, gain=gain)
    # Modules built with bias=False still expose ``bias`` (as None), and
    # nn.init.constant_(None, ...) would raise — guard against that.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
13 |
14 |
def normal_init(module, mean=0, std=1, bias=0):
    """Fill ``module.weight`` from N(mean, std) and set its bias constant.

    Args:
        module (nn.Module): module with ``weight`` (and optionally ``bias``).
        mean (float): mean of the normal distribution.
        std (float): standard deviation of the normal distribution.
        bias (float): constant value used to fill ``module.bias``.
    """
    nn.init.normal_(module.weight, mean, std)
    # Guard against bias=False modules where ``module.bias`` is None.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
19 |
20 |
def uniform_init(module, a=0, b=1, bias=0):
    """Fill ``module.weight`` from U(a, b) and set its bias constant.

    Args:
        module (nn.Module): module with ``weight`` (and optionally ``bias``).
        a (float): lower bound of the uniform distribution.
        b (float): upper bound of the uniform distribution.
        bias (float): constant value used to fill ``module.bias``.
    """
    nn.init.uniform_(module.weight, a, b)
    # Guard against bias=False modules where ``module.bias`` is None.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
25 |
26 |
def kaiming_init(module,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Initialize ``module.weight`` with Kaiming init and fill its bias.

    Args:
        module (nn.Module): module with ``weight`` (and optionally ``bias``).
        mode (str): 'fan_in' or 'fan_out'.
        nonlinearity (str): name of the following nonlinearity (e.g. 'relu').
        bias (float): constant value used to fill ``module.bias``.
        distribution (str): 'uniform' or 'normal'.
    """
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.kaiming_uniform_(
            module.weight, mode=mode, nonlinearity=nonlinearity)
    else:
        nn.init.kaiming_normal_(
            module.weight, mode=mode, nonlinearity=nonlinearity)
    # Guard against bias=False modules where ``module.bias`` is None.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
41 |
42 |
def bias_init_with_prob(prior_prob):
    """Return the bias value whose sigmoid equals ``prior_prob``.

    Used to initialize conv/fc biases so the initial foreground
    probability matches the given prior (the logit of ``prior_prob``).
    """
    return float(np.log(prior_prob / (1 - prior_prob)))
47 |
--------------------------------------------------------------------------------
/mmdet/ops/__init__.py:
--------------------------------------------------------------------------------
1 | from .dcn import (DeformConv, DeformConvPack, ModulatedDeformConv,
2 | ModulatedDeformConvPack, DeformRoIPooling,
3 | DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack,
4 | deform_conv, modulated_deform_conv, deform_roi_pooling)
5 | from .gcb import ContextBlock
6 | from .nms import nms, soft_nms
7 | from .roi_align import RoIAlign, roi_align
8 | from .roi_pool import RoIPool, roi_pool
9 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
10 | from .masked_conv import MaskedConv2d
11 |
12 | __all__ = [
13 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool',
14 | 'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
15 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv',
16 | 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv',
17 | 'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss',
18 | 'MaskedConv2d', 'ContextBlock'
19 | ]
20 |
--------------------------------------------------------------------------------
/mmdet/ops/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions.deform_conv import deform_conv, modulated_deform_conv
2 | from .functions.deform_pool import deform_roi_pooling
3 | from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
4 | DeformConvPack, ModulatedDeformConvPack)
5 | from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
6 | ModulatedDeformRoIPoolingPack)
7 |
8 | __all__ = [
9 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
10 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
11 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
12 | 'deform_roi_pooling'
13 | ]
14 |
--------------------------------------------------------------------------------
/mmdet/ops/dcn/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/deform_conv_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/deform_conv_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/dcn/deform_pool_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/deform_pool_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/dcn/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/functions/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/functions/__pycache__/deform_conv.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/functions/__pycache__/deform_conv.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/functions/__pycache__/deform_pool.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/functions/__pycache__/deform_pool.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/modules/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/dcn/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/modules/__pycache__/deform_conv.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/modules/__pycache__/deform_conv.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/dcn/modules/__pycache__/deform_pool.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/dcn/modules/__pycache__/deform_pool.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/gcb/__init__.py:
--------------------------------------------------------------------------------
1 | from .context_block import ContextBlock
2 |
3 | __all__ = [
4 | 'ContextBlock',
5 | ]
6 |
--------------------------------------------------------------------------------
/mmdet/ops/gcb/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/gcb/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/gcb/__pycache__/context_block.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/gcb/__pycache__/context_block.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions.masked_conv import masked_conv2d
2 | from .modules.masked_conv import MaskedConv2d
3 |
4 | __all__ = ['masked_conv2d', 'MaskedConv2d']
5 |
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/functions/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/functions/__pycache__/masked_conv.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/functions/__pycache__/masked_conv.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/masked_conv2d_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/masked_conv2d_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/modules/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/modules/__pycache__/masked_conv.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/masked_conv/modules/__pycache__/masked_conv.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/masked_conv/modules/masked_conv.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from ..functions.masked_conv import masked_conv2d
3 |
4 |
class MaskedConv2d(nn.Conv2d):
    """Conv2d variant that can restrict computation to a binary mask.

    With ``mask=None`` this behaves exactly like ``nn.Conv2d``. With a
    mask it dispatches to the ``masked_conv2d`` op, which is forward-only
    and currently supports stride 1 only.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        # All construction logic is inherited from nn.Conv2d.
        super(MaskedConv2d, self).__init__(in_channels, out_channels,
                                           kernel_size, stride, padding,
                                           dilation, groups, bias)

    def forward(self, input, mask=None):
        # No mask: fall back to the ordinary convolution.
        if mask is None:
            return super(MaskedConv2d, self).forward(input)
        return masked_conv2d(input, mask, self.weight, self.bias,
                             self.padding)
31 |
--------------------------------------------------------------------------------
/mmdet/ops/nms/__init__.py:
--------------------------------------------------------------------------------
1 | from .nms_wrapper import nms, soft_nms
2 |
3 | __all__ = ['nms', 'soft_nms']
4 |
--------------------------------------------------------------------------------
/mmdet/ops/nms/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/nms/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/nms/__pycache__/nms_wrapper.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/nms/__pycache__/nms_wrapper.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/nms/src/nms_cuda.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #include
3 |
4 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
5 |
6 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);
7 |
8 | at::Tensor nms(const at::Tensor& dets, const float threshold) {
9 | CHECK_CUDA(dets);
10 | if (dets.numel() == 0)
11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
12 | return nms_cuda(dets, threshold);
13 | }
14 |
15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
16 | m.def("nms", &nms, "non-maximum suppression");
17 | }
18 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions.roi_align import roi_align
2 | from .modules.roi_align import RoIAlign
3 |
4 | __all__ = ['roi_align', 'RoIAlign']
5 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/functions/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/functions/__pycache__/roi_align.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/functions/__pycache__/roi_align.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/functions/roi_align.py:
--------------------------------------------------------------------------------
1 | from torch.autograd import Function
2 |
3 | from .. import roi_align_cuda
4 |
5 |
class RoIAlignFunction(Function):
    """Autograd wrapper around the compiled ``roi_align_cuda`` extension.

    Only CUDA tensors are supported; the CPU path raises
    ``NotImplementedError``.
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
        # Accept either a single int or an (h, w) pair for the output size.
        if isinstance(out_size, int):
            pooled_h = pooled_w = out_size
        elif isinstance(out_size, tuple):
            assert len(out_size) == 2
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            pooled_h, pooled_w = out_size
        else:
            raise TypeError(
                '"out_size" must be an integer or tuple of integers')

        # Record everything backward() will need.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()

        num_channels = features.size(1)
        num_rois = rois.size(0)
        output = features.new_zeros(num_rois, num_channels, pooled_h,
                                    pooled_w)
        if not features.is_cuda:
            # Only the CUDA kernel exists.
            raise NotImplementedError
        roi_align_cuda.forward(features, rois, pooled_h, pooled_w,
                               spatial_scale, sample_num, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        rois = ctx.saved_tensors[0]
        feature_size = ctx.feature_size
        assert (feature_size is not None and grad_output.is_cuda)

        batch_size, num_channels, data_height, data_width = feature_size
        pooled_h = grad_output.size(2)
        pooled_w = grad_output.size(3)

        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height,
                                        data_width)
            roi_align_cuda.backward(grad_output.contiguous(), rois, pooled_h,
                                    pooled_w, ctx.spatial_scale,
                                    ctx.sample_num, grad_input)

        # Gradients flow only to `features`; the remaining inputs get None.
        return grad_input, grad_rois, None, None, None


roi_align = RoIAlignFunction.apply
62 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/gradcheck.py:
--------------------------------------------------------------------------------
import numpy as np
import torch
from torch.autograd import gradcheck

import os.path as osp
import sys
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_align import RoIAlign  # noqa: E402

# Numerical gradient check for RoIAlign on a small random problem.
feat_size = 15
spatial_scale = 1.0 / 8
img_size = feat_size / spatial_scale
n_imgs = 2
n_rois = 20

# Each RoI row becomes [batch_index, x1, y1, x2, y2] in image coordinates.
batch_ind = np.random.randint(n_imgs, size=(n_rois, 1))
boxes = np.random.rand(n_rois, 4) * img_size * 0.5
boxes[:, 2:] += img_size * 0.5  # push the far corner past the near one
boxes = np.hstack((batch_ind, boxes))

feat = torch.randn(
    n_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0')
rois = torch.from_numpy(boxes).float().cuda()
inputs = (feat, rois)
print('Gradcheck for roi align...')
# Check once with the default sample_num and once with sample_num=2.
print(gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3))
print(gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3))
30 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/modules/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/modules/__pycache__/roi_align.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/modules/__pycache__/roi_align.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/modules/roi_align.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 | from ..functions.roi_align import RoIAlignFunction
3 |
4 |
class RoIAlign(Module):
    """Module wrapper for :class:`RoIAlignFunction`.

    Args:
        out_size (int | tuple): output size of each pooled region.
        spatial_scale (float): scale mapping box coordinates onto the
            feature map; stored as ``float``.
        sample_num (int): sampling parameter passed through to the op;
            stored as ``int``. Defaults to 0.
    """

    def __init__(self, out_size, spatial_scale, sample_num=0):
        super(RoIAlign, self).__init__()
        self.out_size = out_size
        self.spatial_scale = float(spatial_scale)
        self.sample_num = int(sample_num)

    def forward(self, features, rois):
        return RoIAlignFunction.apply(features, rois, self.out_size,
                                      self.spatial_scale, self.sample_num)
17 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_align/roi_align_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_align/roi_align_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions.roi_pool import roi_pool
2 | from .modules.roi_pool import RoIPool
3 |
4 | __all__ = ['roi_pool', 'RoIPool']
5 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/functions/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/functions/__pycache__/roi_pool.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/functions/__pycache__/roi_pool.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/functions/roi_pool.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | from .. import roi_pool_cuda
5 |
6 |
class RoIPoolFunction(Function):
    """Autograd wrapper around the compiled ``roi_pool_cuda`` extension.

    CUDA tensors only (enforced by assertion). The forward kernel also
    fills an argmax map, which is kept on ``ctx`` and handed back to the
    CUDA backward kernel.
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        # Accept either a single int or an (h, w) pair for the output size.
        if isinstance(out_size, int):
            pooled_h = pooled_w = out_size
        elif isinstance(out_size, tuple):
            assert len(out_size) == 2
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            pooled_h, pooled_w = out_size
        else:
            raise TypeError(
                '"out_size" must be an integer or tuple of integers')
        assert features.is_cuda
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        out_shape = (num_rois, num_channels, pooled_h, pooled_w)
        output = features.new_zeros(out_shape)
        argmax = features.new_zeros(out_shape, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, pooled_h, pooled_w,
                              spatial_scale, output, argmax)
        # Stash what backward() needs.
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        ctx.argmax = argmax
        return output

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        rois = ctx.saved_tensors[0]
        feature_size = ctx.feature_size
        assert feature_size is not None

        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, ctx.argmax,
                                   ctx.spatial_scale, grad_input)

        # Gradients flow only to `features`; the remaining inputs get None.
        return grad_input, grad_rois, None, None


roi_pool = RoIPoolFunction.apply
56 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/gradcheck.py:
--------------------------------------------------------------------------------
import torch
from torch.autograd import gradcheck

import os.path as osp
import sys
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_pool import RoIPool  # noqa: E402

# Numerical gradient check for RoIPool on a tiny fixed problem.
feats = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
# Each row: [batch_index, x1, y1, x2, y2].
boxes = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
                      [1, 67, 40, 110, 120]]).cuda()
inputs = (feats, boxes)
print('Gradcheck for roi pooling...')
print(gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3))
16 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/modules/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/modules/__pycache__/roi_pool.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/modules/__pycache__/roi_pool.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/modules/roi_pool.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 | from ..functions.roi_pool import roi_pool
3 |
4 |
class RoIPool(Module):
    """Module wrapper for the ``roi_pool`` function.

    Args:
        out_size (int | tuple): output size of each pooled region.
        spatial_scale (float): scale mapping box coordinates onto the
            feature map; stored as ``float``.
    """

    def __init__(self, out_size, spatial_scale):
        super(RoIPool, self).__init__()
        self.out_size = out_size
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        return roi_pool(features, rois, self.out_size, self.spatial_scale)
15 |
--------------------------------------------------------------------------------
/mmdet/ops/roi_pool/roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/roi_pool/roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/__init__.py:
--------------------------------------------------------------------------------
1 | from .modules.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
2 |
3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss']
4 |
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/functions/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/sigmoid_focal_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/sigmoid_focal_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py:
--------------------------------------------------------------------------------
1 | from torch.autograd import Function
2 | from torch.autograd.function import once_differentiable
3 |
4 | from .. import sigmoid_focal_loss_cuda
5 |
6 |
class SigmoidFocalLossFunction(Function):
    """Autograd wrapper around the compiled ``sigmoid_focal_loss_cuda``
    extension (CUDA-only kernels)."""

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25):
        # Keep inputs and hyper-parameters for the backward pass.
        ctx.save_for_backward(input, target)
        num_classes = input.shape[1]
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha
        return sigmoid_focal_loss_cuda.forward(input, target, num_classes,
                                               gamma, alpha)

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        input, target = ctx.saved_tensors
        d_input = sigmoid_focal_loss_cuda.backward(input, target,
                                                   d_loss.contiguous(),
                                                   ctx.num_classes, ctx.gamma,
                                                   ctx.alpha)
        # Only `input` receives a gradient; target and scalars do not.
        return d_input, None, None, None, None


sigmoid_focal_loss = SigmoidFocalLossFunction.apply
35 |
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/modules/__init__.py
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/sigmoid_focal_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/sigmoid_focal_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 | from ..functions.sigmoid_focal_loss import sigmoid_focal_loss
4 |
5 |
6 | # TODO: remove this module
class SigmoidFocalLoss(nn.Module):
    """Module form of the CUDA sigmoid focal loss; returns the summed loss.

    Args:
        gamma (float): focusing parameter forwarded to the op.
        alpha (float): balancing parameter forwarded to the op.
    """

    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        # The underlying op only has a CUDA implementation.
        assert logits.is_cuda
        return sigmoid_focal_loss(logits, targets, self.gamma,
                                  self.alpha).sum()

    def __repr__(self):
        # e.g. "SigmoidFocalLoss(gamma=2.0, alpha=0.25)"
        return "{}(gamma={}, alpha={})".format(self.__class__.__name__,
                                               self.gamma, self.alpha)
25 |
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss_cuda.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss_cuda.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp:
--------------------------------------------------------------------------------
1 | // modify from
2 | // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/SigmoidFocalLoss.h
3 | #include
4 |
5 | at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
6 | const at::Tensor &targets,
7 | const int num_classes,
8 | const float gamma, const float alpha);
9 |
10 | at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
11 | const at::Tensor &targets,
12 | const at::Tensor &d_losses,
13 | const int num_classes,
14 | const float gamma, const float alpha);
15 |
16 | // Interface for Python
17 | at::Tensor SigmoidFocalLoss_forward(const at::Tensor &logits,
18 | const at::Tensor &targets,
19 | const int num_classes, const float gamma,
20 | const float alpha) {
21 | if (logits.type().is_cuda()) {
22 | return SigmoidFocalLoss_forward_cuda(logits, targets, num_classes, gamma,
23 | alpha);
24 | }
25 | }
26 |
27 | at::Tensor SigmoidFocalLoss_backward(const at::Tensor &logits,
28 | const at::Tensor &targets,
29 | const at::Tensor &d_losses,
30 | const int num_classes, const float gamma,
31 | const float alpha) {
32 | if (logits.type().is_cuda()) {
33 | return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses,
34 | num_classes, gamma, alpha);
35 | }
36 | }
37 |
38 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
39 | m.def("forward", &SigmoidFocalLoss_forward,
40 | "SigmoidFocalLoss forward (CUDA)");
41 | m.def("backward", &SigmoidFocalLoss_backward,
42 | "SigmoidFocalLoss backward (CUDA)");
43 | }
44 |
--------------------------------------------------------------------------------
/mmdet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .registry import Registry, build_from_cfg
2 |
3 | __all__ = ['Registry', 'build_from_cfg']
4 |
5 |
--------------------------------------------------------------------------------
/mmdet/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/utils/__pycache__/registry.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/mmdet/utils/__pycache__/registry.cpython-37.pyc
--------------------------------------------------------------------------------
/mmdet/version.py:
--------------------------------------------------------------------------------
1 | # GENERATED VERSION FILE
2 | # TIME: Tue Nov 1 17:55:33 2022
3 |
4 | __version__ = '0.6.0+unknown'
5 | short_version = '0.6.0'
6 |
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | cfg_name="swin_tiny"
2 |
3 | CUDA_VISIBLE_DEVICES=0 python ./tools/test_city_person.py configs/solider/cp/${cfg_name}.py ./work_dirs/cp/$cfg_name/epoch_ 1 241 --out ${cfg_name}.json --mean_teacher 2>&1 | tee ${cfg_name}.txt
4 |
--------------------------------------------------------------------------------
/tools/ECPB/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 EuroCity Persons Benchmark
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/tools/ECPB/README_ECPB.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/ECPB/README_ECPB.pdf
--------------------------------------------------------------------------------
/tools/ECPB/TheEuroCityPersonsBenchmark.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/ECPB/TheEuroCityPersonsBenchmark.docx
--------------------------------------------------------------------------------
/tools/ECPB/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/ECPB/__init__.py
--------------------------------------------------------------------------------
/tools/ECPB/conversion_test/ecp.json:
--------------------------------------------------------------------------------
1 | {
2 | "children": [
3 | {
4 | "score": 0.9,
5 | "tags": [
6 | "occluded>80"
7 | ],
8 | "y1": 195.36,
9 | "y0": 164.64,
10 | "x0": 677.94,
11 | "x1": 690.85,
12 | "orient": -1.66,
13 | "identity": "rider"
14 | },
15 | {
16 | "score": 0.7,
17 | "tags": [
18 | "truncated>80"
19 | ],
20 | "y1": 195.36,
21 | "y0": 164.64,
22 | "x0": 677.94,
23 | "x1": 690.85,
24 | "orient": -1.66,
25 | "identity": "pedestrian"
26 | }
27 | ],
28 | "identity": "frame"
29 | }
--------------------------------------------------------------------------------
/tools/ECPB/conversion_test/kitti.txt:
--------------------------------------------------------------------------------
1 | Cyclist 0 3 -1.66 677.94 164.64 690.85 195.36 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.9
2 | Pedestrian 0.8 0 -1.66 677.94 164.64 690.85 195.36 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.7
3 |
--------------------------------------------------------------------------------
/tools/ECPB/conversion_test/kitti_in.txt:
--------------------------------------------------------------------------------
1 | Cyclist 0.00 3 -1.66 677.94 164.64 690.85 195.36 1.86 0.60 2.02 4.58 1.37 44.76 -1.56 0.9
2 | Pedestrian 0.90 0 -1.66 677.94 164.64 690.85 195.36 1.86 0.60 2.02 4.58 1.37 44.76 -1.56 0.7
--------------------------------------------------------------------------------
/tools/ECPB/detect.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import glob
4 | import dataconverter
5 |
6 |
7 | # TODO adapt this method to get real detections on the given image.
8 | # You have to stick to the given result format.
9 | def mock_detector(image):
10 | mock_detections = [{'x0': 0.0,
11 | 'x1': 10.0,
12 | 'y0': 0.0,
13 | 'y1': 100.0,
14 | 'score': 0.8,
15 | 'identity': 'pedestrian',
16 | 'orient': 0.0},
17 | {'x0': 10.0,
18 | 'x1': 20.0,
19 | 'y0': 0.0,
20 | 'y1': 1000.0,
21 | 'score': 0.7,
22 | 'identity': 'rider',
23 | 'orient': 1.0}]
24 | return mock_detections
25 |
26 |
27 | def run_detector_on_dataset(time='day', mode='val'):
28 | assert mode in ['val', 'test']
29 | assert time in ['day', 'night']
30 |
31 | eval_imgs = glob.glob('./data/{}/img/{}/*/*'.format(time, mode))
32 | destdir = './data/mock_detections/{}/{}/'.format(time, mode)
33 | dataconverter.create_base_dir(destdir)
34 |
35 | for im in eval_imgs:
36 | detections = mock_detector(im)
37 | destfile = os.path.join(destdir, os.path.basename(im).replace('.png', '.json'))
38 | frame = {'identity': 'frame'}
39 | frame['children'] = detections
40 | json.dump(frame, open(destfile, 'w'), indent=1)
41 |
42 |
43 | if __name__ == "__main__":
44 | run_detector_on_dataset(time='day', mode='val')
45 |
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/AS.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/caltech/eval_caltech/AS.mat
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-All.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-All.mat
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-Occ=heavy.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-Occ=heavy.mat
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-Reasonable.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/caltech/eval_caltech/ResultsEval/gt-new/gt-Reasonable.mat
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/bbGt.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/caltech/eval_caltech/bbGt.m
--------------------------------------------------------------------------------
/tools/caltech/eval_caltech/extract_img_anno.m:
--------------------------------------------------------------------------------
1 | % extract_img_anno()
2 | % --------------------------------------------------------
3 | % RPN_BF
4 | % Copyright (c) 2016, Liliang Zhang
5 | % Licensed under The MIT License [see LICENSE for details]
6 | % --------------------------------------------------------
7 |
8 | dataDir='./datasets/caltech/';
9 | addpath(genpath('./code3.2.1'));
10 | addpath(genpath('./toolbox'));
11 | for s=1:2
12 | if(s==1), type='test'; skip=[];continue; else type='train'; skip=3; end
13 | dbInfo(['Usa' type]);
14 | if(exist([dataDir type '/annotations'],'dir')), continue; end
15 | dbExtract([dataDir type],1,skip);
16 | end
17 |
18 |
--------------------------------------------------------------------------------
/tools/check_log_loss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import re
3 | import json
4 | import matplotlib.pyplot as plt
5 |
6 | def parse_log_file(fname):
7 | with open(fname) as f:
8 | content = f.readlines()
9 | data = []
10 | num_pre = -1
11 | for line in content:
12 | if 'Epoch' in line and 'loss' in line and (not 'nan' in line):
13 | start = line.find('Epoch')
14 | start2 = line.find('time')
15 | # find all float number in string
16 | result1 = re.findall(r"[-+]?\d*\.\d+|\d+", line[start:start2])
17 | result2 = re.findall(r"[-+]?\d*\.\d+|\d+", line[start2:])
18 | result = result1[0:4] + result2
19 | assert num_pre < 0 or len(result)==num_pre, 'number of parse loss should be the same'
20 | data.append(np.array([float(item) for item in result]))
21 | num_pre = len(result)
22 | data = np.array(data)
23 | print(data.shape)
24 |
25 | iteration = (data[:,0]-1)*(data[:,2]) + data[:,1]
26 | lr_rate = data[:,3]
27 |
 28 |     # loss starts from index 7
29 | data = data[:,7:]
30 |
31 | plt.subplot(221)
32 | plt.plot(iteration, data[:, -1]) # total loss
33 | plt.subplot(222)
34 | plt.plot(iteration, data[:, -2]) # box loss
35 | plt.subplot(223)
36 | plt.plot(iteration, data[:, -4]) # class acc
37 | plt.subplot(224)
38 | plt.plot(iteration, lr_rate)
39 | plt.show()
40 |
41 |
42 | if __name__ == '__main__':
43 | fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_x152_caffe_32x8d_fpn.txt'
44 | fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_x101_64x4d_fpn_1x_trnbn.txt'
45 | fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_densenet161.txt'
46 | parse_log_file(fname)
--------------------------------------------------------------------------------
/tools/cityPerson/__pycache__/coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/cityPerson/__pycache__/coco.cpython-37.pyc
--------------------------------------------------------------------------------
/tools/cityPerson/__pycache__/eval_MR_multisetup.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/cityPerson/__pycache__/eval_MR_multisetup.cpython-37.pyc
--------------------------------------------------------------------------------
/tools/cityPerson/__pycache__/eval_demo.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tinyvision/SOLIDER-PedestrianDetection/fe80382f1809dc41619638dd510d23b2c6bd9622/tools/cityPerson/__pycache__/eval_demo.cpython-37.pyc
--------------------------------------------------------------------------------
/tools/cityPerson/eval_demo.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tools.cityPerson.coco import COCO
3 | from tools.cityPerson.eval_MR_multisetup import COCOeval
4 |
5 |
6 | def validate(annFile, dt_path, ecp=False):
7 | print("\n")
8 | if ecp:
9 | print("EvalSetMode=ECP")
10 | mean_MR = []
11 | my_id_setup = []
12 | for id_setup in range(0, 4):
13 | cocoGt = COCO(annFile)
14 | cocoDt = cocoGt.loadRes(dt_path)
15 | imgIds = sorted(cocoGt.getImgIds())
16 | cocoEval = COCOeval(cocoGt, cocoDt, 'bbox', ecp=ecp)
17 | cocoEval.params.imgIds = imgIds
18 | cocoEval.evaluate(id_setup)
19 | cocoEval.accumulate()
20 | mean_MR.append(cocoEval.summarize_nofile(id_setup))
21 | my_id_setup.append(id_setup)
22 | return mean_MR
23 |
--------------------------------------------------------------------------------
/tools/coco_eval.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | from mmdet.core import coco_eval
4 |
5 |
6 | def main():
7 | parser = ArgumentParser(description='COCO Evaluation')
8 | parser.add_argument('result', help='result file path')
9 | parser.add_argument('--ann', help='annotation file path')
10 | parser.add_argument(
11 | '--types',
12 | type=str,
13 | nargs='+',
14 | choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
15 | default=['bbox'],
16 | help='result types')
17 | parser.add_argument(
18 | '--max-dets',
19 | type=int,
20 | nargs='+',
21 | default=[100, 300, 1000],
22 | help='proposal numbers, only used for recall evaluation')
23 | args = parser.parse_args()
24 | coco_eval(args.result, args.types, args.ann, args.max_dets)
25 |
26 |
27 | if __name__ == '__main__':
28 | main()
29 |
--------------------------------------------------------------------------------
/tools/crowdhuman/eval_demo.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tools.crowdhuman.coco import COCO
3 | from tools.crowdhuman.eval_MR_multisetup import COCOeval
4 |
5 |
6 | def validate(annFile, dt_path):
7 | mean_MR = []
8 | for id_setup in range(0, 4):
9 | cocoGt = COCO(annFile)
10 | cocoDt = cocoGt.loadRes(dt_path)
11 | imgIds = sorted(cocoGt.getImgIds())
12 | cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
13 | cocoEval.params.imgIds = imgIds
14 | cocoEval.evaluate(id_setup)
15 | if id_setup==0:
16 | fpps, scores = cocoEval.accumulate(id_setup=id_setup)
17 | else:
18 | cocoEval.accumulate(id_setup=id_setup)
19 | mean_MR.append(cocoEval.summarize_nofile(id_setup))
20 | return mean_MR
21 |
22 | if __name__ == "__main__":
23 | MRs = validate('/home/ljp/code/citypersons/evaluation/val_gt.json', '/home/ljp/code/mmdetection/result_ori_csp.json')
24 | # MRs = validate('/media/ljp/Data/data/cityscapes/leftImg8bit_trainvaltest/train_evaluation.json', '/home/ljp/code/mmdetection/train_result.json')
25 | print('Checkpoint %d: [Reasonable: %.2f%%], [Bare: %.2f%%], [Partial: %.2f%%], [Heavy: %.2f%%]'
26 | % (0, MRs[0] * 100, MRs[1] * 100, MRs[2] * 100, MRs[3] * 100))
27 |
28 |
--------------------------------------------------------------------------------
/tools/demo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import os
4 | import os.path as osp
5 | import sys
6 | sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../'))
7 | import time
8 | import cv2
9 | import torch
10 | import glob
11 | import json
12 | import mmcv
13 |
14 | from mmdet.apis import inference_detector, init_detector, show_result
15 |
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description='MMDet test detector')
19 | parser.add_argument('config', help='test config file path')
20 | parser.add_argument('checkpoint', help='checkpoint file')
21 | parser.add_argument('input_img_dir', type=str, help='the dir of input images')
22 | parser.add_argument('output_dir', type=str, help='the dir for result images')
23 | parser.add_argument(
24 | '--launcher',
25 | choices=['none', 'pytorch', 'slurm', 'mpi'],
26 | default='none',
27 | help='job launcher')
28 | parser.add_argument('--local_rank', type=int, default=0)
29 | parser.add_argument('--mean_teacher', action='store_true', help='test the mean teacher pth')
30 | args = parser.parse_args()
31 | return args
32 |
33 |
34 |
35 | def mock_detector(model, image_name, output_dir):
36 | image = cv2.imread(image_name)
37 | results = inference_detector(model, image)
38 | basename = os.path.basename(image_name).split('.')[0]
39 | result_name = basename + "_result.jpg"
40 | result_name = os.path.join(output_dir, result_name)
41 | show_result(image, results, model.CLASSES, out_file=result_name)
42 |
43 | def create_base_dir(dest):
44 | basedir = os.path.dirname(dest)
45 | if not os.path.exists(basedir):
46 | os.makedirs(basedir)
47 |
48 | def run_detector_on_dataset():
49 | args = parse_args()
50 | input_dir = args.input_img_dir
51 | output_dir = args.output_dir
52 | if not os.path.exists(output_dir):
53 | os.makedirs(output_dir)
54 | print(input_dir)
55 | eval_imgs = glob.glob(os.path.join(input_dir, '*.png'))
56 | print(eval_imgs)
57 |
58 | model = init_detector(
59 | args.config, args.checkpoint, device=torch.device('cuda:0'))
60 |
61 | prog_bar = mmcv.ProgressBar(len(eval_imgs))
62 | for im in eval_imgs:
63 | detections = mock_detector(model, im, output_dir)
64 | prog_bar.update()
65 |
66 | if __name__ == '__main__':
67 | run_detector_on_dataset()
68 |
--------------------------------------------------------------------------------
/tools/dist_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | PYTHON=${PYTHON:-"python"}
4 |
5 | CONFIG=$1
6 | CHECKPOINT=$2
7 | GPUS=$3
8 |
9 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \
10 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4}
11 |
--------------------------------------------------------------------------------
/tools/dist_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | PYTHON=${PYTHON:-"python"}
4 |
5 | CONFIG=$1
6 | GPUS=$2
7 |
8 | #$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=11002\
9 | # $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3}
10 |
11 | #CUDA_VISIBLE_DEVICES='4,5'
12 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=11003\
13 | $(dirname "$0")/train.py $CONFIG --seed 0 --launcher pytorch ${@:3}
14 |
--------------------------------------------------------------------------------
/tools/publish_model.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import subprocess
3 | import torch
4 |
5 |
6 | def parse_args():
7 | parser = argparse.ArgumentParser(
8 | description='Process a checkpoint to be published')
9 | parser.add_argument('in_file', help='input checkpoint filename')
10 | parser.add_argument('out_file', help='output checkpoint filename')
11 | args = parser.parse_args()
12 | return args
13 |
14 |
15 | def process_checkpoint(in_file, out_file):
16 | checkpoint = torch.load(in_file, map_location='cpu')
17 | # remove optimizer for smaller file size
18 | if 'optimizer' in checkpoint:
19 | del checkpoint['optimizer']
20 | # if it is necessary to remove some sensitive data in checkpoint['meta'],
21 | # add the code here.
22 | torch.save(checkpoint, out_file)
23 | sha = subprocess.check_output(['sha256sum', out_file]).decode()
24 | final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
25 | subprocess.Popen(['mv', out_file, final_file])
26 |
27 |
28 | def main():
29 | args = parse_args()
30 | process_checkpoint(args.in_file, args.out_file)
31 |
32 |
33 | if __name__ == '__main__':
34 | main()
35 |
--------------------------------------------------------------------------------
/tools/slurm_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | CHECKPOINT=$4
9 | GPUS=${GPUS:-8}
10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12 | PY_ARGS=${@:5}
13 | SRUN_ARGS=${SRUN_ARGS:-""}
14 |
15 | srun -p ${PARTITION} \
16 | --job-name=${JOB_NAME} \
17 | --gres=gpu:${GPUS_PER_NODE} \
18 | --ntasks=${GPUS} \
19 | --ntasks-per-node=${GPUS_PER_NODE} \
20 | --cpus-per-task=${CPUS_PER_TASK} \
21 | --kill-on-bad-exit=1 \
22 | ${SRUN_ARGS} \
23 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
24 |
--------------------------------------------------------------------------------
/tools/slurm_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | WORK_DIR=$4
9 | GPUS=${5:-8}
10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12 | SRUN_ARGS=${SRUN_ARGS:-""}
13 | PY_ARGS=${PY_ARGS:-"--validate"}
14 |
15 | srun -p ${PARTITION} \
16 | --job-name=${JOB_NAME} \
17 | --gres=gpu:${GPUS_PER_NODE} \
18 | --ntasks=${GPUS} \
19 | --ntasks-per-node=${GPUS_PER_NODE} \
20 | --cpus-per-task=${CPUS_PER_TASK} \
21 | --kill-on-bad-exit=1 \
22 | ${SRUN_ARGS} \
23 | python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
24 |
--------------------------------------------------------------------------------
/tools/upgrade_model_version.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import re
3 | from collections import OrderedDict
4 |
5 | import torch
6 |
7 |
8 | def convert(in_file, out_file):
9 | """Convert keys in checkpoints.
10 |
11 | There can be some breaking changes during the development of mmdetection,
12 | and this tool is used for upgrading checkpoints trained with old versions
13 | to the latest one.
14 | """
15 | checkpoint = torch.load(in_file)
16 | in_state_dict = checkpoint.pop('state_dict')
17 | out_state_dict = OrderedDict()
18 | for key, val in in_state_dict.items():
19 | # Use ConvModule instead of nn.Conv2d in RetinaNet
20 | # cls_convs.0.weight -> cls_convs.0.conv.weight
21 | m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
22 | if m is not None:
23 | param = m.groups()[1]
24 | new_key = key.replace(param, 'conv.{}'.format(param))
25 | out_state_dict[new_key] = val
26 | continue
27 |
28 | out_state_dict[key] = val
29 | checkpoint['state_dict'] = out_state_dict
30 | torch.save(checkpoint, out_file)
31 |
32 |
33 | def main():
34 | parser = argparse.ArgumentParser(description='Upgrade model version')
35 | parser.add_argument('in_file', help='input checkpoint file')
36 | parser.add_argument('out_file', help='output checkpoint file')
37 | args = parser.parse_args()
38 | convert(args.in_file, args.out_file)
39 |
40 |
41 | if __name__ == '__main__':
42 | main()
43 |
--------------------------------------------------------------------------------
/tools/voc_eval.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | import mmcv
4 | import numpy as np
5 |
6 | from mmdet import datasets
7 | from mmdet.core import eval_map
8 |
9 |
10 | def voc_eval(result_file, dataset, iou_thr=0.5):
11 | det_results = mmcv.load(result_file)
12 | gt_bboxes = []
13 | gt_labels = []
14 | gt_ignore = []
15 | for i in range(len(dataset)):
16 | ann = dataset.get_ann_info(i)
17 | bboxes = ann['bboxes']
18 | labels = ann['labels']
19 | if 'bboxes_ignore' in ann:
20 | ignore = np.concatenate([
21 | np.zeros(bboxes.shape[0], dtype=np.bool),
22 | np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
23 | ])
24 | gt_ignore.append(ignore)
25 | bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
26 | labels = np.concatenate([labels, ann['labels_ignore']])
27 | gt_bboxes.append(bboxes)
28 | gt_labels.append(labels)
29 | if not gt_ignore:
30 | gt_ignore = gt_ignore
31 | if hasattr(dataset, 'year') and dataset.year == 2007:
32 | dataset_name = 'voc07'
33 | else:
34 | dataset_name = dataset.CLASSES
35 | eval_map(
36 | det_results,
37 | gt_bboxes,
38 | gt_labels,
39 | gt_ignore=gt_ignore,
40 | scale_ranges=None,
41 | iou_thr=iou_thr,
42 | dataset=dataset_name,
43 | print_summary=True)
44 |
45 |
46 | def main():
47 | parser = ArgumentParser(description='VOC Evaluation')
48 | parser.add_argument('result', help='result file path')
49 | parser.add_argument('config', help='config file path')
50 | parser.add_argument(
51 | '--iou-thr',
52 | type=float,
53 | default=0.5,
54 | help='IoU threshold for evaluation')
55 | args = parser.parse_args()
56 | cfg = mmcv.Config.fromfile(args.config)
57 | test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
58 | voc_eval(args.result, test_dataset, args.iou_thr)
59 |
60 |
61 | if __name__ == '__main__':
62 | main()
63 |
--------------------------------------------------------------------------------